repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
siddhantgoel/tornado-sqlalchemy | tornado_sqlalchemy/__init__.py | 1 | 8328 | import multiprocessing
from concurrent.futures import Executor, ThreadPoolExecutor
from contextlib import contextmanager
from typing import Callable, Iterator, Optional
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from tornado.concurrent import Future, chain_future
from tornado.ioloop import IOLoop
from tornado.web import Application
__all__ = ('as_future', 'SessionMixin', 'set_max_workers', 'SQLAlchemy')
class MissingFactoryError(Exception):
    """Raised when a SessionMixin handler has no Tornado application attached."""
    pass
class MissingDatabaseSettingError(Exception):
    """Raised when no ``db`` entry is configured in application settings."""
    pass
class _AsyncExecution:
    """Tiny wrapper around ThreadPoolExecutor. This class is not meant to be
    instantiated externally, but internally we just use it as a wrapper around
    ThreadPoolExecutor so we can control the pool size and make the
    `as_future` function public.
    """
    def __init__(self, max_workers: Optional[int] = None):
        # Default worker count follows the CPU count; the pool itself is
        # created lazily (first call to as_future or set_max_workers).
        self._max_workers = (
            max_workers or multiprocessing.cpu_count()
        ) # type: int
        self._pool = None # type: Optional[Executor]
    def set_max_workers(self, count: int):
        """Resize the pool, draining any existing pool first."""
        if self._pool:
            # Wait for in-flight queries before discarding the old pool.
            self._pool.shutdown(wait=True)
        self._max_workers = count
        self._pool = ThreadPoolExecutor(max_workers=self._max_workers)
    def as_future(self, query: Callable) -> Future:
        """Run *query* on the thread pool; return an awaitable tornado Future."""
        # concurrent.futures.Future is not compatible with the "new style"
        # asyncio Future, and awaiting on such "old-style" futures does not
        # work.
        #
        # tornado includes a `run_in_executor` function to help with this
        # problem, but it's only included in version 5+. Hence, we copy a
        # little bit of code here to handle this incompatibility.
        if not self._pool:
            self._pool = ThreadPoolExecutor(max_workers=self._max_workers)
        old_future = self._pool.submit(query)
        new_future = Future() # type: Future
        # Resolve the tornado Future on the IOLoop thread once the
        # concurrent.futures.Future completes.
        IOLoop.current().add_future(
            old_future, lambda f: chain_future(f, new_future)
        )
        return new_future
class SessionMixin:
    """RequestHandler mixin exposing a lazily-created SQLAlchemy session.

    Expects ``self.application.settings['db']`` to hold a SQLAlchemy
    factory object providing a ``sessionmaker`` attribute.
    """
    _session = None # type: Optional[Session]
    application = None # type: Optional[Application]
    @contextmanager
    def make_session(self) -> Iterator[Session]:
        """Context manager yielding a fresh session.

        Commits on clean exit, rolls back (and re-raises) on exception,
        and always closes the session.
        """
        session = None
        try:
            session = self._make_session()
            yield session
        except Exception:
            # Roll back only if the session was actually created; the
            # exception may have come from _make_session itself.
            if session:
                session.rollback()
            raise
        else:
            session.commit()
        finally:
            if session:
                session.close()
    def on_finish(self):
        """Commit and close the lazy ``self.session`` at end of request."""
        next_on_finish = None
        try:
            # Chain to a sibling on_finish if another mixin/base defines one.
            next_on_finish = super(SessionMixin, self).on_finish
        except AttributeError:
            pass
        if self._session:
            self._session.commit()
            self._session.close()
        if next_on_finish:
            next_on_finish()
    @property
    def session(self) -> Session:
        """Per-request session, created on first access."""
        if not self._session:
            self._session = self._make_session()
        return self._session
    def _make_session(self) -> Session:
        """Build a new session from the application's ``db`` setting.

        Raises:
            MissingFactoryError: no application is attached.
            MissingDatabaseSettingError: no ``db`` entry in settings.
        """
        if not self.application:
            raise MissingFactoryError()
        db = self.application.settings.get('db')
        if not db:
            raise MissingDatabaseSettingError()
        return db.sessionmaker()
# Module-level singleton executor; `as_future` and `set_max_workers` are the
# public entry points listed in `__all__`.
_async_exec = _AsyncExecution()
as_future = _async_exec.as_future
set_max_workers = _async_exec.set_max_workers
class SessionEx(Session):
    """The SessionEx extends the default session system with bind selection.
    """
    def __init__(self, db, autocommit=False, autoflush=True, **options):
        # `db` is the SQLAlchemy facade object; its engine is the default
        # bind and get_binds() supplies the per-table engine map.
        self.db = db
        bind = options.pop('bind', None) or db.engine
        binds = options.pop('binds', db.get_binds())
        super().__init__(
            autocommit=autocommit,
            autoflush=autoflush,
            bind=bind,
            binds=binds,
            **options
        )
    def get_bind(self, mapper=None, clause=None):
        """Return the engine or connection for a given model or
        table, using the `__bind_key__` if it is set.
        """
        if mapper is not None:
            try:
                # SA >= 1.3
                persist_selectable = mapper.persist_selectable
            except AttributeError:
                # SA < 1.3
                persist_selectable = mapper.mapped_table
            # BindMeta stores the bind key in the table's info dict.
            info = getattr(persist_selectable, 'info', {})
            bind_key = info.get('bind_key')
            if bind_key is not None:
                return self.db.get_engine(bind=bind_key)
        # No explicit bind key: fall back to default resolution.
        return super().get_bind(mapper, clause)
class BindMeta(DeclarativeMeta):
    """Declarative metaclass that copies ``__bind_key__`` onto the table.

    The key ends up in ``__table__.info['bind_key']`` where
    ``SessionEx.get_bind`` looks it up.
    """
    def __init__(cls, name, bases, d):
        # Prefer a key declared on this class; fall back to an inherited one.
        bind_key = d.pop('__bind_key__', None) or getattr(
            cls, '__bind_key__', None
        )
        super(BindMeta, cls).__init__(name, bases, d)
        if (
            bind_key is not None
            and getattr(cls, '__table__', None) is not None
        ):
            cls.__table__.info['bind_key'] = bind_key
class SQLAlchemy:
    """Facade bundling declarative base, engines, and session factory.

    Supports a default database URL plus named extra binds (``binds``),
    similar to Flask-SQLAlchemy's multi-bind support.
    """
    def __init__(
        self, url=None, binds=None, session_options=None, engine_options=None
    ):
        self.Model = self.make_declarative_base()
        # bind name (or None for the default) -> cached Engine.
        self._engines = {}
        self.configure(
            url=url,
            binds=binds,
            session_options=session_options,
            engine_options=engine_options,
        )
    def configure(
        self, url=None, binds=None, session_options=None, engine_options=None
    ):
        """Set URLs/options and rebuild the sessionmaker.

        NOTE(review): previously created engines in ``self._engines`` are
        not invalidated here — reconfiguring after engines were created
        appears to keep serving the old engines; confirm intended.
        """
        self.url = url
        self.binds = binds or {}
        self._engine_options = engine_options or {}
        self.sessionmaker = sessionmaker(
            class_=SessionEx, db=self, **(session_options or {})
        )
    @property
    def engine(self):
        # Engine for the default (unnamed) bind.
        return self.get_engine()
    @property
    def metadata(self):
        # Shared MetaData of the declarative base.
        return self.Model.metadata
    def create_engine(self, bind=None):
        """Create a new Engine for *bind* (None means the default URL)."""
        if not self.url and not self.binds:
            raise MissingDatabaseSettingError()
        if bind is None:
            url = self.url
        else:
            if bind not in self.binds:
                raise RuntimeError('bind {} undefined.'.format(bind))
            url = self.binds[bind]
        return create_engine(url, **self._engine_options)
    def get_engine(self, bind=None):
        """Returns a specific engine. cached in self._engines """
        engine = self._engines.get(bind)
        if engine is None:
            engine = self.create_engine(bind)
            self._engines[bind] = engine
        return engine
    def get_tables_for_bind(self, bind=None):
        """Returns a list of all tables relevant for a bind."""
        return [
            table
            for table in self.Model.metadata.tables.values()
            if table.info.get('bind_key') == bind
        ]
    def get_binds(self):
        """Returns a dictionary with a table->engine mapping.
        This is suitable for use of sessionmaker(binds=db.get_binds()).
        """
        binds = [None] + list(self.binds)
        result = {}
        for bind in binds:
            engine = self.get_engine(bind)
            tables = self.get_tables_for_bind(bind)
            result.update(dict((table, engine) for table in tables))
        return result
    def _execute_for_all_tables(self, bind, operation, skip_tables=False):
        """Run a MetaData operation (create_all/drop_all) per bind.

        ``bind`` may be '__all__', a single name/None, or a list of names.
        """
        if bind == '__all__':
            binds = [None] + list(self.binds)
        elif isinstance(bind, str) or bind is None:
            binds = [bind]
        else:
            binds = bind
        for bind in binds:
            extra = {}
            if not skip_tables:
                # Restrict the operation to tables belonging to this bind.
                tables = self.get_tables_for_bind(bind)
                extra['tables'] = tables
            op = getattr(self.Model.metadata, operation)
            op(bind=self.get_engine(bind), **extra)
    def create_all(self, bind='__all__'):
        """Creates all tables.
        """
        self._execute_for_all_tables(bind, 'create_all')
    def drop_all(self, bind='__all__'):
        """Drops all tables.
        """
        self._execute_for_all_tables(bind, 'drop_all')
    def make_declarative_base(self):
        """Build the declarative base class using the bind-aware metaclass."""
        return declarative_base(metaclass=BindMeta)
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/8NodeBrick/cantilever_different_Poisson/NumberOfDivision1/PoissonRatio0.25/compare_essi_version.py | 409 | 1085 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
# NOTE: this is a Python 2 script (uses `print` statements).
# It prints the Version / Compiled / Time Now lines from two ESSI log
# files ("original.log" and "new.log") using the project's color helpers.
# automatically find the script directory.
# script_dir=sys.argv[1]
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own command line color function
# from essi_max_disp_fun import find_max_disp
from mycolor_fun import *
print headblankline()
print headblankline()
print headstart(), "Original ESSI version information:"
fin=open("original.log")
for line in fin:
    if 'Version' in line:
        print headstep(), line,
    if 'Compiled' in line:
        print headstep(), line,
    if 'Time Now' in line:
        print headstep(), line,
    # for-loop over a file never yields an empty string; kept as written.
    if not line: break
print headblankline()
print headstart(), "New ESSI version information:"
fin=open("new.log")
for line in fin:
    if 'Version' in line:
        print headstep(), line,
    if 'Compiled' in line:
        print headstep(), line,
    if 'Time Now' in line:
        print headstep(), line,
    if not line: break
print headstart()
print headblankline()
h3biomed/ansible-modules-extras | packaging/os/pkg5_publisher.py | 23 | 5898 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
- This modules will configure which publishers a client will download IPS
packages from.
options:
name:
description:
- The publisher's name.
required: true
aliases: [ publisher ]
state:
description:
- Whether to ensure that a publisher is present or absent.
required: false
default: present
choices: [ present, absent ]
sticky:
description:
- Packages installed from a sticky repository can only receive updates
from that repository.
required: false
default: null
choices: [ true, false ]
enabled:
description:
- Is the repository enabled or disabled?
required: false
default: null
choices: [ true, false ]
origin:
description:
- A path or URL to the repository.
- Multiple values may be provided.
required: false
default: null
mirror:
description:
- A path or URL to the repository mirror.
- Multiple values may be provided.
required: false
default: null
'''
EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
- pkg5_publisher:
name: solaris
sticky: true
origin: https://pkg.oracle.com/solaris/support/
# Configure a publisher for locally-produced packages:
- pkg5_publisher:
name: site
origin: 'https://pkg.example.com/site/'
'''
def main():
    """Ansible entry point: parse arguments and apply the desired state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['publisher']),
            state=dict(default='present', choices=['present', 'absent']),
            sticky=dict(type='bool'),
            enabled=dict(type='bool'),
            # search_after=dict(),
            # search_before=dict(),
            origin=dict(type='list'),
            mirror=dict(type='list'),
        )
    )
    # An explicitly-empty string list arrives as [''] — normalize to [].
    for option in ['origin', 'mirror']:
        if module.params[option] == ['']:
            module.params[option] = []
    if module.params['state'] == 'present':
        modify_publisher(module, module.params)
    else:
        unset_publisher(module, module.params['name'])
def modify_publisher(module, params):
    """Ensure the publisher matches *params*, shelling out only on change.

    Calls set_publisher() when the publisher is missing or any of the
    requested options differs from its current value; otherwise exits the
    module reporting no change.
    """
    name = params['name']
    existing = get_publishers(module)
    if name in existing:
        for option in ['origin', 'mirror', 'sticky', 'enabled']:
            # `is not None` (not `!= None`): None means "option not requested".
            if params[option] is not None:
                if params[option] != existing[name][option]:
                    return set_publisher(module, params)
    else:
        # Publisher does not exist yet: create it.
        return set_publisher(module, params)
    module.exit_json()
def set_publisher(module, params):
    """Run ``pkg set-publisher`` with flags built from *params* and exit.

    Origins/mirrors are replaced wholesale (remove-all then add each);
    sticky/enabled map to their respective toggle flags.
    """
    name = params['name']
    args = []
    # PEP 8: compare against None with `is not None`, not `!= None`.
    if params['origin'] is not None:
        args.append('--remove-origin=*')
        args.extend(['--add-origin=' + u for u in params['origin']])
    if params['mirror'] is not None:
        args.append('--remove-mirror=*')
        args.extend(['--add-mirror=' + u for u in params['mirror']])
    if params['sticky'] is not None:
        args.append('--sticky' if params['sticky'] else '--non-sticky')
    if params['enabled'] is not None:
        args.append('--enable' if params['enabled'] else '--disable')
    rc, out, err = module.run_command(
        ["pkg", "set-publisher"] + args + [name],
        check_rc=True
    )
    response = {
        'rc': rc,
        'results': [out],
        'msg': err,
        'changed': True,
    }
    module.exit_json(**response)
def unset_publisher(module, publisher):
    """Remove *publisher* via ``pkg unset-publisher`` and exit the module.

    Exits with no change if the publisher is already absent.
    """
    # Idiomatic membership test: `x not in y` rather than `not x in y`.
    if publisher not in get_publishers(module):
        module.exit_json()
    rc, out, err = module.run_command(
        ["pkg", "unset-publisher", publisher],
        check_rc=True
    )
    response = {
        'rc': rc,
        'results': [out],
        'msg': err,
        'changed': True,
    }
    module.exit_json(**response)
def get_publishers(module):
    """Parse ``pkg publisher -Ftsv`` into a dict keyed by publisher name.

    Each value holds 'sticky', 'enabled', and lists of 'origin'/'mirror'
    URIs, with values normalized through unstringify().
    """
    rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
    lines = out.splitlines()
    # First row is the TSV header; it supplies the dict keys.
    keys = lines.pop(0).lower().split("\t")
    publishers = {}
    for line in lines:
        values = dict(zip(keys, map(unstringify, line.split("\t"))))
        name = values['publisher']
        # Idiomatic membership test: `x not in y` rather than `not x in y`.
        if name not in publishers:
            publishers[name] = dict(
                (k, values[k]) for k in ['sticky', 'enabled']
            )
            publishers[name]['origin'] = []
            publishers[name]['mirror'] = []
        # One row per repository URI; 'type' is 'origin' or 'mirror'.
        if values['type'] is not None:
            publishers[name][values['type']].append(values['uri'])
    return publishers
def unstringify(val):
    """Translate pkg(5) TSV tokens into Python values.

    '-' and '' become None, 'true'/'false' become booleans, and any other
    token is returned unchanged.
    """
    specials = {"-": None, "": None, "true": True, "false": False}
    return specials.get(val, val)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
fxstein/cement | tests/core/controller_tests.py | 2 | 10692 | """Tests for cement.core.controller."""
import re
from cement.core import exc, controller
from cement.utils import test
from cement.utils.misc import rando, init_defaults
APP = "app-%s" % rando()[:12]
class TestController(controller.CementBaseController):
    """Base controller fixture: custom usage/epilog, hidden default command."""
    class Meta:
        label = 'base'
        arguments = [
            (['-f', '--foo'], dict(help='foo option'))
        ]
        usage = 'My Custom Usage TXT'
        epilog = "This is the epilog"
    @controller.expose(hide=True)
    def default(self):
        pass
    @controller.expose()
    def some_command(self):
        pass
class TestWithPositionalController(controller.CementBaseController):
    """Base controller fixture whose default command renders a positional arg."""
    class Meta:
        label = 'base'
        arguments = [
            (['foo'], dict(help='foo option', nargs='?'))
        ]
    @controller.expose(hide=True)
    def default(self):
        self.app.render(dict(foo=self.app.pargs.foo))
class Embedded(controller.CementBaseController):
    """Controller stacked onto 'base' with type 'embedded' (commands merge in)."""
    class Meta:
        label = 'embedded_controller'
        stacked_on = 'base'
        stacked_type = 'embedded'
        arguments = [(['-t'], dict())]
    @controller.expose(aliases=['emcmd1'], help='This is my help txt')
    def embedded_cmd1(self):
        pass
class Nested(controller.CementBaseController):
    """Controller stacked onto 'base' with type 'nested' (own sub-command)."""
    class Meta:
        label = 'nested_controller'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [(['-t'], dict())]
    @controller.expose()
    def nested_cmd1(self):
        pass
class AliasesOnly(controller.CementBaseController):
    """Controller exposed only through its aliases (aliases_only=True)."""
    class Meta:
        label = 'aliases_only_controller'
        stacked_on = 'base'
        stacked_type = 'nested'
        aliases = ['this_is_ao_controller']
        aliases_only = True
    @controller.expose(aliases=['ao_cmd1'], aliases_only=True)
    def aliases_only_cmd1(self):
        pass
    @controller.expose(aliases=['ao_cmd2', 'ao2'], aliases_only=True)
    def aliases_only_cmd2(self):
        pass
class DuplicateCommand(controller.CementBaseController):
    """Fixture that re-exposes 'default', colliding with the base controller."""
    class Meta:
        label = 'duplicate_command'
        stacked_on = 'base'
        stacked_type = 'embedded'
    @controller.expose()
    def default(self):
        pass
class DuplicateAlias(controller.CementBaseController):
    """Fixture whose alias 'default' collides with the base default command."""
    class Meta:
        label = 'duplicate_command'
        stacked_on = 'base'
        stacked_type = 'embedded'
    @controller.expose(aliases=['default'])
    def cmd(self):
        pass
class Bad(controller.CementBaseController):
    """Fixture whose Meta.arguments is overwritten by tests with invalid values."""
    class Meta:
        label = 'bad_controller'
        arguments = []
class BadStackedType(controller.CementBaseController):
    """Fixture with an invalid stacked_type to trigger a FrameworkError."""
    class Meta:
        label = 'bad_stacked_type'
        stacked_on = 'base'
        stacked_type = 'bogus'
        arguments = []
class ArgumentConflict(controller.CementBaseController):
    """Fixture re-declaring -f/--foo, conflicting with the base controller."""
    class Meta:
        label = 'embedded'
        stacked_on = 'base'
        stacked_type = 'embedded'
        arguments = [(['-f', '--foo'], dict())]
class Unstacked(controller.CementBaseController):
    """Fixture stacked on nothing (stacked_on=None) to trigger InterfaceError."""
    class Meta:
        label = 'unstacked'
        stacked_on = None
        arguments = [
            (['--foo6'], dict(dest='foo6')),
        ]
class BadStackType(controller.CementBaseController):
    """Fixture with an unknown stacked_type to trigger InterfaceError."""
    class Meta:
        label = 'bad_stack_type'
        stacked_on = 'base'
        stacked_type = 'bogus_stacked_type'
        arguments = [
            (['--foo6'], dict(dest='foo6')),
        ]
class ControllerTestCase(test.CementCoreTestCase):
    """Exercises controller registration, stacking, aliases, and error cases
    using the fixture controllers defined above.
    """
    def test_default(self):
        app = self.make_app(base_controller=TestController)
        app.setup()
        app.run()
    def test_epilog(self):
        app = self.make_app(base_controller=TestController)
        app.setup()
        app.run()
        self.eq(app.args.epilog, 'This is the epilog')
    def test_txt_defined_base_controller(self):
        self.app.handler.register(TestController)
        self.app.setup()
    @test.raises(exc.InterfaceError)
    def test_invalid_arguments_1(self):
        # arguments must be a list of (list, dict) tuples.
        Bad.Meta.arguments = ['this is invalid']
        self.app.handler.register(Bad)
    @test.raises(exc.InterfaceError)
    def test_invalid_arguments_2(self):
        Bad.Meta.arguments = [('this is also invalid', dict())]
        self.app.handler.register(Bad)
    @test.raises(exc.InterfaceError)
    def test_invalid_arguments_3(self):
        Bad.Meta.arguments = [(['-f'], 'and this is invalid')]
        self.app.handler.register(Bad)
    @test.raises(exc.InterfaceError)
    def test_invalid_arguments_4(self):
        Bad.Meta.arguments = 'totally jacked'
        self.app.handler.register(Bad)
    def test_embedded_controller(self):
        app = self.make_app(argv=['embedded-cmd1'])
        app.handler.register(TestController)
        app.handler.register(Embedded)
        app.setup()
        app.run()
        check = 'embedded-cmd1' in app.controller._visible_commands
        self.ok(check)
        # also check for the alias here
        check = 'emcmd1' in app.controller._dispatch_map
        self.ok(check)
    def test_nested_controller(self):
        app = self.make_app(argv=['nested-controller'])
        app.handler.register(TestController)
        app.handler.register(Nested)
        app.setup()
        app.run()
        check = 'nested-controller' in app.controller._visible_commands
        self.ok(check)
        self.eq(app.controller._dispatch_command['func_name'], '_dispatch')
    def test_aliases_only_controller(self):
        app = self.make_app(argv=['aliases-only-controller'])
        app.handler.register(TestController)
        app.handler.register(AliasesOnly)
        app.setup()
        app.run()
    @test.raises(exc.FrameworkError)
    def test_bad_stacked_type(self):
        app = self.make_app()
        app.handler.register(TestController)
        app.handler.register(BadStackedType)
        app.setup()
        app.run()
    @test.raises(exc.FrameworkError)
    def test_duplicate_command(self):
        app = self.make_app()
        app.handler.register(TestController)
        app.handler.register(DuplicateCommand)
        app.setup()
        app.run()
    @test.raises(exc.FrameworkError)
    def test_duplicate_alias(self):
        app = self.make_app()
        app.handler.register(TestController)
        app.handler.register(DuplicateAlias)
        app.setup()
        app.run()
    def test_usage_txt(self):
        app = self.make_app()
        app.handler.register(TestController)
        app.setup()
        self.eq(app.controller._usage_text, 'My Custom Usage TXT')
    @test.raises(exc.FrameworkError)
    def test_argument_conflict(self):
        try:
            app = self.make_app(base_controller=TestController)
            app.handler.register(ArgumentConflict)
            app.setup()
            app.run()
        except NameError as e:
            # This is a hack due to a Travis-CI Bug:
            # https://github.com/travis-ci/travis-ci/issues/998
            if e.args[0] == "global name 'ngettext' is not defined":
                bug = "https://github.com/travis-ci/travis-ci/issues/998"
                raise test.SkipTest("Travis-CI Bug: %s" % bug)
            else:
                raise
    def test_default_command_with_positional(self):
        app = self.make_app(base_controller=TestWithPositionalController,
                            argv=['mypositional'])
        app.setup()
        app.run()
        self.eq(app.get_last_rendered()[0]['foo'], 'mypositional')
    def test_load_extensions_from_config_list(self):
        # Extensions listed in the config (as a list) should be loaded.
        defaults = init_defaults(APP)
        defaults[APP]['extensions'] = ['json', 'yaml']
        app = self.make_app(
            label=APP,
            extensions=[],
            config_defaults=defaults,
        )
        app.setup()
        app.run()
        res = 'cement.ext.ext_json' in app.ext._loaded_extensions
        self.ok(res)
        res = 'cement.ext.ext_yaml' in app.ext._loaded_extensions
        self.ok(res)
    def test_load_extensions_from_config_str(self):
        # Same as above, but the config value is a comma-separated string.
        defaults = init_defaults(APP)
        defaults[APP]['extensions'] = 'json, yaml'
        app = self.make_app(
            label=APP,
            extensions=[],
            config_defaults=defaults,
        )
        app.setup()
        app.run()
        res = 'cement.ext.ext_json' in app.ext._loaded_extensions
        self.ok(res)
        res = 'cement.ext.ext_yaml' in app.ext._loaded_extensions
        self.ok(res)
    @test.raises(exc.InterfaceError)
    def test_invalid_stacked_on(self):
        self.reset_backend()
        try:
            self.app = self.make_app(APP,
                                     handlers=[
                                         TestController,
                                         Unstacked,
                                     ],
                                     )
            with self.app as app:
                res = app.run()
        except exc.InterfaceError as e:
            self.ok(re.match("(.*)is not stacked anywhere!(.*)", e.msg))
            raise
    @test.raises(exc.InterfaceError)
    def test_invalid_stacked_type(self):
        self.reset_backend()
        try:
            self.app = self.make_app(APP,
                                     handlers=[
                                         TestController,
                                         BadStackType,
                                     ],
                                     )
            with self.app as app:
                res = app.run()
        except exc.InterfaceError as e:
            self.ok(re.match("(.*)has an unknown stacked type(.*)", e.msg))
            raise
    def test_usage_text(self):
        # With Meta.usage cleared, the generated usage string is used.
        self.reset_backend()
        self.app = self.make_app(APP,
                                 handlers=[
                                     TestController,
                                 ],
                                 )
        with self.app as app:
            self.app.controller._meta.usage = None
            usage = app.controller._usage_text
            self.ok(usage.startswith('%s (sub-commands ...)' %
                                     self.app._meta.label))
    def test_help_text(self):
        self.reset_backend()
        self.app = self.make_app(APP,
                                 handlers=[
                                     TestController,
                                     AliasesOnly,
                                 ],
                                 )
        with self.app as app:
            app.run()
            help = app.controller._help_text
            # self.ok(usage.startswith('%s (sub-commands ...)' % \
            #     self.app._meta.label))
| bsd-3-clause |
olexiim/edx-platform | cms/djangoapps/contentstore/views/user.py | 9 | 7393 | from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from util.json_request import JsonResponse, expect_json
from student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole
from course_creators.views import user_requested_access
from student.auth import STUDIO_EDIT_ROLES, STUDIO_VIEW_USERS, get_user_permissions
from student.models import CourseEnrollment
from django.http import HttpResponseNotFound
from student import auth
__all__ = ['request_course_creator', 'course_team_handler']
@require_POST
@login_required
def request_course_creator(request):
    """
    User has requested course creation access.
    """
    # Records the request via the course_creators app, then acknowledges.
    user_requested_access(request.user)
    return JsonResponse({"Status": "OK"})
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def course_team_handler(request, course_key_string=None, email=None):
    """
    The restful handler for course team users.
    GET
        html: return html page for managing course team
        json: return json representation of a particular course team member (email is required).
    POST or PUT
        json: modify the permissions for a particular course team member (email is required, as well as role in the payload).
    DELETE:
        json: remove a particular course team member from the course team (email is required).
    """
    course_key = CourseKey.from_string(course_key_string) if course_key_string else None
    # No permissions check here - each helper method does its own check.
    # JSON is treated as the default: an absent HTTP Accept header is read
    # as 'application/json'.
    if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
        return _course_team_user(request, course_key, email)
    elif request.method == 'GET':  # assume html
        return _manage_users(request, course_key)
    else:
        return HttpResponseNotFound()
def _manage_users(request, course_key):
    """
    This view will return all CMS users who are editors for the specified course
    """
    # check that logged in user has permissions to this item
    user_perms = get_user_permissions(request.user, course_key)
    if not user_perms & STUDIO_VIEW_USERS:
        raise PermissionDenied()
    course_module = modulestore().get_course(course_key)
    instructors = CourseInstructorRole(course_key).users_with_role()
    # the page only lists staff and assumes they're a superset of instructors. Do a union to ensure.
    staff = set(CourseStaffRole(course_key).users_with_role()).union(instructors)
    return render_to_response('manage_users.html', {
        'context_course': course_module,
        'staff': staff,
        'instructors': instructors,
        # Action buttons are only shown to users who may edit roles.
        'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES),
    })
@expect_json
def _course_team_user(request, course_key, email):
    """
    Handle the add, remove, promote, demote requests ensuring the requester has authority
    """
    # check that logged in user has permissions to this item
    requester_perms = get_user_permissions(request.user, course_key)
    permissions_error_response = JsonResponse({"error": _("Insufficient permissions")}, 403)
    if (requester_perms & STUDIO_VIEW_USERS) or (email == request.user.email):
        # This user has permissions to at least view the list of users or is editing themself
        pass
    else:
        # This user is not even allowed to know who the authorized users are.
        return permissions_error_response
    try:
        user = User.objects.get(email=email)
    # NOTE(review): broad `except Exception` also masks database errors, not
    # just User.DoesNotExist — consider narrowing; confirm intended.
    except Exception:
        msg = {
            "error": _("Could not find user by email address '{email}'.").format(email=email),
        }
        return JsonResponse(msg, 404)
    is_library = isinstance(course_key, LibraryLocator)
    # Ordered list of roles: can always move self to the right, but need STUDIO_EDIT_ROLES to move any user left
    if is_library:
        role_hierarchy = (CourseInstructorRole, CourseStaffRole, LibraryUserRole)
    else:
        role_hierarchy = (CourseInstructorRole, CourseStaffRole)
    if request.method == "GET":
        # just return info about the user
        msg = {
            "email": user.email,
            "active": user.is_active,
            "role": None,
        }
        # what's the highest role that this user has? (How should this report global staff?)
        for role in role_hierarchy:
            if role(course_key).has_user(user):
                msg["role"] = role.ROLE
                break
        return JsonResponse(msg)
    # All of the following code is for editing/promoting/deleting users.
    # Check that the user has STUDIO_EDIT_ROLES permission or is editing themselves:
    if not ((requester_perms & STUDIO_EDIT_ROLES) or (user.id == request.user.id)):
        return permissions_error_response
    # can't modify an inactive user
    if not user.is_active:
        msg = {
            "error": _('User {email} has registered but has not yet activated his/her account.').format(email=email),
        }
        return JsonResponse(msg, 400)
    if request.method == "DELETE":
        new_role = None
    else:
        # only other operation supported is to promote/demote a user by changing their role:
        # role may be None or "" (equivalent to a DELETE request) but must be set.
        # Check that the new role was specified:
        if "role" in request.json or "role" in request.POST:
            new_role = request.json.get("role", request.POST.get("role"))
        else:
            return JsonResponse({"error": _("No `role` specified.")}, 400)
    old_roles = set()
    role_added = False
    # Walk roles from most to least privileged, adding the new role and
    # collecting any other roles the user currently holds for removal.
    for role_type in role_hierarchy:
        role = role_type(course_key)
        if role_type.ROLE == new_role:
            if (requester_perms & STUDIO_EDIT_ROLES) or (user.id == request.user.id and old_roles):
                # User has STUDIO_EDIT_ROLES permission or
                # is currently a member of a higher role, and is thus demoting themself
                auth.add_users(request.user, role, user)
                role_added = True
            else:
                return permissions_error_response
        elif role.has_user(user):
            # Remove the user from this old role:
            old_roles.add(role)
    if new_role and not role_added:
        return JsonResponse({"error": _("Invalid `role` specified.")}, 400)
    for role in old_roles:
        # Refuse to strip the very last instructor ("Admin") from a course.
        if isinstance(role, CourseInstructorRole) and role.users_with_role().count() == 1:
            msg = {"error": _("You may not remove the last Admin. Add another Admin first.")}
            return JsonResponse(msg, 400)
        auth.remove_users(request.user, role, user)
    if new_role and not is_library:
        # The user may be newly added to this course.
        # auto-enroll the user in the course so that "View Live" will work.
        CourseEnrollment.enroll(user, course_key)
    return JsonResponse()
| agpl-3.0 |
Yelp/love | tests/logic/love_test.py | 1 | 3463 | # -*- coding: utf-8 -*-
import unittest
import logic.love
from errors import TaintedLove
from testing.factories import create_alias_with_employee_username
from testing.factories import create_employee
class SendLovesTest(unittest.TestCase):
    """Tests for logic.love.send_loves covering recipients, aliases, and
    the TaintedLove validation failures.
    """
    # nosegae flags enable the GAE testbed stubs used by these tests.
    nosegae_taskqueue = True
    nosegae_memcache = True
    nosegae_datastore_v3 = True
    def setUp(self):
        self.alice = create_employee(username='alice')
        self.bob = create_employee(username='bob')
        self.carol = create_employee(username='carol')
        self.message = 'hallo'
    def test_send_loves(self):
        # Two recipients: each should receive one love from alice.
        logic.love.send_loves(
            set(['bob', 'carol']),
            self.message,
            sender_username='alice',
        )
        loves_for_bob = logic.love.get_love(None, 'bob').get_result()
        self.assertEqual(len(loves_for_bob), 1)
        self.assertEqual(loves_for_bob[0].sender_key, self.alice.key)
        self.assertEqual(loves_for_bob[0].message, self.message)
        loves_for_carol = logic.love.get_love(None, 'carol').get_result()
        self.assertEqual(len(loves_for_carol), 1)
        self.assertEqual(loves_for_carol[0].sender_key, self.alice.key)
        self.assertEqual(loves_for_carol[0].message, self.message)
    def test_invalid_sender(self):
        # Unknown sender username must be rejected.
        with self.assertRaises(TaintedLove):
            logic.love.send_loves(
                set(['alice']),
                'hallo',
                sender_username='wwu',
            )
    def test_sender_is_a_recipient(self):
        # Sender appearing among recipients is dropped, not loved.
        logic.love.send_loves(
            set(['bob', 'alice']),
            self.message,
            sender_username='alice',
        )
        loves_for_bob = logic.love.get_love('alice', 'bob').get_result()
        self.assertEqual(len(loves_for_bob), 1)
        self.assertEqual(loves_for_bob[0].message, self.message)
        loves_for_alice = logic.love.get_love(None, 'alice').get_result()
        self.assertEqual(loves_for_alice, [])
    def test_sender_is_only_recipient(self):
        with self.assertRaises(TaintedLove):
            logic.love.send_loves(
                set(['alice']),
                self.message,
                sender_username='alice',
            )
    def test_invalid_recipient(self):
        # One bad recipient taints the whole send: nothing is delivered.
        with self.assertRaises(TaintedLove):
            logic.love.send_loves(
                set(['bob', 'dean']),
                'hallo',
                sender_username='alice',
            )
        loves_for_bob = logic.love.get_love('alice', 'bob').get_result()
        self.assertEqual(loves_for_bob, [])
    def test_send_loves_with_alias(self):
        # Sending to an alias resolves to the underlying username.
        message = 'Loving your alias'
        create_alias_with_employee_username(name='bobby', username=self.bob.username)
        logic.love.send_loves(['bobby'], message, sender_username=self.carol.username)
        loves_for_bob = logic.love.get_love('carol', 'bob').get_result()
        self.assertEqual(len(loves_for_bob), 1)
        self.assertEqual(loves_for_bob[0].sender_key, self.carol.key)
        self.assertEqual(loves_for_bob[0].message, message)
    def test_send_loves_with_alias_and_username_for_same_user(self):
        # Alias + username that resolve to the same user is rejected.
        create_alias_with_employee_username(name='bobby', username=self.bob.username)
        with self.assertRaises(TaintedLove):
            logic.love.send_loves(['bob', 'bobby'], 'hallo', sender_username='alice')
        loves_for_bob = logic.love.get_love('alice', 'bob').get_result()
        self.assertEqual(loves_for_bob, [])
| mit |
rdo-management/neutron | neutron/tests/unit/nec/stub_ofc_driver.py | 10 | 12135 | # Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.common import log as call_log
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec import ofc_driver_base
LOG = logging.getLogger(__name__)
MAX_NUM_OPENFLOW_ROUTER = 2
class StubOFCDriver(ofc_driver_base.OFCDriverBase):
"""Stub OFC driver for testing.
This driver can be used not only for unit tests but also for real testing
as a logging driver. It stores the created resources on OFC and returns
them in get methods().
If autocheck is enabled, it checks whether the specified resource exists
in OFC and raises an exception if it is different from expected status.
"""
    def __init__(self, conf):
        # `conf` is accepted for interface compatibility but unused; the
        # stub keeps all state in in-memory dicts and starts with
        # autocheck disabled.
        self.autocheck = False
        self.reset_all()
def reset_all(self):
self.ofc_tenant_dict = {}
self.ofc_network_dict = {}
self.ofc_port_dict = {}
self.ofc_filter_dict = {}
self.ofc_router_dict = {}
self.ofc_router_inf_dict = {}
self.ofc_router_route_dict = {}
def enable_autocheck(self):
self.autocheck = True
def disable_autocheck(self):
self.autocheck = False
@call_log.log
def create_tenant(self, description, tenant_id=None):
ofc_id = "ofc-" + tenant_id[:-4]
if self.autocheck:
if ofc_id in self.ofc_tenant_dict:
raise Exception(_('(create_tenant) OFC tenant %s '
'already exists') % ofc_id)
self.ofc_tenant_dict[ofc_id] = {'tenant_id': tenant_id,
'description': description}
return ofc_id
@call_log.log
def delete_tenant(self, ofc_tenant_id):
if ofc_tenant_id in self.ofc_tenant_dict:
del self.ofc_tenant_dict[ofc_tenant_id]
else:
if self.autocheck:
raise Exception(_('(delete_tenant) OFC tenant %s not found')
% ofc_tenant_id)
LOG.debug('delete_tenant: SUCCEED')
@call_log.log
def create_network(self, ofc_tenant_id, description, network_id=None):
ofc_id = "ofc-" + network_id[:-4]
if self.autocheck:
if ofc_tenant_id not in self.ofc_tenant_dict:
raise Exception(_('(create_network) OFC tenant %s not found')
% ofc_tenant_id)
if ofc_id in self.ofc_network_dict:
raise Exception(_('(create_network) OFC network %s '
'already exists') % ofc_id)
self.ofc_network_dict[ofc_id] = {'tenant_id': ofc_tenant_id,
'network_id': network_id,
'description': description}
return ofc_id
@call_log.log
def update_network(self, ofc_network_id, description):
if self.autocheck:
if ofc_network_id not in self.ofc_network_dict:
raise Exception(_('(update_network) OFC network %s not found')
% ofc_network_id)
data = {'description': description}
self.ofc_network_dict[ofc_network_id].update(data)
LOG.debug('update_network: SUCCEED')
@call_log.log
def delete_network(self, ofc_network_id):
if ofc_network_id in self.ofc_network_dict:
del self.ofc_network_dict[ofc_network_id]
else:
if self.autocheck:
raise Exception(_('(delete_network) OFC network %s not found')
% ofc_network_id)
LOG.debug('delete_network: SUCCEED')
@call_log.log
def create_port(self, ofc_network_id, info, port_id=None, filters=None):
ofc_id = "ofc-" + port_id[:-4]
if self.autocheck:
if ofc_network_id not in self.ofc_network_dict:
raise Exception(_('(create_port) OFC network %s not found')
% ofc_network_id)
if ofc_id in self.ofc_port_dict:
raise Exception(_('(create_port) OFC port %s already exists')
% ofc_id)
self.ofc_port_dict[ofc_id] = {'network_id': ofc_network_id,
'port_id': port_id}
if filters:
self.ofc_port_dict[ofc_id]['filters'] = filters
return ofc_id
@call_log.log
def delete_port(self, ofc_port_id):
if ofc_port_id in self.ofc_port_dict:
del self.ofc_port_dict[ofc_port_id]
else:
if self.autocheck:
raise Exception(_('(delete_port) OFC port %s not found')
% ofc_port_id)
LOG.debug('delete_port: SUCCEED')
@classmethod
def filter_supported(cls):
return True
def create_filter(self, context, filter_dict, filter_id=None):
return "ofc-" + filter_id[:-4]
def delete_filter(self, ofc_filter_id):
pass
def convert_ofc_tenant_id(self, context, ofc_tenant_id):
return ofc_tenant_id
def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
return ofc_network_id
def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
return ofc_port_id
def convert_ofc_filter_id(self, context, ofc_filter_id):
return ofc_filter_id
router_supported = True
router_nat_supported = True
@call_log.log
def create_router(self, ofc_tenant_id, router_id, description):
ofc_id = "ofc-" + router_id[:-4]
if self.autocheck:
if ofc_tenant_id not in self.ofc_tenant_dict:
raise Exception(_('(create_router) OFC tenant %s not found')
% ofc_tenant_id)
if ofc_id in self.ofc_router_dict:
raise Exception(_('(create_router) OFC router %s '
'already exists') % ofc_id)
if len(self.ofc_router_dict) >= MAX_NUM_OPENFLOW_ROUTER:
params = {'reason': _("Operation on OFC is failed"),
'status': 409}
raise nexc.OFCException(**params)
self.ofc_router_dict[ofc_id] = {'tenant_id': ofc_tenant_id,
'router_id': router_id,
'description': description}
return ofc_id
@call_log.log
def delete_router(self, ofc_router_id):
if ofc_router_id in self.ofc_router_dict:
del self.ofc_router_dict[ofc_router_id]
else:
if self.autocheck:
raise Exception(_('(delete_router) OFC router %s not found')
% ofc_router_id)
LOG.debug('delete_router: SUCCEED')
@call_log.log
def add_router_interface(self, ofc_router_id, ofc_net_id,
ip_address=None, mac_address=None):
if_id = "ofc-" + uuidutils.generate_uuid()[:-4]
# IP address should have a format of a.b.c.d/N
if ip_address != str(netaddr.IPNetwork(ip_address)):
raise Exception(_('(add_router_interface) '
'ip_address %s is not a valid format (a.b.c.d/N).')
% ip_address)
if self.autocheck:
if ofc_router_id not in self.ofc_router_dict:
raise Exception(_('(add_router_interface) '
'OFC router %s not found') % ofc_router_id)
if ofc_net_id not in self.ofc_network_dict:
raise Exception(_('(add_router_interface) '
'OFC network %s not found') % ofc_net_id)
# Check duplicate destination
self.ofc_router_inf_dict[if_id] = {'router_id': ofc_router_id,
'network_id': ofc_net_id,
'ip_address': ip_address,
'mac_address': mac_address}
LOG.debug('add_router_interface: SUCCEED (if_id=%s)', if_id)
return if_id
@call_log.log
def update_router_interface(self, ofc_router_inf_id,
ip_address=None, mac_address=None):
if ofc_router_inf_id not in self.ofc_router_inf_dict:
if self.autocheck:
raise Exception(_('(delete_router_interface) '
'OFC router interface %s not found')
% ofc_router_inf_id)
self.ofc_router_inf_dict[ofc_router_inf_id] = {}
inf = self.ofc_router_inf_dict[ofc_router_inf_id]
if ip_address:
inf.update({'ip_address': ip_address})
if mac_address:
inf.update({'mac_address': mac_address})
LOG.debug('update_router_route: SUCCEED')
@call_log.log
def delete_router_interface(self, ofc_router_inf_id):
if ofc_router_inf_id in self.ofc_router_inf_dict:
del self.ofc_router_inf_dict[ofc_router_inf_id]
else:
if self.autocheck:
raise Exception(_('(delete_router_interface) '
'OFC router interface %s not found')
% ofc_router_inf_id)
LOG.debug('delete_router_interface: SUCCEED')
@call_log.log
def add_router_route(self, ofc_router_id, destination, nexthop):
route_id = "ofc-" + uuidutils.generate_uuid()[:-4]
# IP address format check
netaddr.IPNetwork(destination)
netaddr.IPAddress(nexthop)
if self.autocheck:
if ofc_router_id not in self.ofc_router_dict:
raise Exception(_('(add_router_route) OFC router %s not found')
% ofc_router_id)
# Check duplicate destination
if destination in [route['destination'] for route in
self.ofc_router_route_dict.values()]:
raise Exception(_('(add_router_route) '
'route to "%s" already exists') % destination)
self.ofc_router_route_dict[route_id] = {'router_id': ofc_router_id,
'destination': destination,
'nexthop': nexthop}
LOG.debug('add_router_route: SUCCEED (route_id=%s)', route_id)
return route_id
@call_log.log
def delete_router_route(self, ofc_router_route_id):
if ofc_router_route_id in self.ofc_router_route_dict:
del self.ofc_router_route_dict[ofc_router_route_id]
else:
if self.autocheck:
raise Exception(_('(delete_router_route) OFC router route %s '
'not found') % ofc_router_route_id)
LOG.debug('delete_router_route: SUCCEED')
@call_log.log
def list_router_routes(self, ofc_router_id):
if self.autocheck:
if ofc_router_id not in self.ofc_router_dict:
raise Exception(_('(delete_router) OFC router %s not found')
% ofc_router_id)
routes = [{'id': k,
'destination': v['destination'],
'nexthop': v['nexthop']}
for k, v in self.ofc_router_route_dict.items()
if v['router_id'] == ofc_router_id]
LOG.debug('list_router_routes: routes=%s', routes)
return routes
| apache-2.0 |
cchurch/ansible | lib/ansible/executor/task_result.py | 34 | 5741 | # Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.parsing.dataloader import DataLoader
from ansible.vars.clean import module_response_deepcopy, strip_internal_keys
# Keys stripped from a cleaned-up result copy (their status is already
# conveyed by the callback event type).
_IGNORE = ('failed', 'skipped')
# Keys always carried over into the censored replacement for no_log results.
_PRESERVE = ('attempts', 'changed', 'retries')
# Nested keys preserved per sub-dictionary when cleaning a result copy.
_SUB_PRESERVE = {'_ansible_delegated_vars': ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection')}
# stuff callbacks need
CLEAN_EXCEPTIONS = (
    '_ansible_verbose_always',  # for debug and other actions, to always expand data (pretty jsonification)
    '_ansible_item_label',  # to know actual 'item' variable
    '_ansible_no_log',  # jic we didnt clean up well enough, DON'T LOG
    '_ansible_verbose_override',  # controls display of ansible_facts, gathering would be very noise with -v otherwise
)
class TaskResult:
    '''
    This class is responsible for interpreting the resulting data
    from an executed task, and provides helper methods for determining
    the result of a given task.
    '''
    def __init__(self, host, task, return_data, task_fields=None):
        """
        :param host: host the task ran on
        :param task: the Task object that produced this result
        :param return_data: module result as a dict, or a string parsed
            via DataLoader
        :param task_fields: optional dict of the task's (templated) fields
        """
        self._host = host
        self._task = task
        # copy dicts so later clean-up cannot mutate the caller's structure;
        # anything else is handed to DataLoader for parsing
        if isinstance(return_data, dict):
            self._result = return_data.copy()
        else:
            self._result = DataLoader().load(return_data)
        if task_fields is None:
            self._task_fields = dict()
        else:
            self._task_fields = task_fields
    @property
    def task_name(self):
        # prefer the (templated) name captured in task_fields over the task's
        return self._task_fields.get('name', None) or self._task.get_name()
    def is_changed(self):
        """Return True if the task (or any loop item) reported a change."""
        return self._check_key('changed')
    def is_skipped(self):
        """Return True only if the task, or every one of its loop items,
        was skipped."""
        # loop results
        if 'results' in self._result:
            results = self._result['results']
            # Loop tasks are only considered skipped if all items were skipped.
            # some squashed results (eg, yum) are not dicts and can't be skipped individually
            if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
                return True
        # regular tasks and squashed non-dict results
        return self._result.get('skipped', False)
    def is_failed(self):
        """Return True if the task failed; a 'failed_when' override (on the
        task or any loop item) takes precedence over the plain 'failed' key."""
        if 'failed_when_result' in self._result or \
           'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
            return self._check_key('failed_when_result')
        else:
            return self._check_key('failed')
    def is_unreachable(self):
        """Return True if the host was unreachable (or any loop item was)."""
        return self._check_key('unreachable')
    def needs_debugger(self, globally_enabled=False):
        """Decide whether the task debugger should be entered for this
        result, combining the global flag with the task-level 'debugger'
        setting (which always wins when present)."""
        _debugger = self._task_fields.get('debugger')
        _ignore_errors = C.TASK_DEBUGGER_IGNORE_ERRORS and self._task_fields.get('ignore_errors')
        ret = False
        if globally_enabled and ((self.is_failed() and not _ignore_errors) or self.is_unreachable()):
            ret = True
        # the per-task setting overrides the globally computed default
        if _debugger in ('always',):
            ret = True
        elif _debugger in ('never',):
            ret = False
        elif _debugger in ('on_failed',) and self.is_failed() and not _ignore_errors:
            ret = True
        elif _debugger in ('on_unreachable',) and self.is_unreachable():
            ret = True
        elif _debugger in('on_skipped',) and self.is_skipped():
            ret = True
        return ret
    def _check_key(self, key):
        '''get a specific key from the result or its items'''
        if isinstance(self._result, dict) and key in self._result:
            return self._result.get(key, False)
        else:
            # aggregate the flag across loop item results (OR semantics)
            flag = False
            for res in self._result.get('results', []):
                if isinstance(res, dict):
                    flag |= res.get(key, False)
            return flag
    def clean_copy(self):
        ''' returns 'clean' taskresult object '''
        # FIXME: clean task_fields, _task and _host copies
        result = TaskResult(self._host, self._task, {}, self._task_fields)
        # statuses are already reflected on the event type
        if result._task and result._task.action in ['debug']:
            # debug is verbose by default to display vars, no need to add invocation
            ignore = _IGNORE + ('invocation',)
        else:
            ignore = _IGNORE
        subset = {}
        # preserve subset for later
        for sub in _SUB_PRESERVE:
            if sub in self._result:
                subset[sub] = {}
                for key in _SUB_PRESERVE[sub]:
                    if key in self._result[sub]:
                        subset[sub][key] = self._result[sub][key]
        # when no_log applies, replace the whole payload with a censored
        # placeholder, keeping only the whitelisted _PRESERVE keys
        if isinstance(self._task.no_log, bool) and self._task.no_log or self._result.get('_ansible_no_log', False):
            x = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
            # preserve full
            for preserve in _PRESERVE:
                if preserve in self._result:
                    x[preserve] = self._result[preserve]
            result._result = x
        elif self._result:
            result._result = module_response_deepcopy(self._result)
            # actually remove
            for remove_key in ignore:
                if remove_key in result._result:
                    del result._result[remove_key]
            # remove almost ALL internal keys, keep ones relevant to callback
            strip_internal_keys(result._result, exceptions=CLEAN_EXCEPTIONS)
        # keep subset
        result._result.update(subset)
        return result
| gpl-3.0 |
avanzosc/avanzosc6.1 | nan_stock_purchase_price/stock.py | 1 | 22528 | # -*- encoding: latin-1 -*-
##############################################################################
#
# Copyright (c) 2009 Ángel Álvarez - NaN (http://www.nan-tic.com) All Rights Reserved.
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import fields,osv
from tools.translate import _
import netsvc
class stock_move(osv.osv):
    # Extend stock.move with the quantity actually received via the scanner
    # wizard; stock.picking.action_scanner_confirm compares it against
    # product_qty to classify moves and build backorders.
    _inherit = 'stock.move'
    _columns = {
        'received_quantity': fields.float('Received Quantity', help='Quantity of product received'),
    }
stock_move()
class stock_picking(osv.osv):
    _inherit = 'stock.picking'

    def action_scanner_confirm(self, cr, uid, ids, context=None):
        """Confirm pickings using the received quantities typed by the user.

        (Function picked from the nan_stock_scanner module.)

        Every move is classified by comparing the expected quantity
        (product_qty) with the received one (received_quantity). When any
        move differs, a backorder picking is created: received quantities
        are moved and processed there while pending quantities stay on the
        original picking.

        :return: dict with key 'new_picking' holding the id of the created
                 backorder picking, or False when none was needed.
        """
        for picking in self.browse(cr, uid, ids, context):
            new_picking = None
            if picking.purchase_id:
                currency = picking.purchase_id.pricelist_id.currency_id.id
            else:
                currency = 0
            # Classify moves by received vs. expected quantity.
            complete, too_many, too_few, none = [], [], [], []
            for move in picking.move_lines:
                if not move.received_quantity:
                    none.append(move)
                elif move.product_qty == move.received_quantity:
                    complete.append(move)
                elif move.product_qty > move.received_quantity:
                    too_few.append(move)
                else:
                    too_many.append(move)
            # Nothing received at all: leave the picking untouched.
            if len(none) == len(picking.move_lines):
                return {'new_picking': False}
            # Average price computation
            # NOTE(review): this block sits OUTSIDE the classification loop,
            # so it only processes the last iterated 'move'; it references
            # 'move' and looks like it was meant to run once per move.
            # Confirm before re-indenting, as that would change cost updates.
            if picking.type == 'in' and move.product_id.cost_method == 'average' and move.purchase_line_id:
                product = self.pool.get('product.product').browse(cr, uid, move.product_id.id, context)
                user = self.pool.get('res.users').browse(cr, uid, uid, context)
                price = move.purchase_line_id.price_unit
                price_uom = move.purchase_line_id.product_uom.id
                qty = move.received_quantity
                qty = self.pool.get('product.uom')._compute_qty(cr, uid, move.product_uom.id, qty, product.uom_id.id)
                if qty > 0:
                    # Convert the purchase price to company currency and to
                    # the product's default unit of measure.
                    new_price = self.pool.get('res.currency').compute(cr, uid, currency, user.company_id.currency_id.id, price)
                    new_price = self.pool.get('product.uom')._compute_price(cr, uid, price_uom, new_price, product.uom_id.id)
                    # A previous maintainer replaced the weighted-average
                    # formula: the standard price is always the purchase
                    # line's unit price, regardless of available quantity.
                    new_std_price = move.purchase_line_id.price_unit
                    self.pool.get('product.product').write(cr, uid, [product.id], {
                        'standard_price': new_std_price
                    }, context)
                    self.pool.get('stock.move').write(cr, uid, [move.id], {
                        'price_unit': new_price
                    }, context)
            # Any mismatch: create an empty backorder picking to receive the
            # processed moves.
            if len(too_many) > 0 or len(too_few) > 0 or len(none) > 0:
                new_picking = self.copy(cr, uid, picking.id, {
                    'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking'),
                    'move_lines': [],
                    'state': 'draft',
                }, context)
            for move in too_few:
                # Split: the received part goes to the backorder, the rest
                # stays pending on the original move.
                if move.received_quantity != 0:
                    self.pool.get('stock.move').copy(cr, uid, move.id, {
                        'product_qty': move.received_quantity,
                        'product_uos_qty': move.received_quantity,
                        'picking_id': new_picking,
                        'state': 'assigned',
                        'move_dest_id': False,
                        'price_unit': move.price_unit,
                    }, context)
                self.pool.get('stock.move').write(cr, uid, [move.id], {
                    'product_qty': move.product_qty - move.received_quantity,
                    'product_uos_qty': move.product_qty - move.received_quantity,
                    'prodlot_id': None,
                    'received_quantity': 0,
                }, context)
            if new_picking:
                self.pool.get('stock.move').write(cr, uid, [c.id for c in complete], {'picking_id': new_picking}, context)
                for move in too_many:
                    # BUG FIX: 'received_quantity' was referenced as a bare
                    # name (NameError); it must be read from the move.
                    self.pool.get('stock.move').write(cr, uid, [move.id], {
                        'product_qty': move.received_quantity,
                        'product_uos_qty': move.received_quantity,
                        'picking_id': new_picking,
                    }, context)
            else:
                for move in too_many:
                    # BUG FIX: same bare 'received_quantity' NameError here.
                    self.pool.get('stock.move').write(cr, uid, [move.id], {
                        'product_qty': move.received_quantity,
                        'product_uos_qty': move.received_quantity,
                        'prodlot_id': False,
                    }, context)
            # At first we confirm the new picking (if necessary)
            wf_service = netsvc.LocalService("workflow")
            if new_picking:
                wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
            # Then we finish the good picking
            if new_picking:
                self.write(cr, uid, [picking.id], {'backorder_id': new_picking}, context)
                self.action_move(cr, uid, [new_picking], context)
                wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
                wf_service.trg_write(uid, 'stock.picking', picking.id, cr)
            else:
                self.action_move(cr, uid, [picking.id], context)
                wf_service.trg_validate(uid, 'stock.picking', picking.id, 'button_done', cr)
        return {'new_picking': new_picking or False}
stock_picking()
class stock_input_wizard_line( osv.osv_memory ):
    # Transient line mirroring one stock.move of the picking being received.
    # Creating a record propagates the typed price/discount back to the
    # purchase order line and updates the product's last purchase price.
    _name = 'stock.input.wizard.line'
    def _total_amount(self, cr, uid, price, quantity, discount, purchase_order_id, purchase_order_line_id, context):
        """Return the line amount taxes included.

        Taxes are computed from the purchase order line's taxes when both the
        order and the line are known; otherwise the untaxed amount is
        returned unchanged.
        """
        untaxed_amount = self._untaxed_amount(cr, uid, price, quantity, discount)
        if purchase_order_line_id and purchase_order_id:
            purchase_order_line = self.pool.get('purchase.order.line').browse(cr, uid, purchase_order_line_id, context)
            purchase_order = self.pool.get('purchase.order').browse(cr, uid, purchase_order_id, context)
            # calculation of line with taxes
            taxes_value = 0.0
            if len(purchase_order_line.taxes_id) > 0:
                calculations = self.pool.get('account.tax').compute(cr, uid,purchase_order_line.taxes_id , price,quantity, purchase_order.partner_address_id.id, purchase_order_line.product_id, purchase_order.partner_id)
                for dict_tax in calculations:
                    taxes_value += dict_tax['amount']
            return untaxed_amount + taxes_value
        return untaxed_amount
    def _untaxed_amount(self,cr,uid,price,quantity,discount):
        """Return price * quantity with the discount percentage applied."""
        untaxed_amount = price * ((100.0-discount)/100.0) * quantity
        return untaxed_amount
    def _prices_on_product_uom(self,cr,uid,from_uom,prices,to_uom):
        """Convert a list of prices from one unit of measure to another."""
        result = []
        for price in prices:
            new_price = self.pool.get('product.uom')._compute_price(cr,uid,from_uom,price,to_uom)
            result.append(new_price)
        return result
    def _prices_on_currency(self,cr,uid,from_currency,prices,to_currency):
        """Convert a list of prices from one currency to another."""
        result = []
        for price in prices:
            new_price = self.pool.get('res.currency').compute(cr,uid,from_currency,to_currency,price)
            result.append(new_price)
        return result
    def create( self, cr, uid, vals, context=None ):
        """Create the transient line and propagate its values:

        - store the typed price (converted to the product's UoM) as the
          product's last_purchase_price,
        - write price/discount back to the linked purchase order line,
        - write the received quantity (and UoM, when changed) on the move.
        """
        if 'price' in vals and 'product_id' in vals:
            product_id = vals['product_id']
            product = self.pool.get('product.product').browse(cr, uid, product_id, context)
            price = self.pool.get('product.uom')._compute_price(cr, uid, vals['product_uom'], vals['price'], product.uom_id.id)
            self.pool.get('product.product').write(cr, uid, product_id, {
                'last_purchase_price': price,
            }, context)
            move = self.pool.get('stock.move').browse(cr, uid, vals['stock_move_id'], context)
            if move.purchase_line_id:
                # Convert stock move UoM to order line's UoM
                price = self.pool.get('product.uom')._compute_price(cr, uid, vals['product_uom'], vals['price'], move.purchase_line_id.product_uom.id)
                if not 'discount' in vals:
                    vals['discount'] = 0.0
                self.pool.get('purchase.order.line').write(cr, uid, [move.purchase_line_id.id], {
                    'price_base': price,
                    #'price_unit': price * (1-vals['discount']/100),
                    'price_unit': price * (1-vals.get('discount',0.0)/100),
                    'discount': vals['discount'],
                }, context)
            values = {
                'received_quantity': vals['quantity'],
            }
            if move.product_uom.id != vals['product_uom']:
                # Convert total stock move quantity to the same UoM used by the user
                values['product_qty'] = self.pool.get('product.uom')._compute_qty(cr, uid, move.product_uom.id, move.product_qty, vals['product_uom'])
                values['product_uom'] = vals['product_uom']
                values['product_uos'] = vals['product_uom']
            self.pool.get('stock.move').write(cr, uid, [move.id], values, context)
        return super(stock_input_wizard_line,self).create(cr, uid, vals, context)
    def on_change_fields(self, cr, uid, ids, product_id, quantity, product_uom, purchase_price, price, list_price, last_purchase_price, currency_id, discount, untaxed_amount, total_amount, purchase_order_id, purchase_order_line_id, stock_move_id, field_changed, context):
        """Recompute the wizard line amounts when a field changes.

        Changing the UoM or the currency converts the three displayed prices
        accordingly; the (un)taxed totals are recomputed in every case.
        """
        if field_changed == 'product_uom' and stock_move_id:
            #prices calculation from product_uom
            move = self.pool.get('stock.move').browse(cr, uid, stock_move_id, context)
            product_uom_ori = move.product_uom.id
            if product_uom_ori != product_uom:
                new_prices = self._prices_on_product_uom(cr, uid, product_uom_ori, [purchase_price, price, list_price], product_uom)
                [purchase_price, price, list_price] = new_prices
        if field_changed == 'currency_id' and stock_move_id:
            #prices calculation from currency
            purchase_order = self.pool.get('purchase.order').browse(cr, uid, purchase_order_id, context)
            currency_ori = purchase_order.pricelist_id.currency_id.id
            if currency_ori != currency_id:
                new_prices = self._prices_on_currency(cr,uid,currency_ori,[purchase_price, price, list_price],currency_id)
                [purchase_price, price, list_price] = new_prices
        total_amount = self._total_amount(cr,uid,price,quantity,discount,purchase_order_id,purchase_order_line_id, context)
        untaxed_amount = self._untaxed_amount(cr,uid,price,quantity,discount)
        return {
            'value': {
                'product_id': product_id,
                'quantity': quantity,
                'product_uom': product_uom,
                'purchase_price': purchase_price,
                'price': price,
                'list_price': list_price,
                'last_purchase_price': last_purchase_price,
                'currency_id': currency_id,
                'discount': discount,
                'untaxed_amount': untaxed_amount,
                'total_amount': total_amount,
                'purchase_order': purchase_order_id,
                'purchase_order_line': purchase_order_line_id,
                'stock_move_id': stock_move_id,
            }
        }
    # Read-only fields show reference prices; editable ones feed create().
    _columns = {
        'product_id':fields.many2one( 'product.product','Product'),
        'quantity': fields.float('Quantity' ),
        'product_uom': fields.many2one( 'product.uom','Uom '),
        'purchase_price':fields.float('Purchase Price', readonly=True),
        'price':fields.float( 'Price'),
        'list_price': fields.float( 'List Price', readonly=True ),
        'last_purchase_price': fields.float('Last Purchase Price', readonly=True),
        'currency_id':fields.many2one( 'res.currency','Currency', readonly=True),
        'discount': fields.float('Discount'),
        'untaxed_amount':fields.float('Untaxed Amount', readonly=True),
        'total_amount' : fields.float('Total Amount', readonly=True),
        'wizard_id': fields.many2one( 'stock.input.wizard', 'Wizard id'),
        'purchase_order_line': fields.many2one('purchase.order.line' ,'Purchase Order Line'),
        'purchase_order': fields.many2one( 'purchase.order'),
        'stock_picking': fields.many2one( 'stock.picking', 'Stock Picking'),
        'stock_move_id': fields.many2one( 'stock.move','Stock Picking Lines'),
    }
stock_input_wizard_line()
class stock_input_wizard(osv.osv_memory):
    # Wizard that lets the user review/adjust prices and quantities of an
    # incoming picking; confirming it triggers action_scanner_confirm.
    _name = 'stock.input.wizard'

    def _default_picking_id(self, cr, uid, context=None):
        """Default the picking to the record the wizard was launched from."""
        if not context:
            context = {}
        return context.get('active_id', False)

    def _default_line_ids(self, cr, uid, context=None):
        """Build one wizard line per move of the active picking, preloading
        prices from the purchase order line when available."""
        if context is None:
            context = {}
        if 'active_id' not in context:
            return {}
        result = []
        picking = self.pool.get('stock.picking').browse(cr, uid, context['active_id'], context)
        # For each line of stock picking
        for move in picking.move_lines:
            product = move.product_id
            if move.purchase_line_id:
                # Mod Dani: price_base replaced by price_subtotal.
                price_base = move.purchase_line_id.price_subtotal
                discount = move.purchase_line_id.discount
                untaxed_amount = move.purchase_line_id.price_subtotal
                purchase_order_line_id = move.purchase_line_id.id
            else:
                price_base = product.standard_price
                discount = 0.0
                untaxed_amount = self.pool.get('stock.input.wizard.line')._untaxed_amount(cr, uid, price_base, move.product_qty, discount)
                purchase_order_line_id = False
            purchase_id = picking.purchase_id and picking.purchase_id.id or False
            if purchase_id:
                # Prices calculation from the supplier and his price list.
                pricelist_id = picking.purchase_id.pricelist_id.id
                partner_id = picking.purchase_id.partner_id.id
                list_price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist_id], product.id, 1.0, partner_id)[pricelist_id]
            else:
                pricelist_id = False
                list_price = 0.0
            # Calculation of line with taxes.
            total_amount = self.pool.get('stock.input.wizard.line')._total_amount(cr, uid, price_base, move.product_qty, discount, purchase_id, purchase_order_line_id, context)
            result += [{
                'product_id': move.product_id.id,
                'quantity': move.product_qty,
                'product_uom': move.product_uom.id,
                'purchase_price': price_base,
                'price': price_base,
                'list_price': list_price,
                'last_purchase_price': product.last_purchase_price,
                'currency_id': picking.purchase_id and picking.purchase_id.pricelist_id.currency_id.id or False,
                'discount': discount,
                'untaxed_amount': untaxed_amount,
                'total_amount': total_amount,
                'purchase_order': purchase_id,
                'purchase_order_line': purchase_order_line_id,
                'stock_move_id': move.id,
            }]
        return result

    def accept_and_write(self, cr, uid, ids, context=None):
        """Button handler: all the work happens in create(); close dialog."""
        return {}

    def on_cancel(self, cr, uid, ids, context=None):
        """Button handler: nothing to undo, just close the dialog."""
        return {}

    def _wizard_line_values(self, stock_move_id, list):
        """Return the values dict for the given stock move from a one2many
        command list [[0, 0, dict0], [0, 0, dict1], ...], or False."""
        if list:
            for line in list:
                dictionary = line[2]
                if 'stock_move_id' in dictionary:
                    if dictionary['stock_move_id'] == stock_move_id:
                        return dictionary
        return False

    def _wizard_line_orphan(self, list):
        """Reject manually added lines not linked to a picking move.

        Expects the same command list format as _wizard_line_values; raises
        when a line carries a falsy stock_move_id.
        """
        if list:
            for line in list:
                dictionary = line[2]
                if 'stock_move_id' in dictionary:
                    if not dictionary['stock_move_id']:
                        raise osv.except_osv(_('Warning'), _('You are trying to create a new line on wrong place. You should create it on Stock picking.'))
        return False

    def create(self, cr, uid, vals, context=None):
        """Override: instead of persisting the wizard, create one
        stock.input.wizard.line per picking move (whose create() writes
        prices back to the purchase order and product) and then confirm the
        picking through action_scanner_confirm.

        :return: True (the transient record itself is never stored).
        """
        if context is None:
            context = {}
        if 'active_id' in context:
            vals.update({'picking_id': context['active_id']})
        if 'picking_id' in vals:
            picking_id = vals['picking_id']
            # We will create a wizard_line for each line of stock picking.
            # If there is any 'vals', we will take the values from vals,
            # otherwise we'll take values from stock picking.
            picking = self.pool.get('stock.picking').browse(cr, uid, picking_id, context)
            for move in picking.move_lines:
                # Search if this line has values (true when we modified any
                # value on the wizard line).
                if 'line_ids' in vals:
                    line_vals = self._wizard_line_values(move.id, vals['line_ids'])
                    if not line_vals:  # Patch to correct calculation if no changes are made
                        # NOTE(review): when move.price_unit is already set,
                        # line_vals stays False and the create() below gets
                        # False -- looks like a latent bug; confirm intent.
                        if not move.price_unit:
                            pricebak = self.pool.get('purchase.order.line').read(cr, uid, move.purchase_line_id.id, ['price_subtotal', 'discount'])
                            line_vals = {
                                'stock_move_id': move.id,
                                'product_id': move.product_id.id,
                                'product_uom': move.product_uom.id,
                                'price': pricebak['price_subtotal'],
                                'quantity': move.product_qty,
                                'discount': pricebak['discount']
                            }
                else:
                    # If no values, take them from the line of stock picking.
                    line_vals = {
                        'stock_move_id': move.id,
                        'product_id': move.product_id.id,
                        'product_uom': move.product_uom.id,
                        'price': move.price_unit,
                        'quantity': move.product_qty,
                    }
                # Creating the wizard line (its create() propagates prices).
                self.pool.get('stock.input.wizard.line').create(cr, uid, line_vals, context)
            # Check if there is any new wizard_line (not included on stock
            # picking).
            # BUG FIX: vals['line_ids'] raised KeyError when the wizard was
            # confirmed without line values (e.g. picking with no moves and
            # an untouched form); use .get() -- _wizard_line_orphan handles
            # a falsy argument.
            self._wizard_line_orphan(vals.get('line_ids'))
            self.pool.get('stock.picking').action_scanner_confirm(cr, uid, [picking_id], context)
        return True

    _columns = {
        'picking_id': fields.many2one('stock.picking', 'Picking', readonly=True),
        'line_ids': fields.one2many('stock.input.wizard.line', 'wizard_id', 'Lines'),
    }
    _defaults = {
        'picking_id': _default_picking_id,
        'line_ids': _default_line_ids,
    }
stock_input_wizard()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
momm3/WelcomeBot | welcomebot/Lib/site-packages/setuptools/command/build_clib.py | 314 | 4484 | import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:

    1. Implement a rudimentary timestamp-based dependency system
       so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
        * obj_deps - specify dependencies for each object compiled.
                     this should be a dictionary mapping a key
                     with the source filename to a list of
                     dependencies. Use an empty string for global
                     dependencies.
        * cflags - specify a list of additional flags to pass to
                   the compiler.
    """

    @staticmethod
    def _bad_obj_deps(lib_name):
        """Build the error raised whenever 'obj_deps' is malformed."""
        return DistutilsSetupError(
            "in 'libraries' option (library '%s'), "
            "'obj_deps' must be a dictionary of "
            "type 'source: list'" % lib_name)

    def build_libraries(self, libraries):
        for lib_name, build_info in libraries:
            # 'sources' is mandatory and must be a list/tuple of filenames.
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'sources' must be present and must be "
                    "a list of source filenames" % lib_name)
            sources = list(sources)

            log.info("building '%s' library", lib_name)

            # 'obj_deps' maps a source filename to the files it depends on;
            # the empty-string key lists dependencies shared by every source.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise self._bad_obj_deps(lib_name)
            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise self._bad_obj_deps(lib_name)

            # One dependency list per source, each seeded with the source
            # itself plus the global dependencies (newer_pairwise_group
            # expects this grouping).
            dependencies = []
            for source in sources:
                per_source = obj_deps.get(source, list())
                if not isinstance(per_source, (list, tuple)):
                    raise self._bad_obj_deps(lib_name)
                src_deps = [source]
                src_deps.extend(global_deps)
                src_deps.extend(per_source)
                dependencies.append(src_deps)

            expected_objects = self.compiler.object_filenames(
                sources,
                output_dir=self.build_temp,
            )

            # Only rebuild when some dependency is newer than its object.
            if ([], []) != newer_pairwise_group(dependencies, expected_objects):
                # First, compile the source code to object files in the library
                # directory. (This should probably change to putting object
                # files in a temporary build directory.)
                self.compiler.compile(
                    sources,
                    output_dir=self.build_temp,
                    macros=build_info.get('macros'),
                    include_dirs=build_info.get('include_dirs'),
                    extra_postargs=build_info.get('cflags'),
                    debug=self.debug,
                )

                # Now "link" the object files together into a static library.
                # (On Unix at least, this isn't really linking -- it just
                # builds an archive. Whatever.)
                self.compiler.create_static_lib(
                    expected_objects,
                    lib_name,
                    output_dir=self.build_clib,
                    debug=self.debug,
                )
| mit |
bop/foundation | lib/python2.7/site-packages/django/views/generic/dates.py | 80 | 19895 | import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
class YearMixin(object):
    """Mixin that resolves the *year* component for date-based views."""
    year_format = '%Y'
    year = None

    def get_year_format(self):
        """Return the strptime format used to parse the year from the URL."""
        return self.year_format

    def get_year(self):
        """Return the year to display: attribute, URLconf kwarg, or GET parameter."""
        year = self.year
        if year is not None:
            return year
        if 'year' in self.kwargs:
            return self.kwargs['year']
        if 'year' in self.request.GET:
            return self.request.GET['year']
        raise Http404(_(u"No year specified"))
class MonthMixin(object):
    """Mixin that resolves the *month* component for date-based views."""
    month_format = '%b'
    month = None

    def get_month_format(self):
        """Return the strptime format used to parse the month from the URL."""
        return self.month_format

    def get_month(self):
        """Return the month to display: attribute, URLconf kwarg, or GET parameter."""
        month = self.month
        if month is not None:
            return month
        if 'month' in self.kwargs:
            return self.kwargs['month']
        if 'month' in self.request.GET:
            return self.request.GET['month']
        raise Http404(_(u"No month specified"))

    def get_next_month(self, date):
        """Return the next valid month (first-of-month date), or None."""
        unused_first, last_day = _month_bounds(date)
        following = (last_day + datetime.timedelta(days=1)).replace(day=1)
        return _get_next_prev_month(self, following, is_previous=False,
                                    use_first_day=True)

    def get_previous_month(self, date):
        """Return the previous valid month (first-of-month date), or None."""
        first_day, unused_last = _month_bounds(date)
        preceding = first_day - datetime.timedelta(days=1)
        return _get_next_prev_month(self, preceding, is_previous=True,
                                    use_first_day=True)
class DayMixin(object):
    """Mixin that resolves the *day* component for date-based views."""
    day_format = '%d'
    day = None

    def get_day_format(self):
        """Return the strptime format used to parse the day from the URL."""
        return self.day_format

    def get_day(self):
        """Return the day to display: attribute, URLconf kwarg, or GET parameter."""
        day = self.day
        if day is not None:
            return day
        if 'day' in self.kwargs:
            return self.kwargs['day']
        if 'day' in self.request.GET:
            return self.request.GET['day']
        raise Http404(_(u"No day specified"))

    def get_next_day(self, date):
        """Return the next valid day, or None."""
        return _get_next_prev_month(self, date + datetime.timedelta(days=1),
                                    is_previous=False, use_first_day=False)

    def get_previous_day(self, date):
        """Return the previous valid day, or None."""
        return _get_next_prev_month(self, date - datetime.timedelta(days=1),
                                    is_previous=True, use_first_day=False)
class WeekMixin(object):
    """Mixin that resolves the *week* component for date-based views."""
    week_format = '%U'
    week = None

    def get_week_format(self):
        """Return the strptime format used to parse the week from the URL."""
        return self.week_format

    def get_week(self):
        """Return the week to display: attribute, URLconf kwarg, or GET parameter."""
        week = self.week
        if week is not None:
            return week
        if 'week' in self.kwargs:
            return self.kwargs['week']
        if 'week' in self.request.GET:
            return self.request.GET['week']
        raise Http404(_(u"No week specified"))
class DateMixin(object):
    """Shared configuration for views that operate on date-filtered querysets."""
    # Name of the model field holding the date; must be set by subclasses/users.
    date_field = None
    # Whether objects dated in the future may be displayed.
    allow_future = False

    def get_date_field(self):
        """Return the configured date field name, or complain loudly if unset."""
        if self.date_field is None:
            raise ImproperlyConfigured(u"%s.date_field is required." % self.__class__.__name__)
        return self.date_field

    def get_allow_future(self):
        """Return True if the view may display objects dated in the future."""
        return self.allow_future
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
    """
    Abstract base class for date-based views that display a list of objects.
    """
    allow_empty = False

    def get(self, request, *args, **kwargs):
        """Assemble the dated items and render them."""
        self.date_list, self.object_list, extra_context = self.get_dated_items()
        context = self.get_context_data(object_list=self.object_list,
                                        date_list=self.date_list)
        context.update(extra_context)
        return self.render_to_response(context)

    def get_dated_items(self):
        """Return (date_list, items, extra_context); subclasses must implement this."""
        raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')

    def get_dated_queryset(self, **lookup):
        """
        Filter the queryset by the given lookup kwargs, hiding future objects
        unless allow_future is set, and 404ing on an empty result unless
        allow_empty is set.
        """
        queryset = self.get_queryset().filter(**lookup)
        date_field = self.get_date_field()
        allow_future = self.get_allow_future()
        allow_empty = self.get_allow_empty()
        if not allow_future:
            queryset = queryset.filter(**{'%s__lte' % date_field: timezone.now()})
        if not (allow_empty or queryset):
            raise Http404(_(u"No %(verbose_name_plural)s available") % {
                'verbose_name_plural': force_unicode(queryset.model._meta.verbose_name_plural)
            })
        return queryset

    def get_date_list(self, queryset, date_type):
        """
        Return the dates covered by queryset (newest first), raising 404 on an
        empty list unless allow_empty is set.
        """
        date_field = self.get_date_field()
        allow_empty = self.get_allow_empty()
        dates = queryset.dates(date_field, date_type)[::-1]
        if not (dates is None or dates or allow_empty):
            name = force_unicode(queryset.model._meta.verbose_name_plural)
            raise Http404(_(u"No %(verbose_name_plural)s available") %
                          {'verbose_name_plural': name})
        return dates

    def get_context_data(self, **kwargs):
        """Merge object_list into the inherited context, then overlay kwargs."""
        object_list = kwargs.pop('object_list')
        context = super(BaseDateListView, self).get_context_data(object_list=object_list)
        context.update(kwargs)
        return context
class BaseArchiveIndexView(BaseDateListView):
    """
    Base class for top-level archives of date-based items.

    Requires a response mixin.
    """
    context_object_name = 'latest'

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        queryset = self.get_dated_queryset()
        date_list = self.get_date_list(queryset, 'year')
        if not date_list:
            # Keep an (empty) queryset so callers can still introspect the model.
            object_list = queryset.none()
        else:
            object_list = queryset.order_by('-' + self.get_date_field())
        return (date_list, object_list, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
    """
    Top-level archive of date-based items.
    """
    # Template lookup becomes "<app>/<model>_archive.html".
    template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
    """
    List of objects published in a given year.
    """
    make_object_list = False

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        # No error checking here: the URLpattern ought to validate the year.
        year = self.get_year()
        date_field = self.get_date_field()
        qs = self.get_dated_queryset(**{date_field + '__year': year})
        date_list = self.get_date_list(qs, 'month')

        if not self.get_make_object_list():
            # Keep a queryset (not a list) so parent classes can introspect it
            # to find information about the model.
            object_list = qs.none()
        else:
            object_list = qs.order_by('-' + date_field)

        return (date_list, object_list, {'year': year})

    def get_make_object_list(self):
        """Return True to include the full object list for the year."""
        return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
    """
    List of objects published in a given year.
    """
    # Template lookup becomes "<app>/<model>_archive_year.html".
    template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
    """
    List of objects published in a given month.
    """
    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        year = self.get_year()
        month = self.get_month()
        date_field = self.get_date_field()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format())

        # Half-open [first_day, last_day) range covering the month.
        first_day, last_day = _month_bounds(date)
        qs = self.get_dated_queryset(**{
            '%s__gte' % date_field: first_day,
            '%s__lt' % date_field: last_day,
        })
        date_list = self.get_date_list(qs, 'day')

        return (date_list, qs, {
            'month': date,
            'next_month': self.get_next_month(date),
            'previous_month': self.get_previous_month(date),
        })
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
    """
    List of objects published in a given month.
    """
    # Template lookup becomes "<app>/<model>_archive_month.html".
    template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
    """
    List of objects published in a given week.
    """
    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        year = self.get_year()
        week = self.get_week()
        date_field = self.get_date_field()
        week_format = self.get_week_format()
        # strptime's %W weeks start on Monday (weekday 1), %U weeks on Sunday (0).
        week_start = {'%W': '1', '%U': '0'}[week_format]
        date = _date_from_string(year, self.get_year_format(),
                                 week_start, '%w',
                                 week, week_format)

        # Half-open one-week range starting at the parsed date.
        lookup_kwargs = {
            '%s__gte' % date_field: date,
            '%s__lt' % date_field: date + datetime.timedelta(days=7),
        }
        qs = self.get_dated_queryset(**lookup_kwargs)

        return (None, qs, {'week': date})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
    """
    List of objects published in a given week.
    """
    # Template lookup becomes "<app>/<model>_archive_week.html".
    template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
    """
    List of objects published on a given day.
    """
    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        date = _date_from_string(self.get_year(), self.get_year_format(),
                                 self.get_month(), self.get_month_format(),
                                 self.get_day(), self.get_day_format())
        return self._get_dated_items(date)

    def _get_dated_items(self, date):
        """
        Resolve the dated items for an already-parsed ``date``; split out so
        TodayArchiveView can reuse it trivially.
        """
        date_field = self.get_date_field()
        field = self.get_queryset().model._meta.get_field(date_field)
        lookup_kwargs = _date_lookup_for_field(field, date)
        qs = self.get_dated_queryset(**lookup_kwargs)

        return (None, qs, {
            'day': date,
            'previous_day': self.get_previous_day(date),
            'next_day': self.get_next_day(date),
            'previous_month': self.get_previous_month(date),
            'next_month': self.get_next_month(date)
        })
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
    """
    List of objects published on a given day.
    """
    # Template lookup becomes "<app>/<model>_archive_day.html".
    template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
    """
    List of objects published today.
    """
    def get_dated_items(self):
        """Return (date_list, items, extra_context) for today's date."""
        today = datetime.date.today()
        return self._get_dated_items(today)
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
    """
    List of objects published today.
    """
    # Reuses the day-archive template: "<app>/<model>_archive_day.html".
    template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    """
    def get_object(self, queryset=None):
        """
        Get the object this request displays.

        Parses year/month/day from the URL, rejects future dates unless
        ``allow_future`` is set, narrows the queryset to that date, and
        delegates the final lookup (and 404 handling) to DetailView.
        """
        year = self.get_year()
        month = self.get_month()
        day = self.get_day()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format(),
                                 day, self.get_day_format())

        # Use a custom queryset if provided.  Compare against None rather than
        # truthiness: ``queryset or ...`` would silently discard an explicitly
        # passed *empty* queryset, and boolean-evaluating a queryset hits the
        # database.
        qs = queryset if queryset is not None else self.get_queryset()

        if not self.get_allow_future() and date > datetime.date.today():
            raise Http404(_(u"Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.") % {
                'verbose_name_plural': qs.model._meta.verbose_name_plural,
                'class_name': self.__class__.__name__,
            })

        # Filter down a queryset from self.queryset using the date from the
        # URL. This'll get passed as the queryset to DetailView.get_object,
        # which'll handle the 404
        date_field = self.get_date_field()
        field = qs.model._meta.get_field(date_field)
        lookup = _date_lookup_for_field(field, date)
        qs = qs.filter(**lookup)

        return super(BaseDetailView, self).get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    """
    # Template lookup becomes "<app>/<model>_detail.html".
    template_name_suffix = '_detail'
def _date_from_string(year, year_format, month, month_format, day='', day_format='', delim='__'):
"""
Helper: get a datetime.date object given a format string and a year,
month, and possibly day; raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_(u"Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
def _month_bounds(date):
"""
Helper: return the first and last days of the month for the given date.
"""
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
return first_day, last_day
def _get_next_prev_month(generic_view, naive_result, is_previous, use_first_day):
"""
Helper: Get the next or the previous valid date. The idea is to allow
links on month/day views to never be 404s by never providing a date
that'll be invalid for the given view.
This is a bit complicated since it handles both next and previous months
and days (for MonthArchiveView and DayArchiveView); hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day or month,
reguardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive month
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
# If allow_empty is True the naive value will be valid
if allow_empty:
result = naive_result
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lte' % date_field: naive_result}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: naive_result}
ordering = date_field
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
result = None
# Convert datetimes to a dates
if hasattr(result, 'date'):
result = result.date()
# For month views, we always want to have a date that's the first of the
# month for consistency's sake.
if result and use_first_day:
result = result.replace(day=1)
# Check against future dates.
if result and (allow_future or result < datetime.date.today()):
return result
else:
return None
def _date_lookup_for_field(field, date):
    """
    Helper: build the filter kwargs for matching ``date`` against ``field``.

    A plain filter(field=date) is wrong for DateTimeFields because it would
    ignore the time portion, so those get a whole-day __range lookup instead.
    """
    if not isinstance(field, models.DateTimeField):
        return {field.name: date}
    day_range = (
        datetime.datetime.combine(date, datetime.time.min),
        datetime.datetime.combine(date, datetime.time.max)
    )
    return {'%s__range' % field.name: day_range}
| gpl-2.0 |
nexedi/dream | dream/KnowledgeExtraction/PilotCases/JobShop/DataExtraction.py | 1 | 8090 | # ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 23 March 2015
@author: Panos
'''
import dream.KnowledgeExtraction.ImportDatabase as ImportDatabase
from datetime import datetime
import json
def DataExtraction(DBFilePath):
    """Pull order, machine, routing and WIP data out of the job-shop database
    and assemble the dict consumed downstream.

    DBFilePath is passed through to ImportDatabase.ConnectionData as the
    file_path of the 'ServerData' connection file.  Returns a dict with the
    keys 'productionOrders' (list), 'stations' (list) and 'WIP' (dict keyed by
    part code).

    Note: cursor fetch order matters throughout; the d and f cursors are
    advanced in lockstep inside the per-component loop.
    """
    #connect to the database providing the following data and generate six different cursor variables
    cnxn=ImportDatabase.ConnectionData(seekName='ServerData', file_path=DBFilePath, implicitExt='txt', number_of_cursors=6)
    cursor=cnxn.getCursors()
    #SQL query to extract data from orders table
    a=cursor[0].execute("""
            select Order_id, ProjectName, Customer, Order_date, Due_date, ProjectManager, Status
            from orders
            """)
    #create a dictionary called data and put inside two lists (orders and stations) and a dictionary called WIP
    data={}
    data['productionOrders']=[]
    data['WIP']={}
    data['stations']=[]
    productionOrders={}
    #Another SQL query that extracts data from the machines table
    b=cursor[1].execute("""
            select MachineName, description
            from machines
            """)
    # for every machine of the machines table
    for j in range(b.rowcount):
        #get the next line
        ind3=b.fetchone()
        #and insert in one of the above initiated lists
        data['stations'].append(ind3.MachineName)
    # for every order of the orders table
    for j in range(a.rowcount):
        #get the next line
        ind0=a.fetchone()
        #define a variable called status and holds the status of the order (extracted from orders table)
        status = ind0.Status
        #check if status is 'accepted' or 'in progress' and move in
        if status == 'accepted' or status == 'in progress':
            #insert the following extracted data from the database in order disctionary
            productionOrders['name']=ind0.ProjectName
            productionOrders['id']=ind0.Order_id
            productionOrders['manager']=ind0.ProjectManager
            orderDate=datetime.strptime(str(ind0.Order_date), '%Y-%m-%d')
            productionOrders['orderDate']=str(orderDate)
            productionOrders['_class']="Dream.Order"
            dueDate=datetime.strptime(str(ind0.Due_date), '%Y-%m-%d')
            productionOrders['dueDate']=str(dueDate)
            productionOrders['componentsList']=[]
            #SQL query to extract data from sequence table where order data is given
            c=cursor[2].execute("""
                    select Order_id, WP_id, PartCode, PartName, Operation_Name, ProcessingTime, PersonnelCode, Quantity, step
                    from sequence where Order_id=?
                    """, ind0.Order_id)
            appended=[] # list that holds the already added components
            # for all the lines of c (every component)
            for i in range(c.rowcount):
                # create a comopnent dict
                component={}
                # and get the next row
                ind1=c.fetchone()
                # (assigns to the name 'type', shadowing the builtin; left as-is)
                type=ind1.PartName
                component['componentType'] = type
                code=ind1.PartCode
                WP=ind1.WP_id
                component['id']=code
                component['name']=code
                component['route']=[]
                #SQL query that extracts data from sequence table where PartCode is given
                d=cursor[3].execute("""
                        select PartCode, PartName, WP_id, Operation_Name, ProcessingTime, PersonnelCode, Quantity, step, Completed
                        from sequence
                        where PartCode=?
                        """, code)
                #SQL query that joins the sequence and prerequisites tables where WP_id is common and PartCode is given
                f=cursor[4].execute("""
                        select sequence.WP_id, sequence.PartCode, prerequisites.PartsNeeded
                        from sequence, prerequisites
                        where sequence.WP_id=prerequisites.WP_id
                        AND sequence.PartCode=?
                        """, code)
                #loop every line in the sequence table
                for line in range(d.rowcount):
                    #create a new dictionary to hold the sequence of this order
                    step={}
                    ind2=d.fetchone()
                    step['technology']=ind2.Operation_Name
                    step['sequence']=ind2.step
                    step['operator']=ind2.PersonnelCode
                    step['task_id']=ind2.WP_id
                    step['quantity']=ind2.Quantity
                    step['completed']=ind2.Completed
                    # f is advanced in lockstep with d; both are keyed by PartCode
                    ind3=f.fetchone()
                    # PartsNeeded is a ';'-separated list; strip blanks after splitting
                    partsNeeded = ind3.PartsNeeded.replace(" ","").split(';')
                    for part in partsNeeded:
                        if part == '':
                            partsNeeded.remove(part)
                    step['requiredParts']=partsNeeded
                    step['processingTime']={}
                    step['processingTime']['Fixed']={}
                    step['processingTime']['Fixed']['mean']=ind2.ProcessingTime
                    component['route'].append(step)
                #The following checks if the component ids have been inserted to appended
                if not component['id'] in appended:
                    productionOrders['componentsList'].append(component)
                    appended.append(component['id'])
                #SQL query to extract WIP data from the prod_status table when sequence and prod_status are joined together and PartCode is given
                e= cursor[5].execute("""
                        select prod_status.WP_id, sequence.WP_id, sequence.ProcessingTime, sequence.step, MachineName, TIMEIN, TIMEOUT, prod_status.PersonnelCode
                        from prod_status
                        join sequence on sequence.WP_id = prod_status.WP_id
                        where sequence.PartCode=?
                        """, code)
                #loop in the lines of the prod_status table
                for x in range(e.rowcount):
                    ind3=e.fetchone()
                    # NOTE(review): this inner loop rebuilds data['WIP'][code]
                    # once per entry in 'appended' but always keys by the
                    # current 'code', so each pass overwrites the previous one
                    # and only the last prod_status row survives — looks
                    # unintentional; confirm before relying on it.
                    for t in appended:
                        data['WIP'][code]={}
                        data['WIP'][code]['station']=ind3.MachineName
                        data['WIP'][code]['operator']=ind3.PersonnelCode
                        data['WIP'][code]['task_id']=ind3.WP_id
                        data['WIP'][code]['sequence']=ind3.step
                        timeIn=datetime.strptime(str(ind3.TIMEIN), '%Y-%m-%d %H:%M:%S')
                        data['WIP'][code]['timeIn']=str(timeIn)
                        if ind3.TIMEOUT:
                            timeOut=datetime.strptime(str(ind3.TIMEOUT), '%Y-%m-%d %H:%M:%S')
                            data['WIP'][code]['timeOut']=str(timeOut)
        #in case the status is 'finished' continue to the next order
        elif status == 'finished':
            continue
        # NOTE(review): for a status that is neither accepted/'in progress'
        # nor 'finished', this appends a copy of the *previous* order's dict
        # (productionOrders was not rebuilt this iteration) — verify intended.
        data['productionOrders'].append(productionOrders.copy())
    return data
kustodian/ansible | lib/ansible/modules/network/aci/aci_bd_to_l3out.py | 13 | 6470 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Plugin maturity/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
# Module documentation rendered by ansible-doc.
DOCUMENTATION = r'''
---
module: aci_bd_to_l3out
short_description: Bind Bridge Domain to L3 Out (fv:RsBDToOut)
description:
- Bind Bridge Domain to L3 Out on Cisco ACI fabrics.
version_added: '2.4'
options:
  bd:
    description:
    - The name of the Bridge Domain.
    type: str
    aliases: [ bd_name, bridge_domain ]
  l3out:
    description:
    - The name of the l3out to associate with the Bridge Domain.
    type: str
  tenant:
    description:
    - The name of the Tenant.
    type: str
    aliases: [ tenant_name ]
  state:
    description:
    - Use C(present) or C(absent) for adding or removing.
    - Use C(query) for listing an object or multiple objects.
    type: str
    choices: [ absent, present, query ]
    default: present
extends_documentation_fragment: aci
notes:
- The C(bd) and C(l3out) parameters should exist before using this module.
  The M(aci_bd) and C(aci_l3out) can be used for these.
seealso:
- module: aci_bd
- module: aci_l3out
- name: APIC Management Information Model reference
  description: More information about the internal APIC class B(fv:RsBDToOut).
  link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
# Placeholder: no worked examples were provided upstream.
EXAMPLES = r''' # '''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
# Maps module-friendly subnet-control keywords to their APIC values.
# NOTE(review): nothing in this module references this mapping — it looks
# copied from aci_bd_to_subnet; confirm before removing.
SUBNET_CONTROL_MAPPING = dict(
    nd_ra='nd',
    no_gw='no-default-gateway',
    querier_ip='querier',
    unspecified='',
)
def main():
    """Ansible entry point: bind, unbind or query a Bridge Domain / L3 Out
    association (APIC class fv:RsBDToOut)."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        bd=dict(type='str', aliases=['bd_name', 'bridge_domain']),  # Not required for querying all objects
        l3out=dict(type='str'),  # Not required for querying all objects
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # A stray required_together=[['gateway', 'mask']] was removed here:
        # neither option exists in this module's argument_spec (it was copied
        # from aci_bd_to_subnet), so the constraint could never take effect.
        required_if=[
            ['state', 'present', ['bd', 'l3out', 'tenant']],
            ['state', 'absent', ['bd', 'l3out', 'tenant']],
        ],
    )

    bd = module.params.get('bd')
    l3out = module.params.get('l3out')
    state = module.params.get('state')
    tenant = module.params.get('tenant')

    aci = ACIModule(module)
    # Build the APIC object URL: tenant -> bridge domain -> L3Out binding.
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='fvBD',
            aci_rn='BD-{0}'.format(bd),
            module_object=bd,
            target_filter={'name': bd},
        ),
        subclass_2=dict(
            aci_class='fvRsBDToOut',
            aci_rn='rsBDToOut-{0}'.format(l3out),
            module_object=l3out,
            target_filter={'tnL3extOutName': l3out},
        ),
    )

    aci.get_existing()

    if state == 'present':
        # Push the desired binding; ACIModule diffs against what exists and
        # only POSTs when a change is needed.
        aci.payload(
            aci_class='fvRsBDToOut',
            class_config=dict(tnL3extOutName=l3out),
        )
        aci.get_diff(aci_class='fvRsBDToOut')
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
silentfuzzle/calibre | src/calibre/ebooks/metadata/sources/test.py | 14 | 9942 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, tempfile, time
from Queue import Queue, Empty
from threading import Event
from calibre.customize.ui import all_metadata_plugins
from calibre import prints, sanitize_file_name2
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import create_log, get_cached_cover_urls
from calibre.ebooks.metadata.sources.prefs import msprefs
def isbn_test(isbn):
    """Return a test callable that passes iff a Metadata object's ISBN
    normalizes (via check_isbn) to the same value as ``isbn``."""
    expected = check_isbn(isbn)

    def test(mi):
        actual = check_isbn(mi.isbn)
        if actual and actual == expected:
            return True
        prints('ISBN test failed. Expected: \'%s\' found \'%s\''%(expected, actual))
        return False

    return test
def title_test(title, exact=False):
    """Return a test callable checking a Metadata object's title.

    With ``exact`` the (case-folded) titles must be equal; otherwise a
    case-insensitive substring match suffices.
    """
    wanted = title.lower()

    def test(mi):
        found = mi.title.lower()
        ok = (found == wanted) if exact else (wanted in found)
        if ok:
            return True
        prints('Title test failed. Expected: \'%s\' found \'%s\''%(wanted, found))
        return False

    return test
def authors_test(authors):
    """Return a test callable that passes iff a Metadata object's author set
    equals ``authors`` case-insensitively, honouring the user's
    swap_author_names preference."""
    expected = set(x.lower() for x in authors)

    def test(mi):
        found = set(x.lower() for x in mi.authors)
        if msprefs['swap_author_names']:
            def revert_to_fn_ln(a):
                # 'Last, First' -> 'First Last'; names without a comma pass through.
                if ',' not in a:
                    return a
                last, rest = a.split(',', 1)
                return ' '.join((rest, last))
            found = set(revert_to_fn_ln(x) for x in found)

        if found == expected:
            return True
        prints('Author test failed. Expected: \'%s\' found \'%s\''%(expected, found))
        return False

    return test
def tags_test(tags):
    """Return a test callable that passes iff a Metadata object's tags equal
    ``tags`` as a case-insensitive set."""
    expected = set(x.lower() for x in tags)

    def test(mi):
        found = set(x.lower() for x in mi.tags)
        if found == expected:
            return True
        prints('Tags test failed. Expected: \'%s\' found \'%s\''%(expected, found))
        return False

    return test
def series_test(series, series_index):
    """Return a test callable that passes iff a Metadata object's series name
    (case-insensitive) and series index both match."""
    wanted = series.lower()

    def test(mi):
        found = mi.series.lower() if mi.series else ''
        if found == wanted and mi.series_index == series_index:
            return True
        if mi.series:
            prints('Series test failed. Expected: \'%s [%d]\' found \'%s[%d]\''%
                    (wanted, series_index, found, mi.series_index))
        else:
            prints('Series test failed. Expected: \'%s [%d]\' found no series'%
                    (wanted, series_index))
        return False

    return test
def comments_test(sentinel):
    """Return a test callable that passes iff ``sentinel`` occurs
    case-insensitively in a Metadata object's comments."""
    def test(mi):
        haystack = mi.comments.lower() if mi.comments else ''
        ok = bool(sentinel) and sentinel.lower() in haystack
        if not ok:
            prints('comments test failed. %s not in comments'%sentinel)
            return False
        return True
    return test
def pubdate_test(year, month, day):
    """Return a test callable that passes iff a Metadata object's publication
    date matches the given year/month/day exactly (and is not None)."""
    def test(mi):
        p = mi.pubdate
        return p is not None and (p.year, p.month, p.day) == (year, month, day)
    return test
def init_test(tdir_name):
    """Set up logging for an identify test run.

    Returns (tempdir, logfile_path, log, abort_event); the log writes to a
    file named after ``tdir_name`` (spaces stripped) in the system temp dir.
    """
    tdir = tempfile.gettempdir()
    logfile = os.path.join(tdir, '%s_identify_test.txt' % tdir_name.replace(' ', ''))
    return tdir, logfile, create_log(open(logfile, 'wb')), Event()
def test_identify(tests): # {{{
    '''
    Run the full multi-plugin identify pipeline against each test case and
    fail (SystemExit) unless the most relevant result passes every check.

    :param tests: List of 2-tuples. Each two tuple is of the form (args,
                  test_funcs). args is a dict of keyword arguments to pass to
                  the identify method. test_funcs are callables that accept a
                  Metadata object and return True iff the object passes the
                  test.
    '''
    from calibre.ebooks.metadata.sources.identify import identify

    tdir, lf, log, abort = init_test('Full Identify')
    prints('Log saved to', lf)

    times = []
    for kwargs, test_funcs in tests:
        log('#'*80)
        log('### Running test with:', kwargs)
        log('#'*80)
        prints('Running test with:', kwargs)
        # identify() positional signature: (log, abort, **kwargs)
        args = (log, abort)
        start_time = time.time()
        results = identify(*args, **kwargs)
        total_time = time.time() - start_time
        times.append(total_time)
        if not results:
            prints('identify failed to find any results')
            break

        prints('Found', len(results), 'matches:', end=' ')
        prints('Smaller relevance means better match')

        # Dump every candidate result (and its cached cover URLs) for debugging.
        for i, mi in enumerate(results):
            prints('*'*30, 'Relevance:', i, '*'*30)
            prints(mi)
            prints('\nCached cover URLs :',
                    [x[0].name for x in get_cached_cover_urls(mi)])
            prints('*'*75, '\n\n')

        # Collect the results that pass every supplied test function.
        possibles = []
        for mi in results:
            test_failed = False
            for tfunc in test_funcs:
                if not tfunc(mi):
                    test_failed = True
                    break
            if not test_failed:
                possibles.append(mi)

        if not possibles:
            prints('ERROR: No results that passed all tests were found')
            prints('Log saved to', lf)
            raise SystemExit(1)

        # Results are already relevance-sorted: the top hit must be a passer.
        if results[0] is not possibles[0]:
            prints('Most relevant result failed the tests')
            raise SystemExit(1)

        log('\n\n')

    # NOTE(review): assumes at least one test case ran; an empty `tests`
    # would make len(times) zero here.
    prints('Average time per query', sum(times)/len(times))

    prints('Full log is at:', lf)
# }}}
def test_identify_plugin(name, tests, modify_plugin=lambda plugin:None,
        fail_missing_meta=True): # {{{
    '''
    Run the identify (and, if supported, cover download) tests for a single
    named metadata source plugin, exiting the process on the first failure.

    :param name: Plugin name
    :param tests: List of 2-tuples. Each two tuple is of the form (args,
                  test_funcs). args is a dict of keyword arguments to pass to
                  the identify method. test_funcs are callables that accept a
                  Metadata object and return True iff the object passes the
                  test.
    :param modify_plugin: Optional callable applied to the plugin before the
                  tests run (e.g. to tweak its configuration).
    :param fail_missing_meta: If True, missing required fields or a failed
                  cover download abort the run.
    '''
    plugin = None
    # Locate the named plugin among all installed metadata source plugins.
    # NOTE(review): if no plugin matches, plugin stays None and the attribute
    # accesses below will raise AttributeError — confirm callers guarantee
    # the name exists.
    for x in all_metadata_plugins():
        if x.name == name and 'identify' in x.capabilities:
            plugin = x
            break
    modify_plugin(plugin)
    prints('Testing the identify function of', plugin.name)
    prints('Using extra headers:', plugin.browser.addheaders)

    tdir, lf, log, abort = init_test(plugin.name)
    prints('Log saved to', lf)

    times = []
    for kwargs, test_funcs in tests:
        prints('Running test with:', kwargs)
        rq = Queue()
        args = (log, rq, abort)
        start_time = time.time()
        # Flag the plugin as under test for the duration of identify();
        # always cleared, even if identify() raises.
        plugin.running_a_test = True
        try:
            err = plugin.identify(*args, **kwargs)
        finally:
            plugin.running_a_test = False
        total_time = time.time() - start_time
        times.append(total_time)
        if err is not None:
            prints('identify returned an error for args', args)
            prints(err)
            break

        # Drain all queued Metadata results without blocking.
        results = []
        while True:
            try:
                results.append(rq.get_nowait())
            except Empty:
                break

        prints('Found', len(results), 'matches:', end=' ')
        prints('Smaller relevance means better match')

        # Sort by the plugin's own relevance key: best match first.
        results.sort(key=plugin.identify_results_keygen(
            title=kwargs.get('title', None), authors=kwargs.get('authors',
                None), identifiers=kwargs.get('identifiers', {})))

        for i, mi in enumerate(results):
            prints('*'*30, 'Relevance:', i, '*'*30)
            prints(mi)
            prints('\nCached cover URL :',
                plugin.get_cached_cover_url(mi.identifiers))
            prints('*'*75, '\n\n')

        # Keep only the results that pass every supplied test callable.
        possibles = []
        for mi in results:
            test_failed = False
            for tfunc in test_funcs:
                if not tfunc(mi):
                    test_failed = True
                    break
            if not test_failed:
                possibles.append(mi)

        if not possibles:
            prints('ERROR: No results that passed all tests were found')
            prints('Log saved to', lf)
            raise SystemExit(1)

        # At least one passing result must also have all required fields
        # (test_fields() returns None when nothing is missing).
        good = [x for x in possibles if plugin.test_fields(x) is
                None]
        if not good:
            prints('Failed to find', plugin.test_fields(possibles[0]))
            if fail_missing_meta:
                raise SystemExit(1)

        # The most relevant result must itself be one that passed the tests.
        if results[0] is not possibles[0]:
            prints('Most relevant result failed the tests')
            raise SystemExit(1)

        # Optionally exercise cover download for the best result.
        if 'cover' in plugin.capabilities:
            rq = Queue()
            mi = results[0]
            plugin.download_cover(log, rq, abort, title=mi.title,
                    authors=mi.authors, identifiers=mi.identifiers)
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            if not results and fail_missing_meta:
                prints('Cover download failed')
                raise SystemExit(1)
            elif results:
                cdata = results[0]
                cover = os.path.join(tdir, plugin.name.replace(' ',
                    '')+'-%s-cover.jpg'%sanitize_file_name2(mi.title.replace(' ',
                        '_')))
                with open(cover, 'wb') as f:
                    f.write(cdata[-1])
                prints('Cover downloaded to:', cover)
                # A cover smaller than 10KB is almost certainly a placeholder.
                if len(cdata[-1]) < 10240:
                    prints('Downloaded cover too small')
                    raise SystemExit(1)

    prints('Average time per query', sum(times)/len(times))

    # Anything beyond a few bytes in the log means warnings/errors occurred.
    if os.stat(lf).st_size > 10:
        prints('There were some errors/warnings, see log', lf)
# }}}
| gpl-3.0 |
leki75/ansible | lib/ansible/compat/tests/__init__.py | 128 | 1267 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
This module contains things that are only needed for compat in the testsuites,
not in ansible itself. If you are not installing the test suite, you can
safely remove this subdirectory.
'''
#
# Compat for python2.7
#
# One unittest needs to import builtins via __import__() so we need to have
# the string that represents it
# Name of the builtins module as a string: '__builtin__' on Python 2 and
# 'builtins' on Python 3. One unittest needs this string to import builtins
# via __import__().
try:
    import __builtin__  # only importable on Python 2
    BUILTINS = '__builtin__'
except ImportError:
    BUILTINS = 'builtins'
| gpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/PIL/ImtImagePlugin.py | 40 | 2203 | #
# The Python Imaging Library.
# $Id$
#
# IM Tools support for PIL
#
# history:
# 1996-05-27 fl Created (read 8-bit images only)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) Fredrik Lundh 1996-2001.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"

import re
import Image, ImageFile

#
# --------------------------------------------------------------------

# Matches one "key value" header line, e.g. "width 256": group 1 is the
# lowercase key, group 2 the value (anything up to a space or EOL).
field = re.compile(r"([a-z]*) ([^ \r\n]*)")
##
# Image plugin for IM Tools images.

class ImtImageFile(ImageFile.ImageFile):

    format = "IMT"
    format_description = "IM Tools"

    def _open(self):
        # Parse the textual IM Tools header, then register a single "raw"
        # decoder tile pointing at the pixel data that follows it.

        # Quick rejection: if there's not a LF among the first
        # 100 bytes, this is (probably) not a text header.
        if not "\n" in self.fp.read(100):
            raise SyntaxError, "not an IM file"
        self.fp.seek(0)

        xsize = ysize = 0

        while 1:

            s = self.fp.read(1)
            if not s:
                # EOF before the header terminator: leave tile unset.
                break

            if s == chr(12):
                # Form feed (0x0C) ends the header; image data begins
                # immediately after it, at the current file offset.
                self.tile = [("raw", (0,0)+self.size,
                              self.fp.tell(),
                              (self.mode, 0, 1))]
                break

            else:
                # read key/value pair
                # FIXME: dangerous, may read whole file
                s = s + self.fp.readline()
                if len(s) == 1 or len(s) > 100:
                    # Empty or implausibly long line: stop parsing.
                    break
                if s[0] == "*":
                    continue # comment

                m = field.match(s)
                if not m:
                    break
                k, v = m.group(1,2)
                if k == "width":
                    xsize = int(v)
                    self.size = xsize, ysize
                elif k == "height":
                    ysize = int(v)
                    self.size = xsize, ysize
                elif k == "pixel" and v == "n8":
                    # "n8" means 8-bit greyscale pixels.
                    self.mode = "L"
#
# --------------------------------------------------------------------

# Register the plugin so Image.open() can recognise IM Tools files.
Image.register_open("IMT", ImtImageFile)

#
# no extension registered (".im" is simply too common)
| mit |
sarvex/tensorflow | tensorflow/python/kernel_tests/variable_scope_test.py | 10 | 82778 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
def run_inside_wrap_function_in_eager_mode(graph_function):
  """Decorator to execute the same graph code in eager and graph modes.

  In graph mode the decorated test body runs unchanged. In eager mode the
  body is first captured into a graph via wrap_function and the captured
  graph is executed instead.

  Args:
    graph_function: python function containing graph code to be wrapped

  Returns:
    decorated function
  """

  def wrap_and_execute(self):
    if not context.executing_eagerly():
      # Graph mode: just run the original body.
      graph_function(self)
    else:
      # Eager mode: trace the body into a graph function, then call it.
      wrap_function.wrap_function(graph_function, [self])()

  return wrap_and_execute
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertIs(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertIs(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set(v.name for v in vs._vars.values()))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestingError(self):
with variable_scope.variable_scope("aa"):
scope = variable_scope.variable_scope("bb")
scope.__enter__()
with variable_scope.variable_scope("cc"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string>
# has invalid type <class '...ResourceVariable'>, must be a string or Tensor.
# (Can not convert a ResourceVariable into a Tensor or Operation.)
@test_util.run_deprecated_v1
def testStringDefaultInitializer(self):
with self.cached_session():
v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
with context.eager_mode():
@function.defun
def f():
v = variable_scope.get_variable("should_be_resource", [])
self.assertEqual(type(v), resource_variable_ops.ResourceVariable)
f()
def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
# Test copying.
new_store = store.copy()
with new_store.as_default():
new_v = variable_scope.get_variable("v")
new_w = variable_scope.get_variable("w")
self.assertEqual(new_v.numpy(), v.numpy())
self.assertEqual(new_w.numpy(), w.numpy())
self.assertTrue(new_v in new_store.variables())
self.assertTrue(new_w in new_store.variables())
self.assertTrue(new_v in new_store.trainable_variables())
self.assertFalse(new_w in new_store.trainable_variables())
self.assertFalse(new_v in new_store.non_trainable_variables())
self.assertTrue(new_w in new_store.non_trainable_variables())
# Check that variables are separate instances.
for v in store.variables():
v.assign(-1)
for v in new_store.variables():
v.assign(1)
for v in store.variables():
self.assertEqual(v.numpy(), -1)
for v in new_store.variables():
self.assertEqual(v.numpy(), 1)
def testEagerVariableStoreWithEagerDefun(self):
with context.eager_mode():
@function.defun
def f():
x = constant_op.constant([[2.0]])
d1 = core_layers.Dense(
1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
_ = d1(x) # create variables
self.assertEqual(len(d1.variables), 2)
v1, v2 = d1.variables
d2 = core_layers.Dense(
1,
name="my_dense",
kernel_initializer=init_ops.ones_initializer(),
_reuse=True)
_ = d2(x)
self.assertEqual(len(d2.variables), 2)
v3, v4 = d2.variables
self.assertIs(v1, v3)
self.assertIs(v2, v4)
f()
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesStoreAddsToCollections(self):
store = variable_scope.EagerVariableStore()
with store.as_default():
trainable = variable_scope.get_variable("v1", [], trainable=True)
not_trainable = variable_scope.get_variable("v2", [], trainable=False)
concat = variable_scope.get_variable(
"v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES])
self.assertEqual(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES),
[trainable, not_trainable])
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
[trainable, concat])
self.assertEqual(
ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
def testEagerVariablesOutsideStoreNotAddedToCollections(self):
with context.eager_mode():
variable_scope.get_variable("v1", [], trainable=True)
variable_scope.get_variable("v2", [], trainable=False)
self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
def testEagerVariableStoreWithFunctionalLayer(self):
with context.eager_mode():
container = variable_scope.EagerVariableStore()
x = constant_op.constant([[2.0]])
with container.as_default():
y = core_layers.dense(x, 1, name="my_dense",
kernel_initializer=init_ops.ones_initializer())
self.assertAllEqual(y, [[2.0]])
self.assertEqual(len(container.variables()), 2)
# Recreate the layer to test reuse.
with container.as_default():
core_layers.dense(x, 1, name="my_dense",
kernel_initializer=init_ops.ones_initializer())
self.assertEqual(len(container.variables()), 2)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if context.executing_eagerly() else TypeError
with self.assertRaises(error):
variable_scope.get_variable("x4", initializer={})
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError=: You must feed a value for placeholder tensor
# 'ReadVariableOp/resource' with dtype resource
@test_util.run_in_graph_and_eager_modes
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
# Use different variable_name to distinguish various dtypes
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device.
@test_util.run_deprecated_v1
def testVarScopeCachingDevice(self):
with self.cached_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: Tensor.name is meaningless when eager execution is enabled.
@test_util.run_in_graph_and_eager_modes
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
# Next 3 variable not regularized to test disabling regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if not context.executing_eagerly():
v = variable_scope.get_variable("v",
[]) # "v" is already there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Tensor-typed variable initializers must either be wrapped in an
# init_scope or callable...
@test_util.run_in_graph_and_eager_modes
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegex(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegex(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
# invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
# be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
# Operation.)
@test_util.run_deprecated_v1
def testControlDeps(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegex(errors.OpError, "uninitialized"):
self.evaluate(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual(1, self.evaluate(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegex(errors.OpError, "uninitialized"):
self.evaluate(v0)
with self.assertRaisesRegex(errors.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AssertionError: True is not false (last assertFalse)
@test_util.run_deprecated_v1
def testEnableResourceVariables(self):
old = variable_scope._DEFAULT_USE_RESOURCE
try:
variable_scope.enable_resource_variables()
self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
variable_scope.disable_resource_variables()
self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
finally:
variable_scope._DEFAULT_USE_RESOURCE = old
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument None has invalid type <type 'NoneType'>
@test_util.run_deprecated_v1
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegex(errors.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegex(errors.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'.
@test_util.run_in_graph_and_eager_modes
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if not context.executing_eagerly():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOriginalNameScope(self):
with self.cached_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeObjectReuse(self):
with self.cached_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeGetOrCreateReuse(self):
    """AUTO_REUSE creates the variable on the first call and reuses it afterwards."""
    with self.cached_session():
      def test_value(value):
        # Assign `value` into the (created-or-reused) variable, then read it
        # back via a second AUTO_REUSE scope to prove it is the same variable.
        x = constant_op.constant(value)
        with variable_scope.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=variable_scope.AUTO_REUSE):
          _ = state_ops.assign(variable_scope.get_variable("var", []), x)
        with variable_scope.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=variable_scope.AUTO_REUSE):
          _ = variable_scope.get_variable("var", [])
        self.assertEqual(value, self.evaluate(x))
      test_value(42.)  # Variable is created.
      test_value(13.)  # Variable is reused hereafter.
      test_value(17.)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScope(self):
    """variable_scope(name, default_name): explicit names collide, default names are uniquified."""
    with self.cached_session():
      with ops.name_scope("testVarOpScope1"):
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "tower/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
        with variable_scope.variable_scope("tower", "default", []):
          # Same explicit scope name without reuse: creating "w" again fails.
          with self.assertRaises(ValueError):
            variable_scope.get_variable("w", [])
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
      with ops.name_scope("testVarOpScope2"):
        # name=None falls back to default_name, which IS uniquified.
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default_1/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
    """Default-name uniquification counts per exact name; a prefix ("defaultScope") does not share a counter with "defaultScope1"."""
    with self.cached_session():
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_1/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_2/layer/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesWithJump(self):
    """Re-entering the enclosing scope mid-stream does not reset default-name numbering."""
    with self.cached_session():
      with variable_scope.variable_scope("default") as default:
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/layer/w:0")
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_1/w:0")
        with variable_scope.variable_scope(default):
          pass
        # No matter the jump in the middle, unique numbering continues.
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_2/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuse(self):
    """Under reuse=True, variable names are reused but name scopes are still uniquified (outer -> outer_1)."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with variable_scope.variable_scope(outer, reuse=True) as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            # Name scope is freshly uniquified even though variables reuse.
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertIs(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertIs(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertIs(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeOuterScope(self):
    """Re-entering a closed scope keeps variable names but gets a uniquified name scope."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
      with variable_scope.variable_scope(outer, reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeNestedOuterScope(self):
    """Re-entering a scope from inside itself nests its name scope (outer/outer, outer/outer_1)."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
          with variable_scope.variable_scope("default"):
            self.assertEqual(
                variable_scope.get_variable("w", []).name, "outer/default/w:0")
            with ops.name_scope("scope2") as sc2:
              self.assertEqual(sc2, "outer/default/scope2/")
        with variable_scope.variable_scope(outer, reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer_1/scope2/")
          with variable_scope.variable_scope("default", reuse=True):
            self.assertEqual(
                variable_scope.get_variable("w", []).name, "outer/default/w:0")
            with ops.name_scope("scope2") as sc2:
              self.assertEqual(sc2, "outer/default_1/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseParam(self):
    """reuse can be set per-inner-scope or switched on via reuse_variables() mid-scope."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with variable_scope.variable_scope(outer) as outer:
        with variable_scope.variable_scope("tower", "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        # From here on the outer scope itself is in reuse mode.
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseError(self):
    """reuse=True with only a default_name (name=None) raises ValueError."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        with variable_scope.variable_scope(None, "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeOuterScope(self):
    """Re-entering a closed scope with a default_name: variables keep names, name scopes uniquify."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeNestedOuterScope(self):
    """Re-entering a scope (with default_name) from inside itself vs. after closing it."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testBasicWhenAuxiliaryNameScopeIsFalse(self):
    """auxiliary_name_scope=False: variables get the scope prefix but ops do not."""
    with self.cached_session():
      with variable_scope.variable_scope(
          "scope", auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "scope/w:0")
        # Ops created here are NOT placed under "scope/".
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w1", []).name, "scope/w1:0")
        self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
      # Recheck: no name scope named "scope" was created above.
      with ops.name_scope("scope"):
        self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            "inner", auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/inner/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/c:0")
        with variable_scope.variable_scope(
            inner, auxiliary_name_scope=False) as inner1:
          self.assertEqual(inner1.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/c1:0")
        # Recheck: no name scope named "inner" was created above.
        with ops.name_scope("inner"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/inner/c:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
    """auxiliary_name_scope=False also applies when the scope comes from default_name."""
    with self.cached_session():
      with variable_scope.variable_scope(
          None, default_name="default", auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      # Recheck: no name scope named "default" was created above.
      with ops.name_scope("default"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "default/c:0")
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            None, default_name="default",
            auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/c:0")
        # Recheck: no name scope named "default" was created above.
        with ops.name_scope("default"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/default/c:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
    """Re-entering the root scope with auxiliary_name_scope=False strips the variable prefix but keeps the surrounding op name scope."""
    with self.cached_session():
      root_scope = variable_scope.get_variable_scope()
      with variable_scope.variable_scope(
          root_scope, auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            root_scope, auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "")
          self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
          # Ops still pick up the enclosing "outer/" name scope.
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/c1:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testAuxiliaryNameScopeIsInvalid(self):
    """A non-bool auxiliary_name_scope raises TypeError for name, default_name, and scope-object entry."""
    with self.cached_session():
      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            None, default_name="scope", auxiliary_name_scope="invalid"):
          pass
      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            "scope", auxiliary_name_scope="invalid"):
          pass
      with variable_scope.variable_scope("scope") as scope:
        pass
      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            scope, auxiliary_name_scope="invalid"):
          pass
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReuseScopeWithoutNameScopeCollision(self):
    """Re-entering a scope via auxiliary_name_scope=False + original_name_scope avoids "_1"-style collisions."""
    # Github issue: #13429
    with self.cached_session():
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope("inner") as inner:
          pass
      with variable_scope.variable_scope(
          inner, auxiliary_name_scope=False) as scope:
        with ops.name_scope(scope.original_name_scope):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/inner/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/inner/c:0")
      # The bare "inner" name scope is still unused at top level.
      with ops.name_scope("inner"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "inner/c:0")
      with variable_scope.variable_scope("another"):
        with variable_scope.variable_scope(
            inner, auxiliary_name_scope=False) as scope1:
          with ops.name_scope(scope1.original_name_scope):
            self.assertEqual(
                variable_scope.get_variable("w1", []).name,
                "outer/inner/w1:0")
            self.assertEqual(
                constant_op.constant([], name="c1").name, "outer/inner/c1:0")
        with ops.name_scope("inner"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "another/inner/c:0")
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  # (different assertions failing after wrapping, in both execution modes)
  @test_util.run_in_graph_and_eager_modes
  def testGetLocalVar(self):
    """get_local_variable: naming, collection membership, and reuse."""
    # Check that local variable respects naming.
    with variable_scope.variable_scope("outer") as outer:
      with variable_scope.variable_scope(outer, "default", []):
        local_var = variable_scope.get_local_variable(
            "w", [], collections=["foo"])
        self.assertEqual(local_var.name, "outer/w:0")
    if not context.executing_eagerly():
      # Since variable is local, it should be in the local variable collection
      # but not the trainable collection.
      self.assertIn(local_var,
                    ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
      self.assertIn(local_var, ops.get_collection("foo"))
      self.assertNotIn(local_var,
                       ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
    # Check that local variable respects `reuse`.
    with variable_scope.variable_scope(outer, "default", reuse=True):
      self.assertEqual(
          variable_scope.get_local_variable("w", []).name, "outer/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSignatureGetVarVsGetLocalVar(self):
    """get_{local,}variable() must take the same list of args."""
    arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
    local_arg_names = tf_inspect.getargspec(
        variable_scope.get_local_variable)[0]
    self.assertEqual(arg_names, local_arg_names)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVarWithDevice(self):
    """A device function sees variable ops with the expected names and dtypes."""
    g = ops.Graph()
    varname_type = []
    def device_func(op):
      # Record (name, dtype) for every variable op routed through this
      # device function; non-variable ops are only placed, not recorded.
      if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
        varname_type.append((op.name, op.get_attr("dtype")))
      return "/device:GPU:0"
    with g.as_default():
      with ops.device(device_func):
        _ = variable_scope.get_variable("x", (100, 200))
        _ = variable_scope.get_variable(
            "y", dtype=dtypes.int64, initializer=numpy.arange(73))
    self.assertEqual(varname_type[0], ("x", dtypes.float32))
    self.assertEqual(varname_type[1], ("y", dtypes.int64))
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetCollection(self):
    """Scope.get_collection filters by scope prefix; the root scope sees everything."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetCollection_a", [])
      _ = variable_scope.get_variable(
          "testGetCollection_b", [], trainable=False)
      with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
        _ = variable_scope.get_variable("testGetCollection_a", [])
        _ = variable_scope.get_variable(
            "testGetCollection_b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["testGetCollection_foo_/testGetCollection_a:0"])
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], [
            "testGetCollection_foo_/testGetCollection_a:0",
            "testGetCollection_foo_/testGetCollection_b:0"
        ])
      with variable_scope.variable_scope("testGetCollection_foo") as scope2:
        _ = variable_scope.get_variable("testGetCollection_a", [])
        _ = variable_scope.get_variable(
            "testGetCollection_b", [], trainable=False)
        # NOTE: "testGetCollection_foo" is a prefix of the earlier
        # "testGetCollection_foo_" scope, but their collections stay separate.
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["testGetCollection_foo/testGetCollection_a:0"])
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], [
            "testGetCollection_foo/testGetCollection_a:0",
            "testGetCollection_foo/testGetCollection_b:0"
        ])
      scope = variable_scope.get_variable_scope()
      self.assertEqual([
          v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      ], [
          "testGetCollection_a:0", "testGetCollection_b:0",
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_b:0",
          "testGetCollection_foo/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_b:0"
      ])
      self.assertEqual([
          v.name
          for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      ], [
          "testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_a:0"
      ])
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetTrainableVariablesWithGetVariable(self):
    """trainable_variables(): trainable=False and ON_READ sync exclude variables; explicit trainable=True or ON_WRITE include them."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetTrainableVariables_a", [])
      with variable_scope.variable_scope(
          "testGetTrainableVariables_foo") as scope:
        _ = variable_scope.get_variable("testGetTrainableVariables_b", [])
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_c", [], trainable=False)
        # sync `ON_READ` sets trainable=False
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_d", [],
            synchronization=variable_scope.VariableSynchronization.ON_READ)
        self.assertEqual(
            [v.name for v in scope.trainable_variables()],
            ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # Explicit trainable=True overrides the ON_READ default.
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_e", [],
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            trainable=True)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
        ])
        # All other sync values set trainable=True
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_f", [],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_f:0",
        ])
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetTrainableVariablesWithVariable(self):
    """Same trainability rules as get_variable, but via variable_scope.variable()."""
    with self.cached_session():
      _ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
      with variable_scope.variable_scope(
          "testGetTrainableVariables_foo") as scope:
        _ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
        _ = variable_scope.variable(
            1.0, name="testGetTrainableVariables_c", trainable=False)
        # sync `ON_READ` sets trainable=False
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_d",
            synchronization=variable_scope.VariableSynchronization.ON_READ)
        self.assertEqual(
            [v.name for v in scope.trainable_variables()],
            ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # Explicit trainable=True overrides the ON_READ default.
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_e",
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            trainable=True)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
        ])
        # All other sync values set trainable=True
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_f",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_f:0",
        ])
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetGlobalVariables(self):
    """scope.global_variables() only reports variables created inside the scope."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetGlobalVariables_a", [])
      with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
        _ = variable_scope.get_variable("testGetGlobalVariables_b", [])
        self.assertEqual(
            [v.name for v in scope.global_variables()],
            ["testGetGlobalVariables_foo/"
             "testGetGlobalVariables_b:0"])
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetLocalVariables(self):
    """scope.local_variables() reports only in-scope variables placed in LOCAL_VARIABLES."""
    with self.cached_session():
      _ = variable_scope.get_variable(
          "a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
      with variable_scope.variable_scope("foo") as scope:
        _ = variable_scope.get_variable(
            "b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
        _ = variable_scope.get_variable("c", [])
        self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithRefDtype(self):
    """Passing an existing variable's dtype (possibly a _ref dtype) to get_variable works."""
    v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
    # Ensure it is possible to do get_variable with a _ref dtype passed in.
    _ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithInitializerWhichTakesNoArgs(self):
    """A zero-argument callable initializer is accepted without a shape."""
    v = variable_scope.get_variable("foo", initializer=lambda: [2])
    self.assertEqual(v.name, "foo:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithInitializerWhichTakesOptionalArgs(self):
    """A callable initializer whose args all have defaults is accepted."""
    v = variable_scope.get_variable("foo", initializer=lambda x=True: [2])
    self.assertEqual(v.name, "foo:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithInitializerWhichTakesUnprovidedArgsAndNoShape(self):
    """A callable initializer with a required arg and no shape raises ValueError."""
    with self.assertRaisesRegex(
        ValueError,
        "The initializer passed is not valid. It should be a callable with no "
        "arguments and the shape should not be provided or an instance of "
        "`tf.keras.initializers.*' and `shape` should be fully defined."):
      variable_scope.get_variable("foo", initializer=lambda x: [2])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoGraphs(self):
    """Opening a variable scope named "_" (invalid) inside nested graphs raises."""
    def f():
      g1 = ops.Graph()
      g2 = ops.Graph()
      with g1.as_default():
        with g2.as_default():
          with variable_scope.variable_scope("_"):
            pass
    self.assertRaisesRegex(ValueError, "'_' is not a valid scope name", f)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
  """Partitioner that leaves every axis in a single partition."""
  return [1 for _ in shape]
def axis0_into2_partitioner(shape=None, **unused_kwargs):
  """Partitioner that splits axis 0 into two parts, other axes into one."""
  partitions = [1 for _ in shape]
  partitions[0] = 2
  return partitions
def axis0_into3_partitioner(shape=None, **unused_kwargs):
  """Partitioner that splits axis 0 into three parts, other axes into one."""
  partitions = [1 for _ in shape]
  partitions[0] = 3
  return partitions
class VariableScopeWithPartitioningTest(test.TestCase):
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegex(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegex(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertIs(v, v_reused)
def testNoReuseInEagerByDefault(self):
with context.eager_mode():
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v1 = variable_scope.get_variable("name0", shape=(3, 1, 1))
v2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertIsNot(v1, v2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
  def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
    # Shared helper: partitioning along axis 0 vs axis 1 must split the
    # (2, 2, 2) full shape along that axis only, while the partitioned
    # variable still reports the full shape.
    def _part_axis_0(**unused_kwargs):
      return (2, 1, 1)
    def _part_axis_1(**unused_kwargs):
      return (1, 2, 1)
    with variable_scope.variable_scope("root", use_resource=use_resource):
      v0 = variable_scope.get_variable(
          "n0", shape=(2, 2, 2), partitioner=_part_axis_0)
      v1 = variable_scope.get_variable(
          "n1", shape=(2, 2, 2), partitioner=_part_axis_1)
    self.assertEqual(v0.get_shape(), (2, 2, 2))
    self.assertEqual(v1.get_shape(), (2, 2, 2))
    # v0 is split along axis 0: two (1, 2, 2) slices.
    n0_0 = list(v0)[0]
    n0_1 = list(v0)[1]
    self.assertEqual(n0_0.get_shape(), (1, 2, 2))
    self.assertEqual(n0_1.get_shape(), (1, 2, 2))
    # v1 is split along axis 1: two (2, 1, 2) slices.
    n1_0 = list(v1)[0]
    n1_1 = list(v1)[1]
    self.assertEqual(n1_0.get_shape(), (2, 1, 2))
    self.assertEqual(n1_1.get_shape(), (2, 1, 2))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testPartitionConcatenatesAlongCorrectAxis(self):
    # Non-resource-variable flavour of the shared helper.
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testPartitionConcatenatesAlongCorrectAxisResource(self):
    # Resource-variable flavour of the shared helper.
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
  def testPartitionConcatenatesAlongCorrectAxisResourceInEager(self):
    # Same check, but executed under an explicit eager context.
    with context.eager_mode():
      self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
  """Tests for `custom_getter` and variable-creator hooks in variable_scope."""
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNonCallableGetterFails(self):
    # A non-callable custom_getter must be rejected both at scope level
    # and when passed directly to get_variable.
    with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
      with variable_scope.variable_scope("scope0", custom_getter=3):
        variable_scope.get_variable("name0")
    with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
      variable_scope.get_variable("name0", custom_getter=3)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNoSideEffectsWithIdentityCustomGetter(self):
    # An identity getter must not change reuse semantics; `called` counts
    # how many times the getter actually ran.
    called = [0]
    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)
    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope("new_scope") as new_scope:
      v3 = variable_scope.get_variable("v3", [1])
    with variable_scope.variable_scope(
        new_scope, reuse=True, custom_getter=custom_getter):
      v4 = variable_scope.get_variable("v3", [1])
    self.assertIs(v, v2)
    self.assertIs(v3, v4)
    self.assertEqual(3, called[0])  # skipped one in the first new_scope
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSynchronizationAndAggregationWithCustomGetter(self):
    # synchronization/aggregation kwargs passed to get_variable must be
    # forwarded unchanged to the custom getter.
    called = [0]
    synchronization = variable_scope.VariableSynchronization.AUTO
    aggregation = variable_scope.VariableAggregation.NONE
    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      # Verify synchronization and aggregation kwargs are as expected.
      self.assertEqual(kwargs["synchronization"], synchronization)
      self.assertEqual(kwargs["aggregation"], aggregation)
      return getter(*args, **kwargs)
    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      variable_scope.get_variable("v", [1])
    self.assertEqual(1, called[0])
    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      synchronization = variable_scope.VariableSynchronization.ON_READ
      aggregation = variable_scope.VariableAggregation.MEAN
      variable_scope.get_variable(
          "v1", [1], synchronization=synchronization, aggregation=aggregation)
    self.assertEqual(2, called[0])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testCustomGetterWithReuse(self):
    # Custom getter can choose to behave differently on reused variables.
    def custom_getter(getter, *args, **kwargs):
      var = getter(*args, **kwargs)
      if kwargs["reuse"]:
        # This can be used, e.g., for changing the caching device if needed.
        return array_ops.identity(var, name="reused")
      else:
        return array_ops.identity(var, name="not_reused")
    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    self.assertEqual(v.name, "not_reused:0")
    self.assertEqual(v2.name, "reused:0")
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3)
  # dtype=float32> cannot be interpreted as a Tensor. (Tensor
  # Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an
  # element of this graph.)
  @test_util.run_deprecated_v1
  def testGetterThatCreatesTwoVariablesAndSumsThem(self):
    # The getter returns a tensor (sum of two underlying variables), so
    # the "variable" seen by the caller is a computed value.
    def custom_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/0" % name, *args, **kwargs)
      g_1 = getter("%s/1" % name, *args, **kwargs)
      with ops.name_scope("custom_getter"):
        return g_0 + g_1
    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(2, len(true_vars))
    self.assertEqual("scope/v/0:0", true_vars[0].name)
    self.assertEqual("scope/v/1:0", true_vars[1].name)
    self.assertEqual("custom_getter/add:0", v.name)
    with self.cached_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = self.evaluate([true_vars, v])
      self.assertAllClose(np_v, sum(np_vars))
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3)
  # dtype=float32> cannot be interpreted as a Tensor. (Tensor
  # Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an
  # element of this graph.)
  @test_util.run_deprecated_v1
  def testNestedCustomGetters(self):
    # Getters compose outside-in: each sum getter doubles the variable
    # count, the innermost prod getter multiplies pairs -> 2*2*2 = 8 vars.
    def sum_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/sum_0" % name, *args, **kwargs)
      g_1 = getter("%s/sum_1" % name, *args, **kwargs)
      with ops.name_scope("sum_getter"):
        return g_0 + g_1
    def prod_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/prod_0" % name, *args, **kwargs)
      g_1 = getter("%s/prod_1" % name, *args, **kwargs)
      with ops.name_scope("prod_getter"):
        return g_0 * g_1
    with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter):
      with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter):
        with variable_scope.variable_scope(
            "inner_sum_scope", custom_getter=sum_getter):
          # take sums of sums of products
          v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(8, len(true_vars))
    template = (
        "prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
    self.assertEqual(template % (0, 0, 0), true_vars[0].name)
    self.assertEqual(template % (0, 0, 1), true_vars[1].name)
    self.assertEqual(template % (0, 1, 0), true_vars[2].name)
    self.assertEqual(template % (0, 1, 1), true_vars[3].name)
    self.assertEqual(template % (1, 0, 0), true_vars[4].name)
    self.assertEqual(template % (1, 0, 1), true_vars[5].name)
    self.assertEqual(template % (1, 1, 0), true_vars[6].name)
    self.assertEqual(template % (1, 1, 1), true_vars[7].name)
    with self.cached_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = self.evaluate([true_vars, v])
      # take products of sums of products
      self.assertAllClose(
          np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
              (np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreator(self):
    # Creators also compose: creator_b (inner) forces the name before
    # creator_a (outer) records it.
    variable_names = []
    def creator_a(next_creator, **kwargs):
      variable_names.append(kwargs.get("name", ""))
      return next_creator(**kwargs)
    def creator_b(next_creator, **kwargs):
      kwargs["name"] = "forced_name"
      return next_creator(**kwargs)
    with variable_scope.variable_creator_scope(creator_a):
      with variable_scope.variable_creator_scope(creator_b):
        variable_scope.variable(1.0, name="one_name")
    self.assertEqual(variable_names[0], "forced_name")
    called = [False]
    def creater_c(next_creator, **kwargs):
      called[0] = True
      self.assertEqual(kwargs["synchronization"],
                       variable_scope.VariableSynchronization.ON_WRITE)
      self.assertEqual(kwargs["aggregation"],
                       variable_scope.VariableAggregation.MEAN)
      return next_creator(**kwargs)
    with variable_scope.variable_creator_scope(creater_c):
      variable_scope.get_variable(
          "v", [],
          synchronization=variable_scope.VariableSynchronization.ON_WRITE,
          aggregation=variable_scope.VariableAggregation.MEAN)
    self.assertTrue(called[0])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreatorNestingError(self):
    # Exiting a creator scope out of order (while an inner scope is still
    # active) must raise RuntimeError.
    def creator(next_creator, **kwargs):
      return next_creator(**kwargs)
    # Save the state so we can clean up at the end.
    graph = ops.get_default_graph()
    old_creator_stack = graph._variable_creator_stack
    try:
      scope = variable_scope.variable_creator_scope(creator)
      scope.__enter__()
      with variable_scope.variable_creator_scope(creator):
        with self.assertRaises(RuntimeError):
          scope.__exit__(None, None, None)
    finally:
      graph._variable_creator_stack = old_creator_stack
class PartitionInfoTest(test.TestCase):
  """Tests for the internal `_PartitionInfo` helper."""
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testConstructorChecks(self):
    # Invalid arg types.
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
    # full_shape and var_offset must have same length.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
    # Offset must always be less than shape.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSingleOffset(self):
    # single_offset returns the offset along the (single) sliced dim.
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(4, partition_info.single_offset([1, 3]))
    # Tests when the variable isn't partitioned at all.
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(0, partition_info.single_offset([9, 3]))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSingleSliceDim(self):
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    # Invalid shape.
    with self.assertRaises(TypeError):
      partition_info.single_slice_dim(None)
    # Rank of shape differs from full_shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 2, 3])
    # Shape is too large given var_offset (4+6 > 9).
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([6, 3])
    # Multiple possible slice dim from shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 1])
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
class VariableScopeMultithreadedTest(test.TestCase):
  """Variable scope behavior when multiple threads share one graph."""
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoThreadsDisjointScopeEntry(self):
    # Threads run sequentially (join before next start): the second must
    # see the first thread's variable and fail to re-create it.
    def thread_fn(i, graph):
      with graph.as_default():
        with variable_scope.variable_scope("foo"):
          if i == 0:
            v = variable_scope.get_variable("v", [])
            self.assertEqual("foo/v:0", v.name)
          else:
            # Any thread after the first one should fail to create variable
            # with the same name.
            with self.assertRaises(ValueError):
              variable_scope.get_variable("v", [])
    graph = ops.get_default_graph()
    threads = [
        threading.Thread(target=thread_fn, args=(
            i,
            graph,
        )) for i in range(2)
    ]
    threads[0].start()
    # Allow thread 0 to finish before starting thread 1.
    threads[0].join()
    threads[1].start()
    threads[1].join()
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoThreadsNestedScopeEntry(self):
    # Both threads hold their scope open concurrently (pause/run events
    # interleave them) and the duplicate-name error must still fire.
    def thread_fn(i, graph, run_event, pause_event):
      with graph.as_default():
        with variable_scope.variable_scope("foo"):
          if i == 0:
            v = variable_scope.get_variable("v", [])
            self.assertEqual("foo/v:0", v.name)
          else:
            # Any thread after the first one should fail to create variable
            # with the same name.
            with self.assertRaises(ValueError):
              variable_scope.get_variable("v", [])
          pause_event.set()
          run_event.wait()
    graph = ops.get_default_graph()
    run_events = [threading.Event() for _ in range(2)]
    pause_events = [threading.Event() for _ in range(2)]
    threads = [
        threading.Thread(
            target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
        for i in range(2)
    ]
    # Start first thread.
    threads[0].start()
    pause_events[0].wait()
    # Start next thread once the first thread has paused.
    threads[1].start()
    pause_events[1].wait()
    # Resume both threads.
    run_events[0].set()
    run_events[1].set()
    threads[0].join()
    threads[1].join()
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterMainScope(self):
    # A worker thread can re-enter a VariableScope object created on the
    # main thread and inherit its name prefix.
    def thread_fn(graph, main_thread_scope):
      with graph.as_default():
        # Variable created with main scope will have prefix "main".
        with variable_scope.variable_scope(main_thread_scope):
          with variable_scope.variable_scope("foo"):
            v = variable_scope.get_variable("v", [])
            self.assertEqual("main/foo/v:0", v.name)
        # Variable created outside main scope will not have prefix "main".
        with variable_scope.variable_scope("bar"):
          v = variable_scope.get_variable("v", [])
          self.assertEqual("bar/v:0", v.name)
    graph = ops.get_default_graph()
    with variable_scope.variable_scope("main") as main_thread_scope:
      thread = threading.Thread(
          target=thread_fn, args=(graph, main_thread_scope))
      thread.start()
      thread.join()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/keystoneclient/v3/roles.py | 5 | 4893 | # Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import exceptions
class Role(base.Resource):
    """Represents an Identity role.
    Attributes:
        * id: a uuid that identifies the role
        * name: user-facing identifier
    """
    # No behavior beyond the generic base.Resource container.
    pass
class RoleManager(base.CrudManager):
    """Manager class for manipulating Identity roles."""
    resource_class = Role
    collection_key = 'roles'
    key = 'role'

    def _role_grants_base_url(self, user, group, domain, project):
        # Callers have already validated (via the _require_* helpers) that
        # exactly one of user/group and one of domain/project was given.
        if project:
            url = '/projects/%s' % base.getid(project)
        elif domain:
            url = '/domains/%s' % base.getid(domain)
        if user:
            url += '/users/%s' % base.getid(user)
        elif group:
            url += '/groups/%s' % base.getid(group)
        return url

    def _require_domain_xor_project(self, domain, project):
        # Exactly one of the two targets must be supplied.
        if bool(domain) == bool(project):
            msg = 'Specify either a domain or project, not both'
            raise exceptions.ValidationError(msg)

    def _require_user_xor_group(self, user, group):
        # Exactly one of the two actors must be supplied.
        if bool(user) == bool(group):
            msg = 'Specify either a user or group, not both'
            raise exceptions.ValidationError(msg)

    def create(self, name):
        """Create a role with the given name."""
        return super(RoleManager, self).create(name=name)

    def get(self, role):
        """Fetch a single role by instance or id."""
        return super(RoleManager, self).get(role_id=base.getid(role))

    def list(self, user=None, group=None, domain=None, project=None, **kwargs):
        """Lists roles and role grants.

        If no arguments are provided, all roles in the system will be
        listed.

        If a user or group is specified, you must also specify either a
        domain or project to list role grants on that pair. And if
        ``**kwargs`` are provided, then also filter roles with
        attributes matching ``**kwargs``.
        """
        if not (user or group):
            return super(RoleManager, self).list()
        self._require_user_xor_group(user, group)
        self._require_domain_xor_project(domain, project)
        grants_url = self._role_grants_base_url(user, group, domain, project)
        return super(RoleManager, self).list(base_url=grants_url, **kwargs)

    def update(self, role, name=None):
        """Update the name of a role."""
        return super(RoleManager, self).update(role_id=base.getid(role),
                                               name=name)

    def delete(self, role):
        """Delete a role."""
        return super(RoleManager, self).delete(role_id=base.getid(role))

    def grant(self, role, user=None, group=None, domain=None, project=None):
        """Grants a role to a user or group on a domain or project."""
        self._require_domain_xor_project(domain, project)
        self._require_user_xor_group(user, group)
        grants_url = self._role_grants_base_url(user, group, domain, project)
        return super(RoleManager, self).put(base_url=grants_url,
                                            role_id=base.getid(role))

    def check(self, role, user=None, group=None, domain=None, project=None):
        """Checks if a user or group has a role on a domain or project."""
        self._require_domain_xor_project(domain, project)
        self._require_user_xor_group(user, group)
        grants_url = self._role_grants_base_url(user, group, domain, project)
        return super(RoleManager, self).head(base_url=grants_url,
                                             role_id=base.getid(role))

    def revoke(self, role, user=None, group=None, domain=None, project=None):
        """Revokes a role from a user or group on a domain or project."""
        self._require_domain_xor_project(domain, project)
        self._require_user_xor_group(user, group)
        grants_url = self._role_grants_base_url(user, group, domain, project)
        return super(RoleManager, self).delete(base_url=grants_url,
                                               role_id=base.getid(role))
| apache-2.0 |
rahul67/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlwt3/Row.py | 46 | 10111 | # -*- coding: windows-1252 -*-
from . import BIFFRecords
from . import Style
from .Cell import StrCell, BlankCell, NumberCell, FormulaCell, MulBlankCell, BooleanCell, ErrorCell, \
_get_cells_biff_data_mul
from . import ExcelFormula
import datetime as dt
# ``decimal`` has been in the standard library since Python 2.4; the stub
# below only ever triggers on ancient interpreters.
try:
    from decimal import Decimal
except ImportError:
    # Python 2.3: decimal not supported; create dummy Decimal class
    class Decimal(object):
        pass
class Row(object):
    """A single worksheet row.

    Holds the row's cells, display height and outline state, and knows how
    to serialise itself (ROW record) and its cells to BIFF records.

    Bug fix: ``set_cell_formula`` previously hard-coded ``calc_flags=0``
    when constructing the FormulaCell, silently discarding the caller's
    ``calc_flags`` argument; it is now passed through.
    """
    __slots__ = [# private variables
                 "__idx",
                 "__parent",
                 "__parent_wb",
                 "__cells",
                 "__min_col_idx",
                 "__max_col_idx",
                 "__xf_index",
                 "__has_default_xf_index",
                 "__height_in_pixels",
                 # public variables
                 "height",
                 "has_default_height",
                 "height_mismatch",
                 "level",
                 "collapse",
                 "hidden",
                 "space_above",
                 "space_below"]

    def __init__(self, rowx, parent_sheet):
        """Create row *rowx* (0..65535) belonging to *parent_sheet*."""
        if not (isinstance(rowx, int) and 0 <= rowx <= 65535):
            raise ValueError("row index (%r) not an int in range(65536)" % rowx)
        self.__idx = rowx
        self.__parent = parent_sheet
        self.__parent_wb = parent_sheet.get_parent()
        self.__cells = {}
        self.__min_col_idx = 0
        self.__max_col_idx = 0
        self.__xf_index = 0x0F
        self.__has_default_xf_index = 0
        self.__height_in_pixels = 0x11
        self.height = 0x00FF
        self.has_default_height = 0x00
        self.height_mismatch = 0
        self.level = 0
        self.collapse = 0
        self.hidden = 0
        self.space_above = 0
        self.space_below = 0

    def __adjust_height(self, style):
        """Grow the cached pixel height to fit *style*'s font if needed."""
        twips = style.font.height
        points = float(twips)/20.0
        # Cell height in pixels can be calcuted by following approx. formula:
        # cell height in pixels = font height in points * 83/50 + 2/5
        # It works when screen resolution is 96 dpi
        pix = int(round(points*83.0/50.0 + 2.0/5.0))
        if pix > self.__height_in_pixels:
            self.__height_in_pixels = pix

    def __adjust_bound_col_idx(self, *args):
        """Validate column indices and widen row/sheet used-column bounds."""
        for arg in args:
            iarg = int(arg)
            if not ((0 <= iarg <= 255) and arg == iarg):
                raise ValueError("column index (%r) not an int in range(256)" % arg)
            sheet = self.__parent
            if iarg < self.__min_col_idx:
                self.__min_col_idx = iarg
            if iarg > self.__max_col_idx:
                self.__max_col_idx = iarg
            if iarg < sheet.first_used_col:
                sheet.first_used_col = iarg
            if iarg > sheet.last_used_col:
                sheet.last_used_col = iarg

    def __excel_date_dt(self, date):
        """Convert a date/time/datetime to an Excel serial date number."""
        if isinstance(date, dt.date) and (not isinstance(date, dt.datetime)):
            epoch = dt.date(1899, 12, 31)
        elif isinstance(date, dt.time):
            # Time-of-day only: anchor to day zero so only the fraction remains.
            date = dt.datetime.combine(dt.datetime(1900, 1, 1), date)
            epoch = dt.datetime(1900, 1, 1, 0, 0, 0)
        else:
            epoch = dt.datetime(1899, 12, 31, 0, 0, 0)
        delta = date - epoch
        xldate = delta.days + float(delta.seconds) / (24*60*60)
        # Add a day for Excel's missing leap day in 1900
        if xldate > 59:
            xldate += 1
        return xldate

    def get_height_in_pixels(self):
        """Return the row height in pixels (as adjusted for cell fonts)."""
        return self.__height_in_pixels

    def set_style(self, style):
        """Apply *style* as the row-level default cell style."""
        self.__adjust_height(style)
        self.__xf_index = self.__parent_wb.add_style(style)
        self.__has_default_xf_index = 1

    def get_xf_index(self):
        return self.__xf_index

    def get_cells_count(self):
        return len(self.__cells)

    def get_min_col(self):
        return self.__min_col_idx

    def get_max_col(self):
        return self.__max_col_idx

    def get_row_biff_data(self):
        """Serialise this row's metadata into a BIFF ROW record."""
        height_options = (self.height & 0x07FFF)
        height_options |= (self.has_default_height & 0x01) << 15
        options = (self.level & 0x07) << 0
        options |= (self.collapse & 0x01) << 4
        options |= (self.hidden & 0x01) << 5
        options |= (self.height_mismatch & 0x01) << 6
        options |= (self.__has_default_xf_index & 0x01) << 7
        options |= (0x01 & 0x01) << 8
        options |= (self.__xf_index & 0x0FFF) << 16
        options |= (self.space_above & 1) << 28
        options |= (self.space_below & 1) << 29
        return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx,
            self.__max_col_idx, height_options, options).get()

    def insert_cell(self, col_index, cell_obj):
        """Store *cell_obj* at *col_index*, honouring the overwrite policy."""
        if col_index in self.__cells:
            if not self.__parent._cell_overwrite_ok:
                msg = "Attempt to overwrite cell: sheetname=%r rowx=%d colx=%d" \
                    % (self.__parent.name, self.__idx, col_index)
                raise Exception(msg)
            prev_cell_obj = self.__cells[col_index]
            # Release the overwritten cell's shared-string-table entry, if any.
            sst_idx = getattr(prev_cell_obj, 'sst_idx', None)
            if sst_idx is not None:
                self.__parent_wb.del_str(sst_idx)
        self.__cells[col_index] = cell_obj

    def insert_mulcells(self, colx1, colx2, cell_obj):
        """Store one multi-column cell; later columns hold None placeholders."""
        self.insert_cell(colx1, cell_obj)
        for col_index in range(colx1+1, colx2+1):
            self.insert_cell(col_index, None)

    def get_cells_biff_data(self):
        """Serialise all real cells (None placeholders excluded), by column."""
        cell_items = [item for item in self.__cells.items() if item[1] is not None]
        cell_items.sort() # in column order
        return _get_cells_biff_data_mul(self.__idx, cell_items)
        # previously:
        # return ''.join([cell.get_biff_data() for colx, cell in cell_items])

    def get_index(self):
        return self.__idx

    def set_cell_text(self, colx, value, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, StrCell(self.__idx, colx, xf_index, self.__parent_wb.add_str(value)))

    def set_cell_blank(self, colx, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, BlankCell(self.__idx, colx, xf_index))

    def set_cell_mulblanks(self, first_colx, last_colx, style=Style.default_style):
        assert 0 <= first_colx <= last_colx <= 255
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(first_colx, last_colx)
        xf_index = self.__parent_wb.add_style(style)
        # ncols = last_colx - first_colx + 1
        self.insert_mulcells(first_colx, last_colx, MulBlankCell(self.__idx, first_colx, last_colx, xf_index))

    def set_cell_number(self, colx, number, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, NumberCell(self.__idx, colx, xf_index, number))

    def set_cell_date(self, colx, datetime_obj, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx,
            NumberCell(self.__idx, colx, xf_index, self.__excel_date_dt(datetime_obj)))

    def set_cell_formula(self, colx, formula, style=Style.default_style, calc_flags=0):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.__parent_wb.add_sheet_reference(formula)
        # BUGFIX: forward the caller's calc_flags (was hard-coded to 0).
        self.insert_cell(colx, FormulaCell(self.__idx, colx, xf_index, formula, calc_flags=calc_flags))

    def set_cell_boolean(self, colx, value, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, BooleanCell(self.__idx, colx, xf_index, bool(value)))

    def set_cell_error(self, colx, error_string_or_code, style=Style.default_style):
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, ErrorCell(self.__idx, colx, xf_index, error_string_or_code))

    def write(self, col, label, style=Style.default_style):
        """Write *label* at *col*, dispatching on its Python type."""
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        style_index = self.__parent_wb.add_style(style)
        if isinstance(label, str):
            if len(label) > 0:
                self.insert_cell(col,
                    StrCell(self.__idx, col, style_index, self.__parent_wb.add_str(label))
                    )
            else:
                self.insert_cell(col, BlankCell(self.__idx, col, style_index))
        elif isinstance(label, bool): # bool is subclass of int; test bool first
            self.insert_cell(col, BooleanCell(self.__idx, col, style_index, label))
        elif isinstance(label, (float, int, Decimal)):
            self.insert_cell(col, NumberCell(self.__idx, col, style_index, label))
        elif isinstance(label, (dt.datetime, dt.date, dt.time)):
            date_number = self.__excel_date_dt(label)
            self.insert_cell(col, NumberCell(self.__idx, col, style_index, date_number))
        elif label is None:
            self.insert_cell(col, BlankCell(self.__idx, col, style_index))
        elif isinstance(label, ExcelFormula.Formula):
            self.__parent_wb.add_sheet_reference(label)
            self.insert_cell(col, FormulaCell(self.__idx, col, style_index, label))
        else:
            raise Exception("Unexpected data type %r" % type(label))

    # Alias kept for API compatibility.
    write_blanks = set_cell_mulblanks
| apache-2.0 |
leeahoward/django-rest-framework | rest_framework/validators.py | 73 | 9603 | """
We perform uniqueness checks explicitly on the serializer class, rather
the using Django's `.full_clean()`.
This gives us better separation of concerns, allows us to use single-step
object creation, and makes it possible to switch between using the implicit
`ModelSerializer` class and an equivalent explicit `Serializer` class.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import unicode_to_repr
from rest_framework.exceptions import ValidationError
from rest_framework.utils.representation import smart_repr
class UniqueValidator(object):
    """
    Validator that corresponds to `unique=True` on a model field.
    Should be applied to an individual field on the serializer.
    """
    message = _('This field must be unique.')

    def __init__(self, queryset, message=None):
        self.queryset = queryset
        self.serializer_field = None
        self.message = message or self.message

    def set_context(self, serializer_field):
        """
        This hook is called by the serializer instance,
        prior to the validation call being made.
        """
        # The underlying model field name may differ from the serializer
        # field name when `source=<>` is set; use the first source attr.
        self.field_name = serializer_field.source_attrs[0]
        # On updates, remember the instance so it can exclude itself.
        self.instance = getattr(serializer_field.parent, 'instance', None)

    def filter_queryset(self, value, queryset):
        """
        Filter the queryset to all instances matching the given attribute.
        """
        return queryset.filter(**{self.field_name: value})

    def exclude_current_instance(self, queryset):
        """
        If an instance is being updated, then do not include
        that instance itself as a uniqueness conflict.
        """
        if self.instance is None:
            return queryset
        return queryset.exclude(pk=self.instance.pk)

    def __call__(self, value):
        conflicts = self.exclude_current_instance(
            self.filter_queryset(value, self.queryset))
        if conflicts.exists():
            raise ValidationError(self.message)

    def __repr__(self):
        return unicode_to_repr('<{0}(queryset={1})>'.format(
            self.__class__.__name__, smart_repr(self.queryset)))
class UniqueTogetherValidator(object):
    """
    Validator that corresponds to `unique_together = (...)` on a model class.
    Should be applied to the serializer class, not to an individual field.
    """
    message = _('The fields {field_names} must make a unique set.')
    missing_message = _('This field is required.')
    def __init__(self, queryset, fields, message=None):
        self.queryset = queryset
        self.fields = fields
        self.serializer_field = None
        self.message = message or self.message
    def set_context(self, serializer):
        """
        This hook is called by the serializer instance,
        prior to the validation call being made.
        """
        # Determine the existing instance, if this is an update operation.
        self.instance = getattr(serializer, 'instance', None)
    def enforce_required_fields(self, attrs):
        """
        The `UniqueTogetherValidator` always forces an implied 'required'
        state on the fields it applies to.
        """
        # Updates may legitimately omit fields (values fall back to the
        # instance in filter_queryset), so only enforce on create.
        if self.instance is not None:
            return
        missing = dict([
            (field_name, self.missing_message)
            for field_name in self.fields
            if field_name not in attrs
        ])
        if missing:
            raise ValidationError(missing)
    def filter_queryset(self, attrs, queryset):
        """
        Filter the queryset to all instances matching the given attributes.
        """
        # If this is an update, then any unprovided field should
        # have its value set based on the existing instance attribute.
        # NOTE: this mutates `attrs` in place.
        if self.instance is not None:
            for field_name in self.fields:
                if field_name not in attrs:
                    attrs[field_name] = getattr(self.instance, field_name)
        # Determine the filter keyword arguments and filter the queryset.
        filter_kwargs = dict([
            (field_name, attrs[field_name])
            for field_name in self.fields
        ])
        return queryset.filter(**filter_kwargs)
    def exclude_current_instance(self, attrs, queryset):
        """
        If an instance is being updated, then do not include
        that instance itself as a uniqueness conflict.
        """
        if self.instance is not None:
            return queryset.exclude(pk=self.instance.pk)
        return queryset
    def __call__(self, attrs):
        self.enforce_required_fields(attrs)
        queryset = self.queryset
        queryset = self.filter_queryset(attrs, queryset)
        queryset = self.exclude_current_instance(attrs, queryset)
        # Ignore validation if any field is None
        checked_values = [
            value for field, value in attrs.items() if field in self.fields
        ]
        if None not in checked_values and queryset.exists():
            field_names = ', '.join(self.fields)
            raise ValidationError(self.message.format(field_names=field_names))
    def __repr__(self):
        return unicode_to_repr('<%s(queryset=%s, fields=%s)>' % (
            self.__class__.__name__,
            smart_repr(self.queryset),
            smart_repr(self.fields)
        ))
class BaseUniqueForValidator(object):
    """
    Shared machinery for the `UniqueFor<Range>Validator` classes: subclasses
    supply `message` and implement `filter_queryset`.
    """
    message = None
    missing_message = _('This field is required.')
    def __init__(self, queryset, field, date_field, message=None):
        self.queryset = queryset
        self.field = field
        self.date_field = date_field
        self.message = message or self.message
    def set_context(self, serializer):
        """
        This hook is called by the serializer instance,
        prior to the validation call being made.
        """
        # Determine the underlying model field names. These may not be the
        # same as the serializer field names if `source=<>` is set.
        self.field_name = serializer.fields[self.field].source_attrs[0]
        self.date_field_name = serializer.fields[self.date_field].source_attrs[0]
        # Determine the existing instance, if this is an update operation.
        self.instance = getattr(serializer, 'instance', None)
    def enforce_required_fields(self, attrs):
        """
        The `UniqueFor<Range>Validator` classes always force an implied
        'required' state on the fields they are applied to.
        """
        missing = dict([
            (field_name, self.missing_message)
            for field_name in [self.field, self.date_field]
            if field_name not in attrs
        ])
        if missing:
            raise ValidationError(missing)
    def filter_queryset(self, attrs, queryset):
        # Subclass responsibility: narrow the queryset to the date range.
        raise NotImplementedError('`filter_queryset` must be implemented.')
    def exclude_current_instance(self, attrs, queryset):
        """
        If an instance is being updated, then do not include
        that instance itself as a uniqueness conflict.
        """
        if self.instance is not None:
            return queryset.exclude(pk=self.instance.pk)
        return queryset
    def __call__(self, attrs):
        self.enforce_required_fields(attrs)
        queryset = self.queryset
        queryset = self.filter_queryset(attrs, queryset)
        queryset = self.exclude_current_instance(attrs, queryset)
        if queryset.exists():
            # The error is reported against the value field, not the date field.
            message = self.message.format(date_field=self.date_field)
            raise ValidationError({self.field: message})
    def __repr__(self):
        return unicode_to_repr('<%s(queryset=%s, field=%s, date_field=%s)>' % (
            self.__class__.__name__,
            smart_repr(self.queryset),
            smart_repr(self.field),
            smart_repr(self.date_field)
        ))
class UniqueForDateValidator(BaseUniqueForValidator):
    """Enforce uniqueness of `field` among rows sharing the same date."""
    message = _('This field must be unique for the "{date_field}" date.')

    def filter_queryset(self, attrs, queryset):
        # Match rows with the same value on the same calendar day.
        date = attrs[self.date_field]
        lookup = {
            self.field_name: attrs[self.field],
            '%s__day' % self.date_field_name: date.day,
            '%s__month' % self.date_field_name: date.month,
            '%s__year' % self.date_field_name: date.year,
        }
        return queryset.filter(**lookup)
class UniqueForMonthValidator(BaseUniqueForValidator):
    """Enforce uniqueness of `field` among rows sharing the same month."""
    message = _('This field must be unique for the "{date_field}" month.')

    def filter_queryset(self, attrs, queryset):
        lookup = {
            self.field_name: attrs[self.field],
            '%s__month' % self.date_field_name: attrs[self.date_field].month,
        }
        return queryset.filter(**lookup)
class UniqueForYearValidator(BaseUniqueForValidator):
    """Enforce uniqueness of `field` among rows sharing the same year."""
    message = _('This field must be unique for the "{date_field}" year.')

    def filter_queryset(self, attrs, queryset):
        lookup = {
            self.field_name: attrs[self.field],
            '%s__year' % self.date_field_name: attrs[self.date_field].year,
        }
        return queryset.filter(**lookup)
| bsd-2-clause |
nicolargo/intellij-community | python/lib/Lib/site-packages/django/db/backends/mysql/base.py | 71 | 13240 | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
import sys
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG, CLIENT
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.

    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes that really indicate integrity violations even
    # though MySQLdb reports them as OperationalError
    # (1048: "Column cannot be null").
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        # The real MySQLdb cursor being wrapped.
        self.cursor = cursor

    def execute(self, query, args=None):
        # Run a single statement, translating MySQLdb exceptions into
        # Django's utils.* equivalents.  The Python 2 three-argument
        # raise form preserves the original traceback.
        try:
            return self.cursor.execute(query, args)
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise
        except Database.DatabaseError, e:
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def executemany(self, query, args):
        # Same exception translation as execute(), for executemany().
        try:
            return self.cursor.executemany(query, args)
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise
        except Database.DatabaseError, e:
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; delegate
        # everything else to the underlying cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        # Iterating the wrapper iterates the underlying cursor's rows.
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    # Capability flags describing what this MySQL backend supports;
    # the ORM consults these when deciding which SQL constructs to emit.
    empty_fetchmany_value = ()
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
    allow_sliced_subqueries = False
    supports_forward_references = False
    supports_long_model_names = False
    supports_microsecond_precision = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_primary_key_0 = False
class DatabaseOperations(BaseDatabaseOperations):
    compiler_module = "django.db.backends.mysql.compiler"

    def date_extract_sql(self, lookup_type, field_name):
        # SQL extracting a single date component from a column.
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        # SQL truncating a datetime column to the given precision; the
        # components finer than lookup_type are replaced by defaults.
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: fall back to the raw column.
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def date_interval_sql(self, sql, connector, timedelta):
        # SQL adding/subtracting a Python timedelta to a date expression.
        return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
            timedelta.days, timedelta.seconds, timedelta.microseconds)

    def drop_foreignkey_sql(self):
        # Clause used when dropping a foreign key constraint.
        return "DROP FOREIGN KEY"

    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return ["NULL"]

    def fulltext_search_sql(self, field_name):
        # Full-text search clause; %%s is the placeholder for the query.
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615L

    def quote_name(self, name):
        # Backtick-quote an identifier unless it is already quoted.
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences):
        # Statements emptying all tables and resetting AUTO_INCREMENT.
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # Foreign key checks are disabled so TRUNCATE works in any
            # table order, then re-enabled afterwards.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            # 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
            # to reset sequence indices
            sql.extend(["%s %s %s %s %s;" % \
                (style.SQL_KEYWORD('ALTER'),
                style.SQL_KEYWORD('TABLE'),
                style.SQL_TABLE(self.quote_name(sequence['table'])),
                style.SQL_KEYWORD('AUTO_INCREMENT'),
                style.SQL_FIELD('= 1'),
                ) for sequence in sequences])
            return sql
        else:
            return []

    def value_to_db_datetime(self, value):
        # Convert a datetime for storage; tz-aware values are rejected.
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")
        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))

    def value_to_db_time(self, value):
        # Convert a time for storage; tz-aware values are rejected.
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")
        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))

    def year_lookup_bounds(self, value):
        # Inclusive datetime range covering the given year.
        # Again, no microseconds
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59.99'
        return [first % value, second % value]

    def max_name_length(self):
        # MySQL's identifier length limit.
        return 64
class DatabaseWrapper(BaseDatabaseWrapper):
    vendor = 'mysql'
    # Mapping from Django lookup types to MySQL operator templates.
    # BINARY forces case-sensitive matching for the non-"i" variants.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Cached version tuple; filled lazily by get_server_version().
        self.server_version = None
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)

    def _valid_connection(self):
        # Ping the server; drop the cached connection if it has died.
        if self.connection is not None:
            try:
                self.connection.ping()
                return True
            except DatabaseError:
                self.connection.close()
                self.connection = None
        return False

    def _cursor(self):
        # Return a wrapped cursor, (re)connecting first if necessary.
        if not self._valid_connection():
            kwargs = {
                'conv': django_conversions,
                'charset': 'utf8',
                'use_unicode': True,
            }
            settings_dict = self.settings_dict
            if settings_dict['USER']:
                kwargs['user'] = settings_dict['USER']
            if settings_dict['NAME']:
                kwargs['db'] = settings_dict['NAME']
            if settings_dict['PASSWORD']:
                kwargs['passwd'] = settings_dict['PASSWORD']
            # A HOST beginning with a slash is a Unix socket path.
            if settings_dict['HOST'].startswith('/'):
                kwargs['unix_socket'] = settings_dict['HOST']
            elif settings_dict['HOST']:
                kwargs['host'] = settings_dict['HOST']
            if settings_dict['PORT']:
                kwargs['port'] = int(settings_dict['PORT'])
            # We need the number of potentially affected rows after an
            # "UPDATE", not the number of changed rows.
            kwargs['client_flag'] = CLIENT.FOUND_ROWS
            kwargs.update(settings_dict['OPTIONS'])
            self.connection = Database.connect(**kwargs)
            # Teach MySQLdb how to encode Django's safe-string types.
            self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
            self.connection.encoders[SafeString] = self.connection.encoders[str]
            connection_created.send(sender=self.__class__, connection=self)
        cursor = CursorWrapper(self.connection.cursor())
        return cursor

    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # Non-transactional storage engines (MyISAM) cannot roll back.
            pass

    def get_server_version(self):
        # Return the server version as a tuple of ints, caching it.
        if not self.server_version:
            if not self._valid_connection():
                self.cursor()
            m = server_version_re.match(self.connection.get_server_info())
            if not m:
                raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
            self.server_version = tuple([int(x) for x in m.groups()])
        return self.server_version
| apache-2.0 |
AchimTuran/sfs-python | examples/modal_room_acoustics.py | 1 | 1362 | """
This example illustrates the use of the modal room model.
"""
import numpy as np
import matplotlib.pyplot as plt
import sfs
x0 = [1, 3, 1.80]  # source position
L = [6, 6, 3]  # dimensions of room
deltan = 0.01  # absorption factor of walls
n0 = [1, 0, 0]  # normal vector of source (only for compatibility)
N = 20  # maximum order of modes
#N = [1, 0, 0]  # room mode to compute
fresponse = True  # frequency response or sound field?
# compute and plot frequency response at one point
if fresponse:
    f = np.linspace(20, 200, 180)  # frequency
    omega = 2 * np.pi * f  # angular frequency
    # single receiver position at (1, 1, 1.80)
    grid = sfs.util.xyz_grid(1, 1, 1.80, spacing=1)
    p = []
    for om in omega:
        p.append(sfs.mono.source.point_modal(om, x0, n0, grid, L,
                                             N=N, deltan=deltan))
    p = np.asarray(p)
    # level in dB relative to unit pressure
    plt.plot(f, 20*np.log10(np.abs(p)))
    plt.xlabel('frequency / Hz')
    plt.ylabel('level / dB')
    plt.grid()
# compute and plot sound field for one frequency
if not fresponse:
    f = 500  # frequency
    omega = 2 * np.pi * f  # angular frequency
    # horizontal plane through the room at height L[2]
    grid = sfs.util.xyz_grid([0, L[0]], [0, L[1]], L[2], spacing=.1)
    p = sfs.mono.source.point_modal(omega, x0, n0, grid, L, N=N, deltan=deltan)
    sfs.plot.soundfield(p, grid, xnorm=[2, 3, 0], colorbar=False,
                        vmax=2, vmin=-2)
| mit |
esander91/zulip | zerver/lib/test_helpers.py | 4 | 12477 | from __future__ import absolute_import
from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient,
)
from zerver.models import (
get_realm,
get_user_profile_by_email,
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
import six
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
    """Temporarily replace attribute `name` of `obj` with `f`.

    The original attribute is restored even when the body of the `with`
    block raises; previously the stub leaked into subsequent tests on
    any exception.
    """
    old_f = getattr(obj, name)
    setattr(obj, name, f)
    try:
        yield
    finally:
        setattr(obj, name, old_f)
@contextmanager
def simulated_queue_client(client):
    """Swap in a fake SimpleQueueClient class for the queue processors.

    Restores the real class even when the body raises, so a failing
    test cannot leave the queue machinery patched.
    """
    real_SimpleQueueClient = queue_processors.SimpleQueueClient
    queue_processors.SimpleQueueClient = client
    try:
        yield
    finally:
        queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
    """Capture Tornado event-queue notifications into `lst` instead of
    delivering them.

    The real handler is restored even when the body raises.
    """
    real_event_queue_process_notification = event_queue.process_notification
    event_queue.process_notification = lst.append
    try:
        yield
    finally:
        event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
    """Patch the cache layer to always miss, recording every lookup.

    Yields the list of (operation, key(s), cache_name) tuples observed.
    The real cache functions are restored even when the body raises.
    """
    cache_queries = []

    def my_cache_get(key, cache_name=None):
        cache_queries.append(('get', key, cache_name))
        return None

    def my_cache_get_many(keys, cache_name=None):
        cache_queries.append(('getmany', keys, cache_name))
        return None

    old_get = cache.cache_get
    old_get_many = cache.cache_get_many
    cache.cache_get = my_cache_get
    cache.cache_get_many = my_cache_get_many
    try:
        yield cache_queries
    finally:
        cache.cache_get = old_get
        cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
    '''
    Allow a user to capture just the queries executed during
    the with statement.

    Yields a list that accumulates one dict per query, containing the
    fully-substituted SQL and its wall-clock duration.  The cursor
    methods are restored even when the body raises, so one failing test
    cannot leave the cursor monkey-patched for the rest of the suite.
    '''
    queries = []

    def wrapper_execute(self, action, sql, params=()):
        # Time the query and record it whether or not it raises.
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            queries.append({
                'sql': self.mogrify(sql, params),
                'time': "%.3f" % duration,
            })

    old_execute = TimeTrackingCursor.execute
    old_executemany = TimeTrackingCursor.executemany

    def cursor_execute(self, sql, params=()):
        return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
    TimeTrackingCursor.execute = cursor_execute

    def cursor_executemany(self, sql, params=()):
        return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
    TimeTrackingCursor.executemany = cursor_executemany

    try:
        yield queries
    finally:
        TimeTrackingCursor.execute = old_execute
        TimeTrackingCursor.executemany = old_executemany
def find_key_by_email(address):
    """Return the most recent confirmation key emailed to `address`.

    Scans Django's test outbox from newest to oldest; returns None when
    no message was sent to that address.
    """
    from django.core.mail import outbox
    pattern = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
    for message in reversed(outbox):
        if address in message.to:
            return pattern.search(message.body).group(1)
def message_ids(result):
    """Return the set of ids of the messages in an API `result` dict."""
    return {m['id'] for m in result['messages']}
def message_stream_count(user_profile):
    # Number of UserMessage rows (i.e. messages received) for this user.
    return UserMessage.objects. \
        select_related("message"). \
        filter(user_profile=user_profile). \
        count()
def most_recent_usermessage(user_profile):
    # Newest UserMessage row for this user (ordered by message id).
    query = UserMessage.objects. \
        select_related("message"). \
        filter(user_profile=user_profile). \
        order_by('-message')
    return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
    """Return the newest Message delivered to `user_profile`."""
    return most_recent_usermessage(user_profile).message
def get_user_messages(user_profile):
    # All Messages delivered to this user, oldest first.
    query = UserMessage.objects. \
        select_related("message"). \
        filter(user_profile=user_profile). \
        order_by('message')
    return [um.message for um in query]
class DummyObject(object):
    # Empty attribute bag used to fake nested Tornado request objects.
    pass
class DummyTornadoRequest(object):
    # Mimics just enough of tornado's HTTPRequest for code that
    # touches request.connection.stream.
    def __init__(self):
        self.connection = DummyObject()
        self.connection.stream = DummyStream()
class DummyHandler(object):
    """Stand-in for a Tornado RequestHandler used by POSTRequestMock."""
    def __init__(self, assert_callback):
        # Callback invoked with the response by zulip_finish (may be None).
        self.assert_callback = assert_callback
        self.request = DummyTornadoRequest()

    # Mocks RequestHandler.async_callback, which wraps a callback to
    # handle exceptions. We return the callback as-is.
    def async_callback(self, cb):
        return cb

    def write(self, response):
        # Bug fix: the original `raise NotImplemented` raised a
        # TypeError, because NotImplemented is not an exception class.
        raise NotImplementedError()

    def zulip_finish(self, response, *ignore):
        if self.assert_callback:
            self.assert_callback(response)
class DummySession(object):
    # Minimal stand-in for a Django session object.
    session_key = "0"
class DummyStream(object):
    # Fake Tornado IOStream that always reports itself open.
    def closed(self):
        return False
class POSTRequestMock(object):
    """Minimal stand-in for a Django POST HttpRequest, wired to a
    DummyHandler so Tornado-facing code paths can run in tests."""
    method = "POST"

    def __init__(self, post_data, user_profile, assert_callback=None):
        # REQUEST is the legacy merged GET/POST alias; both names point
        # at the same data.
        self.REQUEST = self.POST = post_data
        self.user = user_profile
        self._tornado_handler = DummyHandler(assert_callback)
        self.session = DummySession()
        self.META = {'PATH_INFO': 'test'}
        # Bug fix: the original assigned self._log_data = {} twice.
        self._log_data = {}
class AuthedTestCase(TestCase):
    """TestCase base class with helpers for logging in, registering,
    subscribing and messaging Zulip test users, plus assertion helpers
    for JSON API responses."""

    # Helper because self.client.patch annoying requires you to urlencode.
    # Fixed: mutable default argument replaced with None sentinel.
    def client_patch(self, url, info=None, **kwargs):
        info = urllib.urlencode(info or {})
        return self.client.patch(url, info, **kwargs)

    def client_put(self, url, info=None, **kwargs):
        info = urllib.urlencode(info or {})
        return self.client.put(url, info, **kwargs)

    def client_delete(self, url, info=None, **kwargs):
        info = urllib.urlencode(info or {})
        return self.client.delete(url, info, **kwargs)

    def login(self, email, password=None):
        """Log in via the login form; defaults to the user's initial password."""
        if password is None:
            password = initial_password(email)
        return self.client.post('/accounts/login/',
                                {'username': email, 'password': password})

    def register(self, username, password, domain="zulip.com"):
        """Run both stages of the signup flow for a new user."""
        self.client.post('/accounts/home/',
                         {'email': username + "@" + domain})
        return self.submit_reg_form_for_user(username, password, domain=domain)

    def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
        """
        Stage two of the two-step registration process.

        If things are working correctly the account should be fully
        registered after this call.
        """
        return self.client.post('/accounts/register/',
                                {'full_name': username, 'password': password,
                                 'key': find_key_by_email(username + '@' + domain),
                                 'terms': True})

    def get_api_key(self, email):
        # API keys are cached module-wide to avoid repeated DB lookups.
        if email not in API_KEYS:
            API_KEYS[email] = get_user_profile_by_email(email).api_key
        return API_KEYS[email]

    def api_auth(self, email):
        """Return HTTP basic-auth headers carrying the user's API key."""
        credentials = "%s:%s" % (email, self.get_api_key(email))
        return {
            'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
        }

    def get_streams(self, email):
        """
        Helper function to get the stream names for a user
        """
        user_profile = get_user_profile_by_email(email)
        subs = Subscription.objects.filter(
            user_profile=user_profile,
            active=True,
            recipient__type=Recipient.STREAM)
        return [get_display_recipient(sub.recipient) for sub in subs]

    def send_message(self, sender_name, recipient_list, message_type,
                     content="test content", subject="test", **kwargs):
        """Send a stream or private message; recipient_list may be a
        single recipient string or a list of recipients."""
        sender = get_user_profile_by_email(sender_name)
        if message_type == Recipient.PERSONAL:
            message_type_name = "private"
        else:
            message_type_name = "stream"
        if isinstance(recipient_list, six.string_types):
            recipient_list = [recipient_list]
        (sending_client, _) = Client.objects.get_or_create(name="test suite")
        return check_send_message(
            sender, sending_client, message_type_name, recipient_list, subject,
            content, forged=False, forged_timestamp=None,
            forwarder_user_profile=sender, realm=sender.realm, **kwargs)

    def get_old_messages(self, anchor=1, num_before=100, num_after=100):
        """Fetch messages around `anchor` via the JSON API."""
        post_params = {"anchor": anchor, "num_before": num_before,
                       "num_after": num_after}
        result = self.client.post("/json/get_old_messages", dict(post_params))
        data = ujson.loads(result.content)
        return data['messages']

    def users_subscribed_to_stream(self, stream_name, realm_domain):
        """Return the UserProfiles subscribed to the named stream."""
        realm = get_realm(realm_domain)
        stream = Stream.objects.get(name=stream_name, realm=realm)
        recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
        return [subscription.user_profile for subscription in subscriptions]

    def assert_json_success(self, result):
        """
        Successful POSTs return a 200 and JSON of the form {"result": "success",
        "msg": ""}.
        """
        self.assertEqual(result.status_code, 200, result)
        json = ujson.loads(result.content)
        self.assertEqual(json.get("result"), "success")
        # We have a msg key for consistency with errors, but it typically has an
        # empty value.
        self.assertIn("msg", json)
        return json

    def get_json_error(self, result, status_code=400):
        """Assert an error response and return its "msg" field."""
        self.assertEqual(result.status_code, status_code)
        json = ujson.loads(result.content)
        self.assertEqual(json.get("result"), "error")
        return json['msg']

    def assert_json_error(self, result, msg, status_code=400):
        """
        Invalid POSTs return an error status code and JSON of the form
        {"result": "error", "msg": "reason"}.
        """
        self.assertEqual(self.get_json_error(result, status_code=status_code), msg)

    def assert_length(self, queries, count, exact=False):
        """Assert len(queries) <= count, or == count when exact=True."""
        actual_count = len(queries)
        if exact:
            return self.assertTrue(actual_count == count,
                                   "len(%s) == %s, != %s" % (queries, actual_count, count))
        return self.assertTrue(actual_count <= count,
                               "len(%s) == %s, > %s" % (queries, actual_count, count))

    def assert_json_error_contains(self, result, msg_substring):
        self.assertIn(msg_substring, self.get_json_error(result))

    def fixture_data(self, type, action, file_type='json'):
        """Read a test fixture file.  Fixed: the original leaked the
        file handle; `type` keeps its (builtin-shadowing) name so
        keyword callers still work."""
        fixture_path = os.path.join(os.path.dirname(__file__),
                                    "../fixtures/%s/%s_%s.%s" % (type, type, action, file_type))
        with open(fixture_path) as f:
            return f.read()

    # Subscribe to a stream directly
    def subscribe_to_stream(self, email, stream_name, realm=None):
        """Subscribe the user to a stream, creating it if necessary.

        Fixed: the `realm` argument was previously accepted but always
        overwritten; it is now honored when provided."""
        if realm is None:
            realm = get_realm(resolve_email_to_domain(email))
        stream, _ = create_stream_if_needed(realm, stream_name)
        user_profile = get_user_profile_by_email(email)
        do_add_subscription(user_profile, stream, no_log=True)

    # Subscribe to a stream by making an API request
    def common_subscribe_to_streams(self, email, streams, extra_post_data=None, invite_only=False):
        post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
                     'invite_only': ujson.dumps(invite_only)}
        post_data.update(extra_post_data or {})
        result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
        return result

    def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
        """POST a payload (e.g. a webhook fixture) and return the
        Message it generated."""
        if stream_name is not None:
            self.subscribe_to_stream(email, stream_name)
        result = self.client.post(url, payload, **post_params)
        self.assert_json_success(result)
        # Check the correct message was sent
        msg = Message.objects.filter().order_by('-id')[0]
        self.assertEqual(msg.sender.email, email)
        self.assertEqual(get_display_recipient(msg.recipient), stream_name)
        return msg
| apache-2.0 |
jhuapl-marti/marti | env-crits/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.py | 906 | 1809 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
class Infinity(object):
    """Singleton sentinel that compares greater than every other value
    and equal only to itself."""

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return False

    # "less-or-equal" is identical to "less-than" for this sentinel.
    __le__ = __lt__

    def __gt__(self, other):
        return True

    __ge__ = __gt__

    def __neg__(self):
        return NegativeInfinity


# Replace the class with its single instance.
Infinity = Infinity()
class NegativeInfinity(object):
    """Singleton sentinel that compares less than every other value
    and equal only to itself."""

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return True

    # "less-or-equal" is identical to "less-than" for this sentinel.
    __le__ = __lt__

    def __gt__(self, other):
        return False

    __ge__ = __gt__

    def __neg__(self):
        return Infinity


# Replace the class with its single instance.
NegativeInfinity = NegativeInfinity()
| mit |
Sonicbids/django | django/core/management/commands/shell.py | 492 | 3951 | import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = "Runs a Python interactive interpreter. Tries to use IPython or bpython, if one of them is available."
    requires_system_checks = False
    # Preference order of the fancy shells tried before falling back
    # to the plain `code` module.
    shells = ['ipython', 'bpython']

    def add_arguments(self, parser):
        """Register the --plain, --no-startup and -i/--interface options."""
        parser.add_argument('--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not IPython or bpython.')
        parser.add_argument('--no-startup', action='store_true', dest='no_startup',
            help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.')
        parser.add_argument('-i', '--interface', choices=self.shells, dest='interface',
            help='Specify an interactive interpreter interface. Available options: "ipython" and "bpython"')

    def _ipython_pre_011(self):
        """Start IPython pre-0.11"""
        from IPython.Shell import IPShell
        shell = IPShell(argv=[])
        shell.mainloop()

    def _ipython_pre_100(self):
        """Start IPython pre-1.0.0"""
        from IPython.frontend.terminal.ipapp import TerminalIPythonApp
        app = TerminalIPythonApp.instance()
        app.initialize(argv=[])
        app.start()

    def _ipython(self):
        """Start IPython >= 1.0"""
        from IPython import start_ipython
        start_ipython(argv=[])

    def ipython(self):
        """Start any version of IPython"""
        # Try the newest entry point first, falling back through the
        # older APIs until one imports successfully.
        for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
            try:
                ip()
            except ImportError:
                pass
            else:
                return
        # no IPython, raise ImportError
        raise ImportError("No IPython")

    def bpython(self):
        """Start the bpython shell."""
        import bpython
        bpython.embed()

    def run_shell(self, shell=None):
        """Launch the requested shell, or the first available one."""
        available_shells = [shell] if shell else self.shells
        for shell in available_shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle(self, **options):
        """Entry point: run a fancy shell when available, else plain `code`."""
        try:
            if options['plain']:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError

            self.run_shell(shell=options['interface'])
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try:  # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")

            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
            if not options['no_startup']:
                for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
                    if not pythonrc:
                        continue
                    pythonrc = os.path.expanduser(pythonrc)
                    if not os.path.isfile(pythonrc):
                        continue
                    try:
                        with open(pythonrc) as handle:
                            exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
                    except NameError:
                        pass
            code.interact(local=imported_objects)
| bsd-3-clause |
sigma-random/peach | Peach/generator.py | 3 | 5630 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
class Generator(object):
"""
Generators generate data. Examples of generators could be a static string
or integer, a string repeater, etc. Generators can be "incremented" by
calling C{next()} to produce the next variant of data. Generators can be
fairly complex, combining sub-generators to build things like packets.
Generators support the iterator protocol and can be used as such.
When building a Generator one should keep in mind that the value from a
generator could be asked for more then once per "round". Also it is
recommended that you use the default C{getValue()} implementation and
override the C{getRawValue()} method instead.
@see: L{SimpleGenerator}
"""
def __init__(self):
"""Base constructor, please call me!"""
self._group = None
self._transformer = None
self._identity = None # Stack-trace of were we came from
self._name = None
# For debugging.
# This is slow (0.02 sec), sometimes this init function can get called
# like 50K times during initialization of a large fuzzing object tree!
# self._identity = traceback.format_stack()
def identity(self):
"""Who are we and were do we come from?"""
return self._identity
def __iter__(self):
"""
Return iterator for Generator object. This is always the Generator
object itself.
@rtype: Generator
@return: Returns iterator, this is always self.
"""
return self
def next(self):
"""For Generators, please use the GeneratorCompleted exception instead
of StopIteration (its a subclass)."""
raise GeneratorCompleted("Peach.generator.Generator")
def getValue(self):
"""Return data, passed through a transformer if set.
@rtype: string
@return: Returns generated data
"""
if self._transformer is not None:
return self._transformer.encode(self.getRawValue())
return self.getRawValue()
def getRawValue(self):
"""Return raw value without passing through transformer if set.
@rtype: string
@return: Data before transformations
"""
return None
def getGroup(self):
"""Get the group this Generator belongs to. Groups are used to
increment sets of Generators.
@rtype: Group
@return: Returns Group this generator belongs to
"""
return self._group
def setGroup(self, group):
"""
Set the group this Generator belongs to. This function will
automatically add the Generator into the Group. Groups are used to
increment sets of Generators.
@type group: Group
@param group: Group this generator belongs to
"""
self._group = group
if self._group is not None:
self._group.addGenerator(self)
def getTransformer(self):
"""
Get transformer (if set). Transformers are used to transform data in
some way (such as HTML encoding, etc).
@rtype: Transformer
@return: Current transformer or None
"""
return self._transformer
def setTransformer(self, trans):
"""
Set transformer. Transformers are used to transform data in some way
(such as HTML encoding, etc).
@type trans: Transformer
@param trans: Transformer to run data through
@rtype: Generator
@return: self
"""
self._transformer = trans
return self
def reset(self):
"""Called to reset the generator to its initial state."""
pass
def getName(self):
"""Get the name of this generator. Useful for debugging."""
return self._name
def setName(self, name):
"""
Set the name of this generator. Useful for debugging complex data
generators. Stack-traces may end up in a generator creation statement
giving limited feedback on which generator in an array might be causing
the problem.
@type name: string
@param name: Name of generator
"""
self._name = name
class SimpleGenerator(Generator):
    """Wrapper around a single, possibly complex, generator expression.

    Useful for packaging a composite generator statement for reuse.  A
    subclass simply builds C{self._generator} in its constructor:

        class MySimpleGenerator(SimpleGenerator):
            def __init__(self, group = None):
                SimpleGenerator.__init__(self, group)
                self._generator = GeneratorList(None, [
                    Static('AAA'),
                    Repeater(None, Static('A'), 1, 100)
                    ])

    NOTE: Do not set a group on your inner generators unless they should
    not be incremented by self._generator.next().
    """
    def __init__(self, group=None):
        """
        @type group: Group
        @param group: Group to use
        """
        Generator.__init__(self)
        self.setGroup(group)
        # Subclasses assign the wrapped generator here.
        self._generator = None
    def next(self):
        # Advance the inner generator; its GeneratorCompleted propagates up.
        self._generator.next()
    def getRawValue(self):
        # The inner generator's (already transformed) value is our raw value.
        return self._generator.getValue()
    def reset(self):
        self._generator.reset()
class GeneratorCompleted(StopIteration):
    """Raised once a generator has produced every permutation of its data.

    Subclasses StopIteration so an exhausted generator also terminates
    ordinary iteration cleanly.
    """
    pass
| mpl-2.0 |
yaoshanliang/markdown-preview.vim | pythonx/markdown_parser.py | 3 | 34899 | # coding: utf-8
"""
mistune
~~~~~~~
The fastest markdown parser in pure Python with renderer feature.
:copyright: (c) 2014 - 2015 by Hsiaoming Yang.
"""
import re
import inspect
__version__ = '0.7.1'
__author__ = 'Hsiaoming Yang <me@lepture.com>'
__all__ = [
'BlockGrammar', 'BlockLexer',
'InlineGrammar', 'InlineLexer',
'Renderer', 'Markdown',
'markdown', 'escape',
]
# Collapses runs of whitespace when normalising link/footnote keys.
_key_pattern = re.compile(r'\s+')
# Matches a bare '&' that does not already start an HTML entity.
_escape_pattern = re.compile(r'&(?!#?\w+;)')
# Matches CR and CRLF line endings, for normalisation to '\n'.
_newline_pattern = re.compile(r'\r\n|\r')
# Strips the leading '> ' marker from every line of a block quote.
_block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M)
# Strips the 4-space indent from indented code blocks.
# NOTE(review): the name contains a typo ("leadning"); kept as-is because
# other code in this module references it by this exact name.
_block_code_leadning_pattern = re.compile(r'^ {4}', re.M)
# HTML tags treated as inline (span-level) content.
_inline_tags = [
    'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data',
    'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark',
    'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del',
    'img', 'font',
]
# Tags whose content must never be re-parsed as markdown.
_pre_tags = ['pre', 'script', 'style']
# Regex fragment: tag-name boundary that rejects URL- and e-mail-like text.
_valid_end = r'(?!:/|[^\w\s@]*@)\b'
# Regex fragment matching one chunk of an HTML attribute value.
_valid_attr = r'''"[^"]*"|'[^']*'|[^'">]'''
# Regex fragment matching a block-level (i.e. non-inline) tag name.
_block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end)
def _pure_pattern(regex):
    """Return the source text of a compiled *regex* with any leading ``^``
    anchor removed, so it can be embedded inside larger patterns."""
    source = regex.pattern
    return source[1:] if source.startswith('^') else source
def _keyify(key):
    """Normalise a link/footnote key: lower-case it and collapse every run
    of whitespace to a single space."""
    lowered = key.lower()
    return _key_pattern.sub(' ', lowered)
def escape(text, quote=False, smart_amp=True):
    """Replace the special characters "&", "<" and ">" with HTML-safe
    entity sequences.

    Bug fix: the replacement entities had been lost, so each character was
    being replaced with itself and the function was a no-op (e.g.
    ``text.replace('<', '<')``).  The proper HTML entities are restored.

    The original cgi.escape will always escape "&", but you can control
    this one for a smart escape amp.

    :param quote: if set to True, " and ' will be escaped as well.
    :param smart_amp: if set to False, & will always be escaped; if True,
        ampersands already starting an entity (e.g. ``&amp;``) are kept.
    """
    if smart_amp:
        # Only escape '&' when it is not already part of an entity.
        text = _escape_pattern.sub('&amp;', text)
    else:
        text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    if quote:
        text = text.replace('"', '&quot;')
        text = text.replace("'", '&#39;')
    return text
def preprocessing(text, tab=4):
    """Normalise raw markdown *text* before lexing.

    Converts CR/CRLF line endings to LF, expands tabs to *tab* spaces,
    maps the unicode no-break space (U+00A0) and symbol-for-newline
    (U+2424) to their ASCII equivalents, and blanks out lines that consist
    only of spaces.
    """
    text = _newline_pattern.sub('\n', text)
    for old, new in (('\t', ' ' * tab), ('\u00a0', ' '), ('\u2424', '\n')):
        text = text.replace(old, new)
    return re.sub(r'^ +$', '', text, flags=re.M)
class BlockGrammar(object):
    """Grammars for block level tokens.

    Each attribute is a compiled regex that must match at the *start* of
    the remaining input; ``BlockLexer`` tries them in rule order.
    """
    # Link definition: [key]: <url> "optional title"
    def_links = re.compile(
        r'^ *\[([^^\]]+)\]: *' # [key]:
        r'<?([^\s>]+)>?' # <link> or link
        r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
    )
    # Footnote definition: [^key]: body (body may continue, indented).
    def_footnotes = re.compile(
        r'^\[\^([^\]]+)\]: *('
        r'[^\n]*(?:\n+|$)' # [^key]:
        r'(?: {1,}[^\n]*(?:\n+|$))*'
        r')'
    )
    # One or more blank lines.
    newline = re.compile(r'^\n+')
    # Indented (4-space) code block.
    block_code = re.compile(r'^( {4}[^\n]+\n*)+')
    # Fenced code block: ``` or ~~~ with an optional language tag.
    fences = re.compile(
        r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang
        r'([\s\S]+?)\s*'
        r'\1 *(?:\n+|$)' # ```
    )
    # Horizontal rule: three or more -, * or _ on a line.
    hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)')
    # ATX heading: one to six leading '#'.
    heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
    # Setext heading: a line underlined with '=' (h1) or '-' (h2).
    lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
    # Block quote: consecutive lines introduced by '>'.
    block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
    # Bullet or ordered list block; terminated by an hrule, a link or
    # footnote definition, a blank gap followed by non-list content, or
    # end of input.
    list_block = re.compile(
        r'^( *)([*+-]|\d+\.) [\s\S]+?'
        r'(?:'
        r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule
        r'|\n+(?=%s)' # def links
        r'|\n+(?=%s)' # def footnotes
        r'|\n{2,}'
        r'(?! )'
        r'(?!\1(?:[*+-]|\d+\.) )\n*'
        r'|'
        r'\s*$)' % (
            _pure_pattern(def_links),
            _pure_pattern(def_footnotes),
        )
    )
    # A single item inside a list block (multiline mode).
    list_item = re.compile(
        r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
        r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
        flags=re.M
    )
    # The bullet/number prefix of a list item.
    list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
    # Paragraph: a run of lines that does not open any other construct.
    paragraph = re.compile(
        r'^((?:[^\n]+\n?(?!'
        r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
        r'))+)\n*' % (
            _pure_pattern(fences).replace(r'\1', r'\2'),
            _pure_pattern(list_block).replace(r'\1', r'\3'),
            _pure_pattern(hrule),
            _pure_pattern(heading),
            _pure_pattern(lheading),
            _pure_pattern(block_quote),
            _pure_pattern(def_links),
            _pure_pattern(def_footnotes),
            '<' + _block_tag,
        )
    )
    # Raw block-level HTML: a comment, a paired tag, or a lone open tag.
    block_html = re.compile(
        r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
            r'<!--[\s\S]*?-->',
            r'<(%s)((?:%s)*?)>([\s\S]+?)<\/\1>' % (_block_tag, _valid_attr),
            r'<%s(?:%s)*?>' % (_block_tag, _valid_attr),
        )
    )
    # Table with leading pipes.
    table = re.compile(
        r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
    )
    # Table without leading pipes.
    nptable = re.compile(
        r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
    )
    # Fallback: a single non-empty line of text.
    text = re.compile(r'^[^\n]+')
class BlockLexer(object):
    """Block level lexer for block grammars.

    Turns markdown source into a flat list of token dicts (``self.tokens``)
    which the ``Markdown`` parser later walks.  Link and footnote
    definitions found along the way are collected into ``def_links`` and
    ``def_footnotes``.
    """
    grammar_class = BlockGrammar
    # Rule names tried, in order, at the top level of a document.
    default_rules = [
        'newline', 'hrule', 'block_code', 'fences', 'heading',
        'nptable', 'lheading', 'block_quote',
        'list_block', 'block_html', 'def_links',
        'def_footnotes', 'table', 'paragraph', 'text'
    ]
    # Restricted rule set used when parsing inside a list item.
    list_rules = (
        'newline', 'block_code', 'fences', 'lheading', 'hrule',
        'block_quote', 'list_block', 'block_html', 'text',
    )
    # Rule set used when parsing the body of a footnote definition.
    footnote_rules = (
        'newline', 'block_code', 'fences', 'heading',
        'nptable', 'lheading', 'hrule', 'block_quote',
        'list_block', 'block_html', 'table', 'paragraph', 'text'
    )
    def __init__(self, rules=None, **kwargs):
        self.tokens = []
        self.def_links = {}
        self.def_footnotes = {}
        if not rules:
            rules = self.grammar_class()
        self.rules = rules
    def __call__(self, text, rules=None):
        return self.parse(text, rules)
    def parse(self, text, rules=None):
        """Tokenize *text* by repeatedly matching the given *rules*
        (default ``default_rules``) at the head of the remaining input and
        dispatching each match to its ``parse_<rule>`` handler."""
        text = text.rstrip('\n')
        if not rules:
            rules = self.default_rules
        def manipulate(text):
            # First rule that matches at the current position wins.
            for key in rules:
                rule = getattr(self.rules, key)
                m = rule.match(text)
                if not m:
                    continue
                getattr(self, 'parse_%s' % key)(m)
                return m
            return False # pragma: no cover
        while text:
            m = manipulate(text)
            if m is not False:
                # Consume the matched prefix and continue.
                text = text[len(m.group(0)):]
                continue
            if text: # pragma: no cover
                raise RuntimeError('Infinite loop at: %s' % text)
        return self.tokens
    def parse_newline(self, m):
        """A run of two or more newlines becomes a 'newline' token."""
        length = len(m.group(0))
        if length > 1:
            self.tokens.append({'type': 'newline'})
    def parse_block_code(self, m):
        """Indented code block; no language information is available."""
        # clean leading whitespace
        code = _block_code_leadning_pattern.sub('', m.group(0))
        self.tokens.append({
            'type': 'code',
            'lang': None,
            'text': code,
        })
    def parse_fences(self, m):
        """Fenced code block; group 2 is the optional language tag."""
        self.tokens.append({
            'type': 'code',
            'lang': m.group(2),
            'text': m.group(3),
        })
    def parse_heading(self, m):
        """ATX heading; level equals the number of leading '#'."""
        self.tokens.append({
            'type': 'heading',
            'level': len(m.group(1)),
            'text': m.group(2),
        })
    def parse_lheading(self, m):
        """Parse setext heading ('=' underline is h1, '-' is h2)."""
        self.tokens.append({
            'type': 'heading',
            'level': 1 if m.group(2) == '=' else 2,
            'text': m.group(1),
        })
    def parse_hrule(self, m):
        self.tokens.append({'type': 'hrule'})
    def parse_list_block(self, m):
        """Emit list_start/list_end around the items of a list block."""
        bull = m.group(2)
        self.tokens.append({
            'type': 'list_start',
            'ordered': '.' in bull,
        })
        cap = m.group(0)
        self._process_list_item(cap, bull)
        self.tokens.append({'type': 'list_end'})
    def _process_list_item(self, cap, bull):
        """Split a list block into items, outdent each one, classify it as
        loose or tight, and recursively parse its body."""
        cap = self.rules.list_item.findall(cap)
        _next = False
        length = len(cap)
        for i in range(length):
            item = cap[i][0]
            # remove the bullet
            space = len(item)
            item = self.rules.list_bullet.sub('', item)
            # outdent
            if '\n ' in item:
                space = space - len(item)
                pattern = re.compile(r'^ {1,%d}' % space, flags=re.M)
                item = pattern.sub('', item)
            # determine whether item is loose or not
            loose = _next
            if not loose and re.search(r'\n\n(?!\s*$)', item):
                loose = True
            rest = len(item)
            if i != length - 1 and rest:
                # A trailing blank line also makes the *next* item loose.
                _next = item[rest-1] == '\n'
                if not loose:
                    loose = _next
            if loose:
                t = 'loose_item_start'
            else:
                t = 'list_item_start'
            self.tokens.append({'type': t})
            # recurse
            self.parse(item, self.list_rules)
            self.tokens.append({'type': 'list_item_end'})
    def parse_block_quote(self, m):
        """Emit block_quote_start/end and re-parse the quoted body."""
        self.tokens.append({'type': 'block_quote_start'})
        # clean leading >
        cap = _block_quote_leading_pattern.sub('', m.group(0))
        self.parse(cap)
        self.tokens.append({'type': 'block_quote_end'})
    def parse_def_links(self, m):
        """Record a link definition; produces no token."""
        key = _keyify(m.group(1))
        self.def_links[key] = {
            'link': m.group(2),
            'title': m.group(3),
        }
    def parse_def_footnotes(self, m):
        """Record a footnote definition and tokenize its body."""
        key = _keyify(m.group(1))
        if key in self.def_footnotes:
            # footnote is already defined
            return
        self.def_footnotes[key] = 0
        self.tokens.append({
            'type': 'footnote_start',
            'key': key,
        })
        text = m.group(2)
        if '\n' in text:
            # Outdent continuation lines by the smallest common indent.
            lines = text.split('\n')
            whitespace = None
            for line in lines[1:]:
                space = len(line) - len(line.lstrip())
                if space and (not whitespace or space < whitespace):
                    whitespace = space
            newlines = [lines[0]]
            for line in lines[1:]:
                newlines.append(line[whitespace:])
            text = '\n'.join(newlines)
        self.parse(text, self.footnote_rules)
        self.tokens.append({
            'type': 'footnote_end',
            'key': key,
        })
    def parse_table(self, m):
        """Piped table: strip outer pipes, then split rows into cells."""
        item = self._process_table(m)
        cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3))
        cells = cells.split('\n')
        for i, v in enumerate(cells):
            v = re.sub(r'^ *\| *| *\| *$', '', v)
            cells[i] = re.split(r' *\| *', v)
        item['cells'] = cells
        self.tokens.append(item)
    def parse_nptable(self, m):
        """Table without leading pipes."""
        item = self._process_table(m)
        cells = re.sub(r'\n$', '', m.group(3))
        cells = cells.split('\n')
        for i, v in enumerate(cells):
            cells[i] = re.split(r' *\| *', v)
        item['cells'] = cells
        self.tokens.append(item)
    def _process_table(self, m):
        """Build the table token skeleton: header cells and per-column
        alignment ('right'/'center'/'left'/None) from the delimiter row."""
        header = re.sub(r'^ *| *\| *$', '', m.group(1))
        header = re.split(r' *\| *', header)
        align = re.sub(r' *|\| *$', '', m.group(2))
        align = re.split(r' *\| *', align)
        for i, v in enumerate(align):
            if re.search(r'^ *-+: *$', v):
                align[i] = 'right'
            elif re.search(r'^ *:-+: *$', v):
                align[i] = 'center'
            elif re.search(r'^ *:-+ *$', v):
                align[i] = 'left'
            else:
                align[i] = None
        item = {
            'type': 'table',
            'header': header,
            'align': align,
        }
        return item
    def parse_block_html(self, m):
        """Raw HTML block.  A missing tag group means a comment or a lone
        open tag ('close_html'); otherwise a paired tag ('open_html')."""
        tag = m.group(1)
        if not tag:
            text = m.group(0)
            self.tokens.append({
                'type': 'close_html',
                'text': text
            })
        else:
            attr = m.group(2)
            text = m.group(3)
            self.tokens.append({
                'type': 'open_html',
                'tag': tag,
                'extra': attr,
                'text': text
            })
    def parse_paragraph(self, m):
        self.tokens.append({'type': 'paragraph', 'text': m.group(1).rstrip('\n')}) if False else None
        text = m.group(1).rstrip('\n')
        self.tokens.append({'type': 'paragraph', 'text': text})
    def parse_text(self, m):
        text = m.group(0)
        self.tokens.append({'type': 'text', 'text': text})
class InlineGrammar(object):
    """Grammars for inline level tokens.

    Each attribute is a compiled regex matched at the start of the
    remaining inline text; ``InlineLexer`` tries them in rule order.
    """
    # Backslash escapes.
    escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])') # \* \+ \! ....
    # Raw inline HTML: a comment, a paired tag, or a lone open tag.
    inline_html = re.compile(
        r'^(?:%s|%s|%s)' % (
            r'<!--[\s\S]*?-->',
            r'<(\w+%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_valid_end, _valid_attr),
            r'<\w+%s(?:%s)*?>' % (_valid_end, _valid_attr),
        )
    )
    # Automatic link: <http://...> or <user@host>.
    autolink = re.compile(r'^<([^ >]+(@|:)[^ >]+)>')
    # Inline link or image: [text](url "title") / ![alt](url).
    link = re.compile(
        r'^!?\[('
        r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
        r')\]\('
        r'''\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*'''
        r'\)'
    )
    # Reference link: [text][key].
    reflink = re.compile(
        r'^!?\[('
        r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
        r')\]\s*\[([^^\]]*)\]'
    )
    # Shortcut reference link: [key].
    nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]')
    # Bare http(s) URL.
    url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''')
    double_emphasis = re.compile(
        r'^_{2}([\s\S]+?)_{2}(?!_)' # __word__
        r'|'
        r'^\*{2}([\s\S]+?)\*{2}(?!\*)' # **word**
    )
    emphasis = re.compile(
        r'^\b_((?:__|[\s\S])+?)_\b' # _word_
        r'|'
        r'^\*((?:\*\*|[\s\S])+?)\*(?!\*)' # *word*
    )
    code = re.compile(r'^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)') # `code`
    # Hard line break: two or more trailing spaces before the newline.
    linebreak = re.compile(r'^ {2,}\n(?!\s*$)')
    strikethrough = re.compile(r'^~~(?=\S)([\s\S]+?\S)~~') # ~~word~~
    # Footnote reference: [^key].
    footnote = re.compile(r'^\[\^([^\]]+)\]')
    # Plain text up to the next potentially-special character.
    text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)')
    def hard_wrap(self):
        """Grammar for hard wrap linebreak. You don't need to add two
        spaces at the end of a line.
        """
        self.linebreak = re.compile(r'^ *\n(?!\s*$)')
        self.text = re.compile(
            r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)'
        )
class InlineLexer(object):
    """Inline level lexer for inline grammars.

    Matches inline constructs (emphasis, links, code spans, ...) at the
    head of the text and delegates rendering of each one to the attached
    ``renderer``; outputs are concatenated with ``+=`` onto the renderer's
    ``placeholder()`` value.
    """
    grammar_class = InlineGrammar
    # Rule names tried, in order, on ordinary inline text.
    default_rules = [
        'escape', 'inline_html', 'autolink', 'url',
        'footnote', 'link', 'reflink', 'nolink',
        'double_emphasis', 'emphasis', 'code',
        'linebreak', 'strikethrough', 'text',
    ]
    # Rule set used when re-parsing text found inside inline HTML
    # (inline_html and footnote are deliberately excluded).
    inline_html_rules = [
        'escape', 'autolink', 'url', 'link', 'reflink',
        'nolink', 'double_emphasis', 'emphasis', 'code',
        'linebreak', 'strikethrough', 'text',
    ]
    def __init__(self, renderer, rules=None, **kwargs):
        self.renderer = renderer
        self.links = {}
        self.footnotes = {}
        self.footnote_index = 0
        if not rules:
            rules = self.grammar_class()
        self.rules = rules
        # Nesting flags: suppress autolinking inside <a>/links and
        # footnote refs inside footnote bodies.
        self._in_link = False
        self._in_footnote = False
        kwargs.update(self.renderer.options)
        self._parse_inline_html = kwargs.get('parse_inline_html')
    def __call__(self, text, rules=None):
        return self.output(text, rules)
    def setup(self, links, footnotes):
        """Install the link/footnote definitions collected by the block
        lexer before rendering begins."""
        self.footnote_index = 0
        self.links = links or {}
        self.footnotes = footnotes or {}
    def output(self, text, rules=None):
        """Render *text* by repeatedly matching *rules* at its head and
        appending each handler's output."""
        text = text.rstrip('\n')
        if not rules:
            rules = list(self.default_rules)
        if self._in_footnote and 'footnote' in rules:
            rules.remove('footnote')
        output = self.renderer.placeholder()
        def manipulate(text):
            for key in rules:
                pattern = getattr(self.rules, key)
                m = pattern.match(text)
                if not m:
                    continue
                self.line_match = m
                # A handler may return None (e.g. unknown link key) to
                # decline the match and let later rules try.
                out = getattr(self, 'output_%s' % key)(m)
                if out is not None:
                    return m, out
            return False # pragma: no cover
        self.line_started = False
        while text:
            ret = manipulate(text)
            self.line_started = True
            if ret is not False:
                m, out = ret
                output += out
                text = text[len(m.group(0)):]
                continue
            if text: # pragma: no cover
                raise RuntimeError('Infinite loop at: %s' % text)
        return output
    def output_escape(self, m):
        """A backslash escape renders as the bare escaped character."""
        return m.group(1)
    def output_autolink(self, m):
        link = m.group(1)
        if m.group(2) == '@':
            is_email = True
        else:
            is_email = False
        return self.renderer.autolink(link, is_email)
    def output_url(self, m):
        link = m.group(1)
        if self._in_link:
            # No nested links: render the URL as plain text.
            return self.renderer.text(link)
        return self.renderer.autolink(link, False)
    def output_inline_html(self, m):
        tag = m.group(1)
        if self._parse_inline_html and tag in _inline_tags:
            # Re-parse the tag's content as markdown.
            text = m.group(3)
            if tag == 'a':
                self._in_link = True
                text = self.output(text, rules=self.inline_html_rules)
                self._in_link = False
            else:
                text = self.output(text, rules=self.inline_html_rules)
            extra = m.group(2) or ''
            html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
        else:
            html = m.group(0)
        return self.renderer.inline_html(html)
    def output_footnote(self, m):
        """Render a footnote ref the first time its key is seen; repeated
        refs and unknown keys decline the match (return None)."""
        key = _keyify(m.group(1))
        if key not in self.footnotes:
            return None
        if self.footnotes[key]:
            return None
        self.footnote_index += 1
        self.footnotes[key] = self.footnote_index
        return self.renderer.footnote_ref(key, self.footnote_index)
    def output_link(self, m):
        return self._process_link(m, m.group(3), m.group(4))
    def output_reflink(self, m):
        key = _keyify(m.group(2) or m.group(1))
        if key not in self.links:
            return None
        ret = self.links[key]
        return self._process_link(m, ret['link'], ret['title'])
    def output_nolink(self, m):
        key = _keyify(m.group(1))
        if key not in self.links:
            return None
        ret = self.links[key]
        return self._process_link(m, ret['link'], ret['title'])
    def _process_link(self, m, link, title=None):
        """Shared tail for link/reflink/nolink: a leading '!' means an
        image, otherwise render the (recursively parsed) link text."""
        line = m.group(0)
        text = m.group(1)
        if line[0] == '!':
            return self.renderer.image(link, title, text)
        self._in_link = True
        text = self.output(text)
        self._in_link = False
        return self.renderer.link(link, title, text)
    def output_double_emphasis(self, m):
        # Group 1 is the __..__ variant, group 2 the **..** variant.
        text = m.group(2) or m.group(1)
        text = self.output(text)
        return self.renderer.double_emphasis(text)
    def output_emphasis(self, m):
        text = m.group(2) or m.group(1)
        text = self.output(text)
        return self.renderer.emphasis(text)
    def output_code(self, m):
        text = m.group(2)
        return self.renderer.codespan(text)
    def output_linebreak(self, m):
        return self.renderer.linebreak()
    def output_strikethrough(self, m):
        text = self.output(m.group(1))
        return self.renderer.strikethrough(text)
    def output_text(self, m):
        text = m.group(0)
        return self.renderer.text(text)
class Renderer(object):
    """The default HTML renderer for rendering Markdown.

    Every method returns an HTML fragment string; the parser concatenates
    them onto ``placeholder()``.  Subclass and override individual methods
    to customise the output.
    """
    def __init__(self, **kwargs):
        self.options = kwargs
    def placeholder(self):
        """Returns the default, empty output value for the renderer.
        All renderer methods use the '+=' operator to append to this value.
        Default is a string so rendering HTML can build up a result string with
        the rendered Markdown.
        Can be overridden by Renderer subclasses to be types like an empty
        list, allowing the renderer to create a tree-like structure to
        represent the document (which can then be reprocessed later into a
        separate format like docx or pdf).
        """
        return ''
    def block_code(self, code, lang=None):
        """Rendering block level code. ``pre > code``.
        :param code: text content of the code block.
        :param lang: language of the given code.
        """
        code = code.rstrip('\n')
        if not lang:
            code = escape(code, smart_amp=False)
            return '<pre><code>%s\n</code></pre>\n' % code
        code = escape(code, quote=True, smart_amp=False)
        return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code)
    def block_quote(self, text):
        """Rendering <blockquote> with the given text.
        :param text: text content of the blockquote.
        """
        return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n')
    def block_html(self, html):
        """Rendering block level pure html content.
        :param html: text content of the html snippet.
        """
        # Optionally drop <style> blocks entirely (skip_style option).
        if self.options.get('skip_style') and \
           html.lower().startswith('<style'):
            return ''
        if self.options.get('escape'):
            return escape(html)
        return html
    def header(self, text, level, raw=None):
        """Rendering header/heading tags like ``<h1>`` ``<h2>``.
        :param text: rendered text content for the header.
        :param level: a number for the header level, for example: 1.
        :param raw: raw text content of the header.
        """
        return '<h%d>%s</h%d>\n' % (level, text, level)
    def hrule(self):
        """Rendering method for ``<hr>`` tag."""
        if self.options.get('use_xhtml'):
            return '<hr />\n'
        return '<hr>\n'
    def list(self, body, ordered=True):
        """Rendering list tags like ``<ul>`` and ``<ol>``.
        :param body: body contents of the list.
        :param ordered: whether this list is ordered or not.
        """
        tag = 'ul'
        if ordered:
            tag = 'ol'
        return '<%s>\n%s</%s>\n' % (tag, body, tag)
    def list_item(self, text):
        """Rendering list item snippet. Like ``<li>``."""
        return '<li>%s</li>\n' % text
    def paragraph(self, text):
        """Rendering paragraph tags. Like ``<p>``."""
        return '<p>%s</p>\n' % text.strip(' ')
    def table(self, header, body):
        """Rendering table element. Wrap header and body in it.
        :param header: header part of the table.
        :param body: body part of the table.
        """
        return (
            '<table>\n<thead>%s</thead>\n'
            '<tbody>\n%s</tbody>\n</table>\n'
        ) % (header, body)
    def table_row(self, content):
        """Rendering a table row. Like ``<tr>``.
        :param content: content of current table row.
        """
        return '<tr>\n%s</tr>\n' % content
    def table_cell(self, content, **flags):
        """Rendering a table cell. Like ``<th>`` ``<td>``.
        :param content: content of current table cell.
        :param header: whether this is header or not.
        :param align: align of current table cell.
        """
        if flags['header']:
            tag = 'th'
        else:
            tag = 'td'
        align = flags['align']
        if not align:
            return '<%s>%s</%s>\n' % (tag, content, tag)
        return '<%s style="text-align:%s">%s</%s>\n' % (
            tag, align, content, tag
        )
    def double_emphasis(self, text):
        """Rendering **strong** text.
        :param text: text content for emphasis.
        """
        return '<strong>%s</strong>' % text
    def emphasis(self, text):
        """Rendering *emphasis* text.
        :param text: text content for emphasis.
        """
        return '<em>%s</em>' % text
    def codespan(self, text):
        """Rendering inline `code` text.
        :param text: text content for inline code.
        """
        text = escape(text.rstrip(), smart_amp=False)
        return '<code>%s</code>' % text
    def linebreak(self):
        """Rendering line break like ``<br>``."""
        if self.options.get('use_xhtml'):
            return '<br />\n'
        return '<br>\n'
    def strikethrough(self, text):
        """Rendering ~~strikethrough~~ text.
        :param text: text content for strikethrough.
        """
        return '<del>%s</del>' % text
    def text(self, text):
        """Rendering unformatted text.
        :param text: text content.
        """
        return escape(text)
    def autolink(self, link, is_email=False):
        """Rendering a given link or email address.
        :param link: link content or email address.
        :param is_email: whether this is an email or not.
        """
        text = link = escape(link)
        if is_email:
            link = 'mailto:%s' % link
        return '<a href="%s">%s</a>' % (link, text)
    def link(self, link, title, text):
        """Rendering a given link with content and title.
        :param link: href link for ``<a>`` tag.
        :param title: title content for `title` attribute.
        :param text: text content for description.
        """
        # Neutralise javascript: URLs to avoid script injection.
        if link.startswith('javascript:'):
            link = ''
        if not title:
            return '<a href="%s">%s</a>' % (link, text)
        title = escape(title, quote=True)
        return '<a href="%s" title="%s">%s</a>' % (link, title, text)
    def image(self, src, title, text):
        """Rendering a image with title and text.
        :param src: source link of the image.
        :param title: title text of the image.
        :param text: alt text of the image.
        """
        # Neutralise javascript: URLs to avoid script injection.
        if src.startswith('javascript:'):
            src = ''
        text = escape(text, quote=True)
        if title:
            title = escape(title, quote=True)
            html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
        else:
            html = '<img src="%s" alt="%s"' % (src, text)
        if self.options.get('use_xhtml'):
            return '%s />' % html
        return '%s>' % html
    def inline_html(self, html):
        """Rendering span level pure html content.
        :param html: text content of the html snippet.
        """
        if self.options.get('escape'):
            return escape(html)
        return html
    def newline(self):
        """Rendering newline element."""
        return ''
    def footnote_ref(self, key, index):
        """Rendering the ref anchor of a footnote.
        :param key: identity key for the footnote.
        :param index: the index count of current footnote.
        """
        html = (
            '<sup class="footnote-ref" id="fnref-%s">'
            '<a href="#fn-%s" rel="footnote">%d</a></sup>'
        ) % (escape(key), escape(key), index)
        return html
    def footnote_item(self, key, text):
        """Rendering a footnote item.
        :param key: identity key for the footnote.
        :param text: text content of the footnote.
        """
        # Back-reference arrow linking from the footnote to its anchor.
        back = (
            '<a href="#fnref-%s" rev="footnote">&#8617;</a>'
        ) % escape(key)
        text = text.rstrip()
        if text.endswith('</p>'):
            # Append the back link inside the footnote's final paragraph.
            text = re.sub(r'<\/p>$', r'%s</p>' % back, text)
        else:
            text = '%s<p>%s</p>' % (text, back)
        html = '<li id="fn-%s">%s</li>\n' % (escape(key), text)
        return html
    def footnotes(self, text):
        """Wrapper for all footnotes.
        :param text: contents of all footnotes.
        """
        html = '<div class="footnotes">\n%s<ol>%s</ol>\n</div>\n'
        return html % (self.hrule(), text)
class Markdown(object):
    """The Markdown parser.

    Drives a ``BlockLexer`` (markdown -> token list), then walks the token
    list dispatching to ``output_<type>`` methods which render via the
    ``InlineLexer`` and the ``Renderer``.

    :param renderer: An instance of ``Renderer``.
    :param inline: An inline lexer class or instance.
    :param block: A block lexer class or instance.
    """
    def __init__(self, renderer=None, inline=None, block=None, **kwargs):
        if not renderer:
            renderer = Renderer(**kwargs)
        self.renderer = renderer
        # Classes are instantiated; instances are used as-is.
        if inline and inspect.isclass(inline):
            inline = inline(renderer, **kwargs)
        if block and inspect.isclass(block):
            block = block(**kwargs)
        if inline:
            self.inline = inline
        else:
            rules = InlineGrammar()
            if kwargs.get('hard_wrap'):
                rules.hard_wrap()
            self.inline = InlineLexer(renderer, rules=rules)
        self.block = block or BlockLexer(BlockGrammar())
        self.options = kwargs
        self.footnotes = []
        self.tokens = []
        # detect if it should parse text in block html
        self._parse_block_html = kwargs.get('parse_block_html')
    def __call__(self, text):
        return self.parse(text)
    def render(self, text):
        """Render the Markdown text.
        :param text: markdown formatted text content.
        """
        return self.parse(text)
    def parse(self, text):
        """Render *text* and append any collected footnotes, then reset
        the per-document lexer state."""
        out = self.output(preprocessing(text))
        keys = self.block.def_footnotes
        # reset block
        self.block.def_links = {}
        self.block.def_footnotes = {}
        # reset inline
        self.inline.links = {}
        self.inline.footnotes = {}
        if not self.footnotes:
            return out
        # Keep only referenced footnotes, ordered by first reference.
        footnotes = filter(lambda o: keys.get(o['key']), self.footnotes)
        self.footnotes = sorted(
            footnotes, key=lambda o: keys.get(o['key']), reverse=True
        )
        body = self.renderer.placeholder()
        while self.footnotes:
            note = self.footnotes.pop()
            body += self.renderer.footnote_item(
                note['key'], note['text']
            )
        out += self.renderer.footnotes(body)
        return out
    def pop(self):
        """Advance to the next token (the list is reversed, so pop() walks
        it front to back); returns it, or None when exhausted."""
        if not self.tokens:
            return None
        self.token = self.tokens.pop()
        return self.token
    def peek(self):
        """Return the next token without consuming it."""
        if self.tokens:
            return self.tokens[-1]
        return None # pragma: no cover
    def output(self, text, rules=None):
        """Tokenize *text* with the block lexer and render every token."""
        self.tokens = self.block(text, rules)
        self.tokens.reverse()
        self.inline.setup(self.block.def_links, self.block.def_footnotes)
        out = self.renderer.placeholder()
        while self.pop():
            out += self.tok()
        return out
    def tok(self):
        """Dispatch the current token to its output_<type> method."""
        t = self.token['type']
        # special cases: '<x>_start' tokens dispatch to output_<x>
        if t.endswith('_start'):
            t = t[:-6]
        return getattr(self, 'output_%s' % t)()
    def tok_text(self):
        """Merge consecutive 'text' tokens and render them inline."""
        text = self.token['text']
        while self.peek()['type'] == 'text':
            text += '\n' + self.pop()['text']
        return self.inline(text)
    def output_newline(self):
        return self.renderer.newline()
    def output_hrule(self):
        return self.renderer.hrule()
    def output_heading(self):
        return self.renderer.header(
            self.inline(self.token['text']),
            self.token['level'],
            self.token['text'],
        )
    def output_code(self):
        return self.renderer.block_code(
            self.token['text'], self.token['lang']
        )
    def output_table(self):
        """Render a table token; alignment list may be shorter than a row,
        in which case missing columns get no alignment."""
        aligns = self.token['align']
        aligns_length = len(aligns)
        cell = self.renderer.placeholder()
        # header part
        header = self.renderer.placeholder()
        for i, value in enumerate(self.token['header']):
            align = aligns[i] if i < aligns_length else None
            flags = {'header': True, 'align': align}
            cell += self.renderer.table_cell(self.inline(value), **flags)
        header += self.renderer.table_row(cell)
        # body part
        body = self.renderer.placeholder()
        for i, row in enumerate(self.token['cells']):
            cell = self.renderer.placeholder()
            for j, value in enumerate(row):
                align = aligns[j] if j < aligns_length else None
                flags = {'header': False, 'align': align}
                cell += self.renderer.table_cell(self.inline(value), **flags)
            body += self.renderer.table_row(cell)
        return self.renderer.table(header, body)
    def output_block_quote(self):
        """Render everything up to the matching block_quote_end token."""
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'block_quote_end':
            body += self.tok()
        return self.renderer.block_quote(body)
    def output_list(self):
        body = self.renderer.placeholder()
        ordered = self.token['ordered']
        while self.pop()['type'] != 'list_end':
            body += self.tok()
        return self.renderer.list(body, ordered)
    def output_list_item(self):
        """Tight list item: bare text tokens are rendered without <p>."""
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'list_item_end':
            if self.token['type'] == 'text':
                body += self.tok_text()
            else:
                body += self.tok()
        return self.renderer.list_item(body)
    def output_loose_item(self):
        """Loose list item: text tokens keep their paragraph wrapping."""
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'list_item_end':
            body += self.tok()
        return self.renderer.list_item(body)
    def output_footnote(self):
        """Collect a footnote body for later rendering; emits nothing."""
        self.inline._in_footnote = True
        body = self.renderer.placeholder()
        key = self.token['key']
        while self.pop()['type'] != 'footnote_end':
            body += self.tok()
        self.footnotes.append({'key': key, 'text': body})
        self.inline._in_footnote = False
        return self.renderer.placeholder()
    def output_close_html(self):
        text = self.token['text']
        return self.renderer.block_html(text)
    def output_open_html(self):
        """Paired HTML block; optionally re-parse its content as markdown
        (parse_block_html option), except inside pre/script/style."""
        text = self.token['text']
        tag = self.token['tag']
        if self._parse_block_html and tag not in _pre_tags:
            text = self.inline(text, rules=self.inline.inline_html_rules)
        extra = self.token.get('extra') or ''
        html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
        return self.renderer.block_html(html)
    def output_paragraph(self):
        return self.renderer.paragraph(self.inline(self.token['text']))
    def output_text(self):
        return self.renderer.paragraph(self.tok_text())
def markdown(text, escape=True, **kwargs):
    """Render markdown formatted text to html.
    :param text: markdown formatted text content.
    :param escape: if set to False, all html tags will not be escaped.
    :param use_xhtml: output with xhtml tags.
    :param hard_wrap: if set to True, it will has GFM line breaks feature.
    :param parse_block_html: parse text only in block level html.
    :param parse_inline_html: parse text only in inline level html.
    """
    parser = Markdown(escape=escape, **kwargs)
    return parser.render(text)
| mit |
sunshinelover/chanlun | vn.trader/ctaAlgo/ctaBase.py | 1 | 4647 | # encoding: UTF-8
'''
This module contains basic settings, classes and constants used by the
CTA strategy module.
'''
from __future__ import division
# Add the vn.trader root directory to the python path
import sys
sys.path.append('..')
# Constant definitions
# Trade direction/offset types used by the CTA engine
CTAORDER_BUY = u'买开' # buy open (open a long position)
CTAORDER_SELL = u'卖平' # sell close (close a long position)
CTAORDER_SHORT = u'卖开' # sell open (open a short position)
CTAORDER_COVER = u'买平' # buy close (close a short position)
# Local stop-order status values
STOPORDER_WAITING = u'等待中' # waiting
STOPORDER_CANCELLED = u'已撤销' # cancelled
STOPORDER_TRIGGERED = u'已触发' # triggered
# Prefix for local stop-order identifiers
STOPORDERPREFIX = 'CtaStopOrder.'
# MongoDB database names, one per bar period
SETTING_DB_NAME = 'VnTrader_Setting_Db'
TICK_DB_NAME = 'VnTrader_Tick_Db'
DAILY_DB_NAME = 'VnTrader_Daily_Db'
WEEKLY_DB_NAME = "VnTrader_Weekly_Db"
MONTHLY_DB_NAME = "VnTrader_Monthly_Db"
MINUTE_DB_NAME = 'VnTrader_1Min_Db'
MINUTE5_DB_NAME = 'VnTrader_5Min_Db'
MINUTE15_DB_NAME = 'VnTrader_15Min_Db'
MINUTE30_DB_NAME = 'VnTrader_30Min_Db'
MINUTE60_DB_NAME = 'VnTrader_60Min_Db'
# Engine type, used to distinguish the strategy's runtime environment
ENGINETYPE_BACKTESTING = 'backtesting' # backtesting
ENGINETYPE_TRADING = 'trading' # live trading
# Data classes used inside the CTA engine
from vtConstant import EMPTY_UNICODE, EMPTY_STRING, EMPTY_FLOAT, EMPTY_INT
########################################################################
class StopOrder(object):
    """Local stop order (tracked inside the engine, not at the broker)."""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.vtSymbol = EMPTY_STRING # contract symbol (vt format)
        self.orderType = EMPTY_UNICODE # order type (one of the CTAORDER_* values)
        self.direction = EMPTY_UNICODE # direction
        self.offset = EMPTY_UNICODE # open/close offset
        self.price = EMPTY_FLOAT # trigger price
        self.volume = EMPTY_INT # order volume
        self.strategy = None # strategy object that placed this stop order
        self.stopOrderID = EMPTY_STRING # local id of the stop order
        self.status = EMPTY_STRING # stop order status (STOPORDER_* values)
########################################################################
class CtaBarData(object):
    """K-line (OHLC bar) data."""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.vtSymbol = EMPTY_STRING # symbol in vt system format
        self.symbol = EMPTY_STRING # contract symbol
        self.exchange = EMPTY_STRING # exchange
        self.open = EMPTY_FLOAT # OHLC prices
        self.high = EMPTY_FLOAT
        self.low = EMPTY_FLOAT
        self.close = EMPTY_FLOAT
        self.date = EMPTY_STRING # date the bar starts on
        self.time = EMPTY_STRING # time the bar starts at
        self.datetime = None # python datetime object
        self.volume = EMPTY_INT # traded volume
        self.openInterest = EMPTY_INT # open interest
########################################################################
class CtaTickData(object):
    """Tick (market snapshot) data."""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.vtSymbol = EMPTY_STRING # symbol in vt system format
        self.symbol = EMPTY_STRING # contract symbol
        self.exchange = EMPTY_STRING # exchange code
        # Trade data
        self.lastPrice = EMPTY_FLOAT # last traded price
        self.volume = EMPTY_INT # last traded volume
        self.openInterest = EMPTY_INT # open interest
        self.upperLimit = EMPTY_FLOAT # limit-up price
        self.lowerLimit = EMPTY_FLOAT # limit-down price
        # Tick timestamp
        self.date = EMPTY_STRING # date
        self.time = EMPTY_STRING # time
        self.datetime = None # python datetime object
        # Five-level order book
        self.bidPrice1 = EMPTY_FLOAT
        self.bidPrice2 = EMPTY_FLOAT
        self.bidPrice3 = EMPTY_FLOAT
        self.bidPrice4 = EMPTY_FLOAT
        self.bidPrice5 = EMPTY_FLOAT
        self.askPrice1 = EMPTY_FLOAT
        self.askPrice2 = EMPTY_FLOAT
        self.askPrice3 = EMPTY_FLOAT
        self.askPrice4 = EMPTY_FLOAT
        self.askPrice5 = EMPTY_FLOAT
        self.bidVolume1 = EMPTY_INT
        self.bidVolume2 = EMPTY_INT
        self.bidVolume3 = EMPTY_INT
        self.bidVolume4 = EMPTY_INT
        self.bidVolume5 = EMPTY_INT
        self.askVolume1 = EMPTY_INT
        self.askVolume2 = EMPTY_INT
        self.askVolume3 = EMPTY_INT
        self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT | mit |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import range
import datetime as dt
import collections
import mock
import pandas
import unittest
import google.auth
# Import IPython first so we can mock out the pieces the tests touch, before
# importing datalab below.
import IPython
import IPython.core.magic

# Replace the magic-registration entry points with mocks; these tests do not
# need a live IPython shell.
IPython.core.magic.register_line_cell_magic = mock.Mock()
IPython.core.magic.register_line_magic = mock.Mock()
IPython.core.magic.register_cell_magic = mock.Mock()
IPython.get_ipython = mock.Mock()

import datalab.bigquery  # noqa: E402
import datalab.context  # noqa: E402
import datalab.utils.commands  # noqa: E402
class TestCases(unittest.TestCase):
@staticmethod
def _get_expected_cols():
cols = [
{'type': 'number', 'id': 'Column1', 'label': 'Column1'},
{'type': 'number', 'id': 'Column2', 'label': 'Column2'},
{'type': 'string', 'id': 'Column3', 'label': 'Column3'},
{'type': 'boolean', 'id': 'Column4', 'label': 'Column4'},
{'type': 'number', 'id': 'Column5', 'label': 'Column5'},
{'type': 'datetime', 'id': 'Column6', 'label': 'Column6'}
]
return cols
@staticmethod
def _timestamp(d):
return (d - dt.datetime(1970, 1, 1)).total_seconds()
@staticmethod
def _get_raw_rows():
rows = [
{'f': [
{'v': 1}, {'v': 2}, {'v': '3'}, {'v': 'true'}, {'v': 0.0},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 1))}
]},
{'f': [
{'v': 11}, {'v': 12}, {'v': '13'}, {'v': 'false'}, {'v': 0.2},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 2))}
]},
{'f': [
{'v': 21}, {'v': 22}, {'v': '23'}, {'v': 'true'}, {'v': 0.3},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 3))}
]},
{'f': [
{'v': 31}, {'v': 32}, {'v': '33'}, {'v': 'false'}, {'v': 0.4},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 4))}
]},
{'f': [
{'v': 41}, {'v': 42}, {'v': '43'}, {'v': 'true'}, {'v': 0.5},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 5))}
]},
{'f': [
{'v': 51}, {'v': 52}, {'v': '53'}, {'v': 'true'}, {'v': 0.6},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 6))}
]}
]
return rows
@staticmethod
def _get_expected_rows():
rows = [
{'c': [
{'v': 1}, {'v': 2}, {'v': '3'}, {'v': True}, {'v': 0.0}, {'v': dt.datetime(2000, 1, 1)}
]},
{'c': [
{'v': 11}, {'v': 12}, {'v': '13'}, {'v': False}, {'v': 0.2}, {'v': dt.datetime(2000, 1, 2)}
]},
{'c': [
{'v': 21}, {'v': 22}, {'v': '23'}, {'v': True}, {'v': 0.3}, {'v': dt.datetime(2000, 1, 3)}
]},
{'c': [
{'v': 31}, {'v': 32}, {'v': '33'}, {'v': False}, {'v': 0.4}, {'v': dt.datetime(2000, 1, 4)}
]},
{'c': [
{'v': 41}, {'v': 42}, {'v': '43'}, {'v': True}, {'v': 0.5}, {'v': dt.datetime(2000, 1, 5)}
]},
{'c': [
{'v': 51}, {'v': 52}, {'v': '53'}, {'v': True}, {'v': 0.6}, {'v': dt.datetime(2000, 1, 6)}
]}
]
return rows
@staticmethod
def _get_test_data_as_list_of_dicts():
test_data = [
{'Column1': 1, 'Column2': 2, 'Column3': '3',
'Column4': True, 'Column5': 0.0, 'Column6': dt.datetime(2000, 1, 1)},
{'Column1': 11, 'Column2': 12, 'Column3': '13',
'Column4': False, 'Column5': 0.2, 'Column6': dt.datetime(2000, 1, 2)},
{'Column1': 21, 'Column2': 22, 'Column3': '23',
'Column4': True, 'Column5': 0.3, 'Column6': dt.datetime(2000, 1, 3)},
{'Column1': 31, 'Column2': 32, 'Column3': '33',
'Column4': False, 'Column5': 0.4, 'Column6': dt.datetime(2000, 1, 4)},
{'Column1': 41, 'Column2': 42, 'Column3': '43',
'Column4': True, 'Column5': 0.5, 'Column6': dt.datetime(2000, 1, 5)},
{'Column1': 51, 'Column2': 52, 'Column3': '53',
'Column4': True, 'Column5': 0.6, 'Column6': dt.datetime(2000, 1, 6)}
]
# Use OrderedDicts to make testing the result easier.
for i in range(0, len(test_data)):
test_data[i] = collections.OrderedDict(sorted(list(test_data[i].items()), key=lambda t: t[0]))
return test_data
def test_get_data_from_list_of_dicts(self):
self._test_get_data(TestCases._get_test_data_as_list_of_dicts(), TestCases._get_expected_cols(),
TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_list_of_dicts)
self._test_get_data(TestCases._get_test_data_as_list_of_dicts(), TestCases._get_expected_cols(),
TestCases._get_expected_rows(), 6, datalab.utils.commands._utils.get_data)
def test_get_data_from_list_of_lists(self):
test_data = [
[1, 2, '3', True, 0.0, dt.datetime(2000, 1, 1)],
[11, 12, '13', False, 0.2, dt.datetime(2000, 1, 2)],
[21, 22, '23', True, 0.3, dt.datetime(2000, 1, 3)],
[31, 32, '33', False, 0.4, dt.datetime(2000, 1, 4)],
[41, 42, '43', True, 0.5, dt.datetime(2000, 1, 5)],
[51, 52, '53', True, 0.6, dt.datetime(2000, 1, 6)],
]
self._test_get_data(test_data, TestCases._get_expected_cols(), TestCases._get_expected_rows(),
6, datalab.utils.commands._utils._get_data_from_list_of_lists)
self._test_get_data(test_data, TestCases._get_expected_cols(), TestCases._get_expected_rows(),
6, datalab.utils.commands._utils.get_data)
def test_get_data_from_dataframe(self):
df = pandas.DataFrame(self._get_test_data_as_list_of_dicts())
self._test_get_data(df, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_dataframe)
self._test_get_data(df, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils.get_data)
@mock.patch('datalab.bigquery._api.Api.tabledata_list')
@mock.patch('datalab.bigquery._table.Table.exists')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.context._context.Context.default')
def test_get_data_from_table(self, mock_context_default, mock_api_tables_get,
mock_table_exists, mock_api_tabledata_list):
data = TestCases._get_expected_rows()
mock_context_default.return_value = TestCases._create_context()
mock_api_tables_get.return_value = {
'numRows': len(data),
'schema': {
'fields': [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'INTEGER'},
{'name': 'Column3', 'type': 'STRING'},
{'name': 'Column4', 'type': 'BOOLEAN'},
{'name': 'Column5', 'type': 'FLOAT'},
{'name': 'Column6', 'type': 'TIMESTAMP'}
]
}
}
mock_table_exists.return_value = True
raw_data = self._get_raw_rows()
def tabledata_list(*args, **kwargs):
start_index = kwargs['start_index']
max_results = kwargs['max_results']
if max_results < 0:
max_results = len(data)
return {'rows': raw_data[start_index:start_index + max_results]}
mock_api_tabledata_list.side_effect = tabledata_list
t = datalab.bigquery.Table('foo.bar')
self._test_get_data(t, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_table)
self._test_get_data(t, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils.get_data)
def test_get_data_from_empty_list(self):
self._test_get_data([], [], [], 0, datalab.utils.commands._utils.get_data)
def test_get_data_from_malformed_list(self):
with self.assertRaises(Exception) as error:
self._test_get_data(['foo', 'bar'], [], [], 0, datalab.utils.commands._utils.get_data)
self.assertEquals('To get tabular data from a list it must contain dictionaries or lists.',
str(error.exception))
def _test_get_data(self, test_data, cols, rows, expected_count, fn):
self.maxDiff = None
data, count = fn(test_data)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows}, data)
# Test first_row. Note that count must be set in this case so we use a value greater than the
# data set size.
for first in range(0, 6):
data, count = fn(test_data, first_row=first, count=10)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows[first:]}, data)
# Test first_row + count
for first in range(0, 6):
data, count = fn(test_data, first_row=first, count=2)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows[first:first + 2]}, data)
# Test subsets of columns
# No columns
data, count = fn(test_data, fields=[])
self.assertEquals({'cols': [], 'rows': [{'c': []}] * expected_count}, data)
# Single column
data, count = fn(test_data, fields=['Column3'])
if expected_count == 0:
return
self.assertEquals({'cols': [cols[2]],
'rows': [{'c': [row['c'][2]]} for row in rows]}, data)
# Multi-columns
data, count = fn(test_data, fields=['Column1', 'Column3', 'Column6'])
self.assertEquals({'cols': [cols[0], cols[2], cols[5]],
'rows': [{'c': [row['c'][0], row['c'][2], row['c'][5]]} for row in rows]},
data)
# Switch order
data, count = fn(test_data, fields=['Column3', 'Column1'])
self.assertEquals({'cols': [cols[2], cols[0]],
'rows': [{'c': [row['c'][2], row['c'][0]]} for row in rows]}, data)
# Select all
data, count = fn(test_data,
fields=['Column1', 'Column2', 'Column3', 'Column4', 'Column5', 'Column6'])
self.assertEquals({'cols': cols, 'rows': rows}, data)
@staticmethod
def _create_api():
context = TestCases._create_context()
return datalab.bigquery._api.Api(context.credentials, context.project_id)
@staticmethod
def _create_context():
project_id = 'test'
creds = mock.Mock(spec=google.auth.credentials.Credentials)
return datalab.context.Context(project_id, creds)
| apache-2.0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Metadata consumed by Ansible's documentation/QA tooling; not used at runtime.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_vpc
short_description: "Manages VPCs on Apache CloudStack based clouds."
description:
- "Create, update and delete VPCs."
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- "Name of the VPC."
required: true
display_text:
description:
- "Display text of the VPC."
- "If not set, C(name) will be used for creating."
required: false
default: null
cidr:
description:
- "CIDR of the VPC, e.g. 10.1.0.0/16"
- "All VPC guest networks' CIDRs must be within this CIDR."
- "Required on C(state=present)."
required: false
default: null
network_domain:
description:
- "Network domain for the VPC."
- "All networks inside the VPC will belong to this domain."
required: false
default: null
vpc_offering:
description:
- "Name of the VPC offering."
- "If not set, default VPC offering is used."
required: false
default: null
state:
description:
- "State of the VPC."
required: false
default: present
choices:
- present
- absent
- restarted
domain:
description:
- "Domain the VPC is related to."
required: false
default: null
account:
description:
- "Account the VPC is related to."
required: false
default: null
project:
description:
- "Name of the project the VPC is related to."
required: false
default: null
zone:
description:
- "Name of the zone."
- "If not set, default zone is used."
required: false
default: null
tags:
description:
- "List of tags. Tags are a list of dictionaries having keys C(key) and C(value)."
- "For deleting all tags, set an empty list e.g. C(tags: [])."
required: false
default: null
aliases:
- tag
poll_async:
description:
- "Poll async jobs until job has finished."
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a VPC is present
- local_action:
module: cs_vpc
name: my_vpc
display_text: My example VPC
cidr: 10.10.0.0/16
# Ensure a VPC is absent
- local_action:
module: cs_vpc
name: my_vpc
state: absent
# Ensure a VPC is restarted
- local_action:
module: cs_vpc
name: my_vpc
state: restarted
'''
RETURN = '''
---
id:
description: "UUID of the VPC."
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: "Name of the VPC."
returned: success
type: string
sample: my_vpc
display_text:
description: "Display text of the VPC."
returned: success
type: string
sample: My example VPC
cidr:
description: "CIDR of the VPC."
returned: success
type: string
sample: 10.10.0.0/16
network_domain:
description: "Network domain of the VPC."
returned: success
type: string
sample: example.com
region_level_vpc:
description: "Whether the VPC is region level or not."
returned: success
type: boolean
sample: true
restart_required:
description: "Whether the VPC router needs a restart or not."
returned: success
type: boolean
sample: true
distributed_vpc_router:
description: "Whether the VPC uses distributed router or not."
returned: success
type: boolean
sample: true
redundant_vpc_router:
description: "Whether the VPC has redundant routers or not."
returned: success
type: boolean
sample: true
domain:
description: "Domain the VPC is related to."
returned: success
type: string
sample: example domain
account:
description: "Account the VPC is related to."
returned: success
type: string
sample: example account
project:
description: "Name of project the VPC is related to."
returned: success
type: string
sample: Production
zone:
description: "Name of zone the VPC is in."
returned: success
type: string
sample: ch-gva-2
state:
description: "State of the VPC."
returned: success
type: string
sample: Enabled
tags:
description: "List of resource tags associated with the VPC."
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackVpc(AnsibleCloudStack):
    """CloudStack VPC resource handler for the present/absent/restarted states."""

    def __init__(self, module):
        super(AnsibleCloudStackVpc, self).__init__(module)
        # Map CloudStack API result keys to the snake_case keys returned to Ansible.
        self.returns = {
            'cidr': 'cidr',
            'networkdomain': 'network_domain',
            'redundantvpcrouter': 'redundant_vpc_router',
            'distributedvpcrouter': 'distributed_vpc_router',
            'regionlevelvpc': 'region_level_vpc',
            'restartrequired': 'restart_required',
        }
        self.vpc = None           # lazily-populated cache of the VPC dict
        self.vpc_offering = None  # lazily-populated cache of the VPC offering dict

    def get_vpc_offering(self, key=None):
        """Return the VPC offering (or one of its fields via *key*).

        Uses the C(vpc_offering) module param when given, otherwise the
        default offering; fails the module if no matching offering exists.
        """
        if self.vpc_offering:
            return self._get_by_key(key, self.vpc_offering)

        vpc_offering = self.module.params.get('vpc_offering')
        args = {}
        if vpc_offering:
            args['name'] = vpc_offering
        else:
            args['isdefault'] = True

        vpc_offerings = self.query_api('listVPCOfferings', **args)
        if vpc_offerings:
            self.vpc_offering = vpc_offerings['vpcoffering'][0]
            return self._get_by_key(key, self.vpc_offering)
        self.module.fail_json(msg="VPC offering '%s' not found" % vpc_offering)

    def get_vpc(self):
        """Look up the VPC by name, display text or ID; cache and return it.

        Returns None when no VPC matches; fails the module on an ambiguous
        match.
        """
        if self.vpc:
            return self.vpc

        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
        }
        vpcs = self.query_api('listVPCs', **args)
        if vpcs:
            vpc_name = self.module.params.get('name')
            for v in vpcs['vpc']:
                if vpc_name in [v['name'], v['displaytext'], v['id']]:
                    # Fail if the identifier matches more than one VPC
                    if self.vpc:
                        self.module.fail_json(msg="More than one VPC found with the provided identifier '%s'" % vpc_name)
                    else:
                        self.vpc = v
        return self.vpc

    def restart_vpc(self):
        """Restart the VPC router (no-op in check mode); return the VPC."""
        self.result['changed'] = True
        vpc = self.get_vpc()
        if vpc and not self.module.check_mode:
            args = {
                'id': vpc['id'],
            }
            res = self.query_api('restartVPC', **args)

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                self.poll_job(res, 'vpc')
        return vpc

    def present_vpc(self):
        """Ensure the VPC exists and is up to date, including its tags."""
        vpc = self.get_vpc()
        if not vpc:
            vpc = self._create_vpc(vpc)
        else:
            vpc = self._update_vpc(vpc)

        if vpc:
            vpc = self.ensure_tags(resource=vpc, resource_type='Vpc')
        return vpc

    def _create_vpc(self, vpc):
        """Create the VPC; returns the created VPC (or None in check mode)."""
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'vpcofferingid': self.get_vpc_offering(key='id'),
            'cidr': self.module.params.get('cidr'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
        }
        self.result['diff']['after'] = args
        if not self.module.check_mode:
            res = self.query_api('createVPC', **args)

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                vpc = self.poll_job(res, 'vpc')
        return vpc

    def _update_vpc(self, vpc):
        """Update the VPC's display text if it changed; return the VPC."""
        args = {
            'id': vpc['id'],
            'displaytext': self.module.params.get('display_text'),
        }
        if self.has_changed(args, vpc):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateVPC', **args)

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    vpc = self.poll_job(res, 'vpc')
        return vpc

    def absent_vpc(self):
        """Delete the VPC if it exists; return the (former) VPC or None."""
        vpc = self.get_vpc()
        if vpc:
            self.result['changed'] = True
            self.result['diff']['before'] = vpc
            if not self.module.check_mode:
                res = self.query_api('deleteVPC', id=vpc['id'])

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'vpc')
        return vpc
def main():
    """Module entry point: declare the interface and dispatch on the state."""
    spec = cs_argument_spec()
    spec.update(
        name=dict(required=True),
        cidr=dict(default=None),
        display_text=dict(default=None),
        vpc_offering=dict(default=None),
        network_domain=dict(default=None),
        state=dict(choices=['present', 'absent', 'restarted'], default='present'),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
        zone=dict(default=None),
        tags=dict(type='list', aliases=['tag'], default=None),
        poll_async=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        required_if=[
            ('state', 'present', ['cidr']),
        ],
        supports_check_mode=True,
    )

    acs_vpc = AnsibleCloudStackVpc(module)

    # Dispatch table instead of an if/elif chain; 'present' is the default.
    handlers = {
        'absent': acs_vpc.absent_vpc,
        'restarted': acs_vpc.restart_vpc,
    }
    state = module.params.get('state')
    vpc = handlers.get(state, acs_vpc.present_vpc)()

    result = acs_vpc.get_result(vpc)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
# Python 2 / Python 3 import shim: bind a common set of names (string types,
# urllib pieces, HTMLParser, queue, ...) regardless of interpreter version.
if sys.version_info[0] < 3:
    from StringIO import StringIO
    string_types = basestring,
    text_type = unicode
    from types import FileType as file_type
    import __builtin__ as builtins
    import ConfigParser as configparser
    from ._backport import shutil
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
    from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
                        pathname2url, ContentTooShortError, splittype)

    def quote(s):
        # Encode unicode to UTF-8 bytes before quoting (py2 urllib.quote
        # does not handle unicode).
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        return _quote(s)

    import urllib2
    from urllib2 import (Request, urlopen, URLError, HTTPError,
                         HTTPBasicAuthHandler, HTTPPasswordMgr,
                         HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
                         build_opener)
    import httplib
    import xmlrpclib
    import Queue as queue
    from HTMLParser import HTMLParser
    import htmlentitydefs
    raw_input = raw_input
    from itertools import ifilter as filter
    from itertools import ifilterfalse as filterfalse

    # Compiled lazily inside splituser() on first use.
    _userprog = None

    def splituser(host):
        """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
        global _userprog
        if _userprog is None:
            import re
            _userprog = re.compile('^(.*)@(.*)$')

        match = _userprog.match(host)
        if match: return match.group(1, 2)
        return None, host

else:
    from io import StringIO
    string_types = str,
    text_type = str
    from io import TextIOWrapper as file_type
    import builtins
    import configparser
    import shutil
    from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
                              unquote, urlsplit, urlunsplit, splittype)
    from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
                                pathname2url,
                                HTTPBasicAuthHandler, HTTPPasswordMgr,
                                HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
                                build_opener)
    from urllib.error import HTTPError, URLError, ContentTooShortError
    import http.client as httplib
    import urllib.request as urllib2
    import xmlrpc.client as xmlrpclib
    import queue
    from html.parser import HTMLParser
    import html.entities as htmlentitydefs
    raw_input = input
    from itertools import filterfalse
    filter = filter
# Use ssl.match_hostname when available (Python 3.2+); otherwise fall back to
# a vendored copy of the stdlib implementation.
try:
    from ssl import match_hostname, CertificateError
except ImportError:
    class CertificateError(ValueError):
        # Raised when the certificate does not match the requested hostname.
        pass

    def _dnsname_to_pat(dn):
        # Translate a dNSName value (possibly containing '*' wildcards) into
        # a compiled case-insensitive regular expression.
        pats = []
        for frag in dn.split(r'.'):
            if frag == '*':
                # When '*' is a fragment by itself, it matches a non-empty
                # dotless fragment.
                pats.append('[^.]+')
            else:
                # Otherwise, '*' matches any dotless fragment.
                frag = re.escape(frag)
                pats.append(frag.replace(r'\*', '[^.]*'))
        return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)

    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
        are mostly followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_to_pat(value).match(hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_to_pat(value).match(hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                                   "doesn't match either of %s"
                                   % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                                   "doesn't match %r"
                                   % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                                   "subjectAltName fields were found")
# SimpleNamespace exists from Python 3.3; provide a minimal substitute before
# that.
try:
    from types import SimpleNamespace as Container
except ImportError:
    class Container(object):
        """
        A generic container for when multiple values need to be returned
        """
        def __init__(self, **kwargs):
            # Store all keyword arguments directly as attributes.
            self.__dict__.update(kwargs)
# shutil.which exists from Python 3.3; otherwise use a vendored copy.
try:
    from shutil import which
except ImportError:
    # Implementation from Python 3.3
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.

        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.
        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode)
                    and not os.path.isdir(fn))

        # If we're given a path with a directory part, look it up directly rather
        # than referring to PATH directories. This includes checking relative to the
        # current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None

        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)

        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if os.curdir not in path:
                path.insert(0, os.curdir)

            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path extensions.
            # This will allow us to short circuit when given "python.exe".
            # If it does match, only test that one, otherwise we have to try
            # others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell you
            # what file suffixes are executable, so just pass on cmd as-is.
            files = [cmd]

        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            # Skip duplicate PATH entries (case-normalized).
            if normdir not in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile

if hasattr(BaseZipFile, '__enter__'):
    ZipFile = BaseZipFile
else:
    from zipfile import ZipExtFile as BaseZipExtFile

    class ZipExtFile(BaseZipExtFile):
        # Wrap an open archive-member file object and add context-manager
        # support.
        def __init__(self, base):
            self.__dict__.update(base.__dict__)

        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate

    class ZipFile(BaseZipFile):
        # Add context-manager support to the 2.6 ZipFile.
        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate

        def open(self, *args, **kwargs):
            # Return the wrapped member file so it is also a context manager.
            base = BaseZipFile.open(self, *args, **kwargs)
            return ZipExtFile(base)
# platform.python_implementation exists from Python 2.6/3.x on most builds;
# fall back to heuristics based on sys.version / os.name otherwise.
try:
    from platform import python_implementation
except ImportError: # pragma: no cover
    def python_implementation():
        """Return a string identifying the Python implementation."""
        if 'PyPy' in sys.version:
            return 'PyPy'
        if os.name == 'java':
            return 'Jython'
        if sys.version.startswith('IronPython'):
            return 'IronPython'
        return 'CPython'
# sysconfig exists from Python 2.7/3.2; use the bundled backport otherwise.
try:
    import sysconfig
except ImportError: # pragma: no cover
    from ._backport import sysconfig

# callable() was removed in Python 3.0-3.1; emulate it via the Callable ABC.
try:
    callable = callable
except NameError: # pragma: no cover
    from collections import Callable

    def callable(obj):
        return isinstance(obj, Callable)
# os.fsencode/os.fsdecode exist from Python 3.2; provide equivalents using
# the filesystem encoding otherwise.
try:
    fsencode = os.fsencode
    fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
    _fsencoding = sys.getfilesystemencoding()
    if _fsencoding == 'mbcs':
        # Windows 'mbcs' codec does not support surrogateescape.
        _fserrors = 'strict'
    else:
        _fserrors = 'surrogateescape'

    def fsencode(filename):
        # Return *filename* encoded to bytes with the filesystem encoding.
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, text_type):
            return filename.encode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)

    def fsdecode(filename):
        # Return *filename* decoded to text with the filesystem encoding.
        if isinstance(filename, text_type):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
# PEP 263 source-encoding detection: use the stdlib implementation when
# available, otherwise a vendored copy.
try:
    from tokenize import detect_encoding
except ImportError: # pragma: no cover
    from codecs import BOM_UTF8, lookup
    import re

    # Raw string: \s and \w are regex escapes, not string escapes.
    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc

    def detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file.  It requires one argument, readline,
        in the same way as the tokenize() generator.

        It will call readline a maximum of twice, and return the encoding used
        (as a string) and a list of any lines (left as bytes) it has read in.

        It detects the encoding from the presence of a utf-8 bom or an encoding
        cookie as specified in pep-0263.  If both a bom and a cookie are present,
        but disagree, a SyntaxError will be raised.  If the encoding cookie is an
        invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
        'utf-8-sig' is returned.

        If no encoding is specified, then the default of 'utf-8' will be returned.
        """
        try:
            filename = readline.__self__.name
        except AttributeError:
            filename = None
        bom_found = False
        encoding = None
        default = 'utf-8'

        def read_or_stop():
            try:
                return readline()
            except StopIteration:
                return b''

        def find_cookie(line):
            try:
                # Decode as UTF-8. Either the line is an encoding declaration,
                # in which case it should be pure ASCII, or it must be UTF-8
                # per default encoding.
                line_string = line.decode('utf-8')
            except UnicodeDecodeError:
                msg = "invalid or missing encoding declaration"
                if filename is not None:
                    msg = '{} for {!r}'.format(msg, filename)
                raise SyntaxError(msg)

            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "unknown encoding: " + encoding
                else:
                    msg = "unknown encoding for {!r}: {}".format(filename,
                                                                 encoding)
                raise SyntaxError(msg)

            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    if filename is None:
                        msg = 'encoding problem: utf-8'
                    else:
                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
                    raise SyntaxError(msg)
                encoding += '-sig'
            return encoding

        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []

        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]

        second = read_or_stop()
        if not second:
            return default, [first]

        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]

        return default, [first, second]
# For converting & <-> &amp; etc. html.escape exists from Python 3.2;
# html.unescape from Python 3.4.
try:
    from html import escape
except ImportError:
    from cgi import escape
if sys.version_info[:2] < (3, 4):
    # Use the (deprecated) HTMLParser.unescape on older versions.
    unescape = HTMLParser().unescape
else:
    from html import unescape
# collections.ChainMap exists from Python 3.3; otherwise vendor it (the
# fallback class continues below) together with reprlib.recursive_repr.
try:
    from collections import ChainMap
except ImportError: # pragma: no cover
    from collections import MutableMapping

    try:
        from reprlib import recursive_repr as _recursive_repr
    except ImportError:
        def _recursive_repr(fillvalue='...'):
            '''
            Decorator to make a repr function return fillvalue for a recursive
            call
            '''

            def decorating_function(user_function):
                repr_running = set()

                def wrapper(self):
                    # NOTE(review): get_ident is not imported in this block —
                    # presumably provided elsewhere in the module; verify.
                    key = id(self), get_ident()
                    if key in repr_running:
                        return fillvalue
                    repr_running.add(key)
                    try:
                        result = user_function(self)
                    finally:
                        repr_running.discard(key)
                    return result

                # Can't use functools.wraps() here because of bootstrap issues
                wrapper.__module__ = getattr(user_function, '__module__')
                wrapper.__doc__ = getattr(user_function, '__doc__')
                wrapper.__name__ = getattr(user_function, '__name__')
                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
                return wrapper

            return decorating_function
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.
    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.
        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        # Hook for subclasses; called by __getitem__ when no map has the key.
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]             # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        return len(set().union(*self.maps))     # reuses stored hash values if possible

    def __iter__(self):
        # NOTE: iteration order is arbitrary (set-based union of all maps).
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    @_recursive_repr()
    def __repr__(self):
        return '{0.__class__.__name__}({1})'.format(
            self, ', '.join(map(repr, self.maps)))

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self, m=None):                # like Django's Context.push()
        '''New ChainMap with a new map followed by all previous maps.

        If no map is provided, an empty dict is used, so the zero-argument
        form keeps its previous behaviour.  Accepting *m* matches the
        signature of collections.ChainMap.new_child.
        '''
        if m is None:
            m = {}
        return self.__class__(m, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        # Writes always target the first mapping only.
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
try:
from imp import cache_from_source
except ImportError: # pragma: no cover
def cache_from_source(path, debug_override=None):
    """Return the legacy bytecode filename for *path* (a '.py' source file).

    Appends 'c' for a debug build ('.pyc') or 'o' for an optimised one
    ('.pyo').  When *debug_override* is None, the interpreter's __debug__
    flag decides which suffix is used.
    """
    assert path.endswith('.py')
    use_debug = __debug__ if debug_override is None else debug_override
    return path + ('c' if use_debug else 'o')
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # If __root already exists, __init__ was called on a live
            # instance (e.g. via update) and the list must not be reset.
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list forward from the sentinel.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list backward from the sentinel.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the reference cycles in the links so the garbage
            # collector is not needed to reclaim them.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (newest entry).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest entry).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v
        '''
        # NOTE: 'self' is taken from *args so that a key named 'self' can
        # still be passed as a keyword argument.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running=None):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an
        # OrderedDict (directly or indirectly) contains itself.
        if not _repr_running: _repr_running = {}
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the internal bookkeeping attributes; they are rebuilt on
        # unpickling from the items list.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
try:
from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)

def valid_ident(s):
    """Return True if *s* is a valid Python identifier, else raise ValueError."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Values fetched from the dict are passed through
    ``self.configurator.convert`` so that nested dicts/lists/tuples and
    conversion-format strings (e.g. 'ext://', 'cfg://') are resolved
    lazily.  The ``configurator`` attribute is assigned externally (see
    BaseConfigurator.__init__).
    """

    def _converted(self, key, value, save):
        # Shared convert/save/link logic for __getitem__, get and pop.
        # When *save* is true and conversion produced a new object, write it
        # back so the conversion only runs once per entry; container results
        # are linked to their parent for cfg:// resolution.
        result = self.configurator.convert(value)
        if value is not result:
            if save:
                self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def __getitem__(self, key):
        return self._converted(key, dict.__getitem__(self, key), save=True)

    def get(self, key, default=None):
        return self._converted(key, dict.get(self, key, default), save=True)

    def pop(self, key, default=None):
        # The entry has been removed, so the converted value cannot be
        # saved back into the dict.
        return self._converted(key, dict.pop(self, key, default), save=False)
class ConvertingList(list):
    """A converting list wrapper.

    Items read from the list go through ``self.configurator.convert`` so
    that nested containers and conversion-format strings are resolved
    lazily.  ``configurator`` is assigned externally.
    """

    def __getitem__(self, key):
        raw = list.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            # Cache the converted value so the work happens only once.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def pop(self, idx=-1):
        raw = list.pop(self, idx)
        converted = self.configurator.convert(raw)
        if converted is not raw and type(converted) in (ConvertingDict,
                                                        ConvertingList,
                                                        ConvertingTuple):
            # The item is gone from the list, so only record the parent link.
            converted.parent = self
        return converted
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Tuples are immutable, so converted values are never written back; the
    conversion runs on each access.  ``configurator`` is assigned
    externally.
    """

    def __getitem__(self, key):
        raw = tuple.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.

    Wraps a configuration dict in a ConvertingDict and provides the
    machinery for resolving 'ext://' (import) and 'cfg://' (config
    cross-reference) value strings.
    """

    # Matches strings of the form '<prefix>://<suffix>'.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Grammar fragments used by cfg_convert to parse 'a.b[0].c' style paths.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # Maps conversion prefixes to the method names that handle them.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = staticmethod(__import__)

    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # The fragment may be a submodule that has not been
                    # imported yet; import it and retry the attribute lookup.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            # Preserve the original exception chain for debugging.
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        # The path must start with a word naming a top-level config key.
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            # Consume '.word' and '[index]' accessors until the path is
            # exhausted.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            # Non-numeric index: always a string key.
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # Container rejects int keys; fall back to str.
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, string_types):
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            # '.' entries are set as attributes after construction.
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
| mit |
wonderfly/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 62 | 29611 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# Use a raw string: '\.' in a normal string literal is an invalid escape
# sequence (DeprecationWarning on Python 3.6+); the value is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'

# Kubeconfig shared by the kubelet and kube-proxy services.
kubeconfig_path = '/root/cdk/kubeconfig'

# Make snap-installed binaries (kubelet, kubectl, ...) resolvable on PATH.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Handle a charm upgrade: reset stale state so everything reconverges."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')
    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.destroy('feature-gates')
    kubelet_opts.destroy('experimental-nvidia-gpus')

    # Force the CNI plugins, worker config and ingress to be re-created.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag an upgrade if any of the attached snap resources changed on disk."""
    hookenv.status_set('maintenance', 'Checking resources')
    resource_paths = [
        hookenv.resource_get(name)
        for name in ('kubectl', 'kubelet', 'kube-proxy')
    ]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark a snap upgrade as pending, and auto-approve it when allowed."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    first_deploy = config.previous('channel') is None
    manual_gate = config.get('require-manual-upgrade')
    # Proceed automatically on first deploy, or when manual approval is off.
    if first_deploy or not manual_gate:
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove the pre-snap (binary-based) services, files and reactive state."""
    # remove old states
    remove_state('kubernetes-worker.components.installed')

    # disable old services
    for old_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(old_service))
        service_stop(old_service)

    # cleanup old files
    stale_paths = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)

    # cleanup old flagmanagers
    for component in ('kubelet', 'kube-proxy'):
        FlagManager(component).destroy_all()
@when('config.changed.channel')
def channel_changed():
    """A new snap channel was configured; flag that an upgrade is needed."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block until the operator approves the upgrade via the upgrade action."""
    msg = 'Needs manual upgrade, run the upgrade action'
    hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the worker snaps from the configured channel.

    Runs once an upgrade has been specified (automatically or via the
    upgrade action), then clears the upgrade-pending states.
    """
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    # Install each snap in turn instead of repeating the same stanza three
    # times; classic confinement is required by the kubernetes snaps.
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the kubelet service
        - stop the kube-proxy service
        - remove the 'kubernetes-worker.cni-plugins.installed' state
    '''
    if os.path.isfile(kubeconfig_path):
        # Only attempt to deregister the node when a kubeconfig exists to
        # reach the API server with.
        kubectl('delete', 'node', gethostname())
    service_stop('kubelet')
    service_stop('kube-proxy')
    remove_state('kubernetes-worker.cni-plugins.installed')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource.

    Blocks the unit if the resource is missing, failed to fetch, or is an
    empty placeholder publication.
    '''
    charm_dir = os.getenv('CHARM_DIR')

    # Get the resource via resource_get
    try:
        archive = hookenv.resource_get('cni')
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')

    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    # Install the unpacked binaries into their final locations.
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]

    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)

    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    raw = check_output(['kubelet', '--version'])
    # Output looks like b'Kubernetes v1.x.y\n'; keep just the version number.
    version = raw.split(b' v')[-1].rstrip()
    hookenv.application_version_set(version)
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify the user that we are in a transient state and the application
    is still converging. Potentially remotely, or we may be in a detached loop
    wait state '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool waiting
    # to self host the dns pod, and configure itself to query the dns service
    # declared in the kube-system namespace
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    '''Update the status message with the current status of kubelet.'''
    # kube_control is unused here but required by the @when relation signature.
    update_kubelet_status()
def update_kubelet_status():
    ''' There are different states that the kubelet can be in, where we are
    waiting for dns, waiting for cluster turnup, or ready to serve
    applications.'''
    # Query systemd once: the original if/elif pair invoked
    # _systemctl_is_active twice, and the second condition was simply the
    # negation of the first.
    if _systemctl_is_active('snap.kubelet.daemon'):
        hookenv.status_set('active', 'Kubernetes worker running.')
    else:
        # if kubelet is not running, we're waiting on something else to converge
        hookenv.status_set('waiting', 'Waiting for kubelet to start.')
@when('certificates.available')
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # The unit's public address doubles as the certificate Common Name.
    common_name = hookenv.unit_public_ip()

    # Subject Alternative Names the tls layer will add to the server cert.
    sans = [
        common_name,
        hookenv.unit_private_ip(),
        gethostname(),
    ]

    # Request the cert under a path-safe name derived from the unit name.
    cert_name = hookenv.local_unit().replace('/', '_')
    tls.request_server_cert(common_name, sans, cert_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    # Evaluate every data_changed() call unconditionally: data_changed()
    # records the new value as a side effect, and the previous short-circuit
    # 'or' chain skipped that bookkeeping for later keys once an earlier key
    # changed, causing a spurious extra restart on the next hook invocation.
    changed = [
        data_changed('kube-api-servers', servers),
        data_changed('kube-dns', dns),
        data_changed('cluster-cidr', cluster_cidr),
    ]
    if any(changed):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved', 'kube-control.dns.available',
      'cni.available', 'kubernetes-worker.restart-needed')
def start_worker(kube_api, kube_control, cni):
    ''' Start kubelet using the provided API and DNS info.

    Runs whenever a restart has been flagged and all of the prerequisites
    (snaps, certificates, DNS, CNI) are in place.
    '''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        # The CNI subordinate has not published a cidr yet; try again later.
        hookenv.log('Waiting for cluster cidr.')
        return

    # set --allow-privileged flag for kubelet
    set_privileged()

    create_config(random.choice(servers))
    configure_worker_services(servers, dns, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set worker configuration on the CNI relation. This lets the CNI
    subordinate know that we're the worker so it can respond accordingly. '''
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    ''' Ingress is a toggled state. Remove ingress.available if set when
    toggled '''
    # render_and_launch_ingress() will re-evaluate the config and either
    # relaunch or delete the ingress workloads.
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    # Clear the trigger so this only runs once per SDN change.
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' If configuration has ingress RC enabled, launch the ingress load
    balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()

    # If ingress is enabled, launch the ingress controller
    if config.get('ingress'):
        launch_default_ingress_controller()
    else:
        # Ingress was disabled: tear down the workloads and close the ports
        # that were opened for it.
        hookenv.log('Deleting the http backend and ingress.')
        kubectl_manifest('delete',
                         '/root/cdk/addons/default-http-backend.yaml')
        kubectl_manifest('delete',
                         '/root/cdk/addons/ingress-replication-controller.yaml')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
    ''' Scale the number of ingress controller replicas to match the number of
    nodes. '''
    try:
        node_names = kubectl('get', 'nodes', '-o', 'name').splitlines()
        kubectl('scale', '--replicas=%d' % len(node_names), 'rc/nginx-ingress-controller')  # noqa
    except CalledProcessError:
        hookenv.log('Failed to scale ingress controllers. Will attempt again next update.')  # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the node.
    '''
    # scrub and try to format an array from the configuration option
    config = hookenv.config()
    desired = _parse_labels(config.get('labels'))

    # Parse the previous label set so we can diff against it; on the first
    # run there is no previous config, so diff against an empty set.
    old_config = config.previous('labels')
    if old_config:
        existing = _parse_labels(old_config)
        hookenv.log('previous labels: {}'.format(existing))
    else:
        existing = _parse_labels("")

    # Delete labels that were dropped from the config.  Labels still present
    # in the desired set are left alone here; they get (re)set atomically in
    # the loop below.
    for stale in existing:
        if stale in desired:
            continue
        hookenv.log('Deleting node label {}'.format(stale))
        try:
            _apply_node_label(stale, delete=True)
        except CalledProcessError:
            hookenv.log('Error removing node label {}'.format(stale))

    # Atomically set a label
    for label in desired:
        _apply_node_label(label)
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg prints bytes with a trailing newline; strip it and decode to str.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server):
    '''Create a kubernetes configuration for the worker unit.'''
    # Pull the tls material paths from the tls-client layer options.
    opts = layer.options('tls-client')
    ca = opts.get('ca_certificate_path')
    key = opts.get('client_key_path')
    cert = opts.get('client_certificate_path')

    # One kubeconfig per consumer: the ubuntu user, root, and the worker
    # services (kubelet / kube-proxy).
    targets = [
        ('/home/ubuntu/.kube/config', 'ubuntu'),
        ('/root/.kube/config', 'root'),
        (kubeconfig_path, 'kubelet'),
    ]
    for path, user in targets:
        create_kubeconfig(path, server, ca, key, cert, user=user)
        if user == 'ubuntu':
            # Make the config dir readable by the ubuntu user so juju scp works.
            check_call(['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube'])
def configure_worker_services(api_servers, dns, cluster_cidr):
    ''' Add remaining flags for the worker services and configure snaps to use
    them.

    :param api_servers: list of API server URLs; one is chosen at random
                        for kube-proxy's master setting.
    :param dns: dict with 'sdn-ip' and 'domain' for cluster DNS.
    :param cluster_cidr: pod network CIDR published by the CNI subordinate.
    '''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # kubelet flags: kubeconfig, networking, logging, DNS and TLS material.
    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add('require-kubeconfig', 'true')
    kubelet_opts.add('kubeconfig', kubeconfig_path)
    kubelet_opts.add('network-plugin', 'cni')
    kubelet_opts.add('logtostderr', 'true')
    kubelet_opts.add('v', '0')
    kubelet_opts.add('address', '0.0.0.0')
    kubelet_opts.add('port', '10250')
    kubelet_opts.add('cluster-dns', dns['sdn-ip'])
    kubelet_opts.add('cluster-domain', dns['domain'])
    kubelet_opts.add('anonymous-auth', 'false')
    kubelet_opts.add('client-ca-file', ca_cert_path)
    kubelet_opts.add('tls-cert-file', server_cert_path)
    kubelet_opts.add('tls-private-key-file', server_key_path)

    # kube-proxy flags; 'master' is pinned (strict) to one of the API servers.
    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('cluster-cidr', cluster_cidr)
    kube_proxy_opts.add('kubeconfig', kubeconfig_path)
    kube_proxy_opts.add('logtostderr', 'true')
    kube_proxy_opts.add('v', '0')
    kube_proxy_opts.add('master', random.choice(api_servers), strict=True)

    # Hand the assembled flag sets to the snaps via 'snap set'.
    cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
    check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
                      context='juju-context', cluster='juju-cluster'):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    base = 'kubectl config --kubeconfig={0}'.format(kubeconfig)
    commands = [
        # Register the cluster with the address of the master server.
        '{0} set-cluster {1} --server={2} --certificate-authority={3} '
        '--embed-certs=true'.format(base, cluster, server, ca),
        # Register the user credentials using the client flags.
        '{0} set-credentials {1} --client-key={2} --client-certificate={3} '
        '--embed-certs=true'.format(base, user, key, certificate),
        # Tie the user and cluster together in a default context.
        '{0} set-context {1} --cluster={2} --user={3}'.format(
            base, context, cluster, user),
        # Make the config use this new context.
        '{0} use-context {1}'.format(base, context),
    ]
    for command in commands:
        check_call(split(command))
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404).

    Renders and applies both addon manifests; on any failure the ingress
    ports are closed and the attempt is retried on the next update.
    '''
    context = {'arch': arch()}
    # Render the default http backend (404) replicationcontroller manifest.
    if not _render_and_apply(
            'default-http-backend.yaml', context,
            'Creating the default http backend.',
            'Failed to create default-http-backend. '
            'Will attempt again next update.'):
        return
    # Render the ingress replication controller manifest.
    if not _render_and_apply(
            'ingress-replication-controller.yaml', context,
            'Creating the ingress replication controller.',
            'Failed to create ingress controller. '
            'Will attempt again next update.'):
        return
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)


def _render_and_apply(template, context, log_msg, failure_msg):
    ''' Render an addon manifest into /root/cdk/addons and kubectl-apply it.

    On failure: log the error and failure_msg, close the ingress ports, and
    return False so the caller can bail out. Returns True on success.
    '''
    manifest = '/root/cdk/addons/{}'.format(template)
    render(template, manifest, context)
    hookenv.log(log_msg)
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(failure_msg)
        hookenv.close_port(80)
        hookenv.close_port(443)
        return False
    return True
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    # Restart each snap's systemd daemon unit.
    for svc in ('kube-proxy', 'kubelet'):
        service_restart('snap.%s.daemon' % svc)
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    # One https URL per unit, across every service on the relation.
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeconfig_path]
    command.extend(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
def kubectl_success(*args):
    ''' Run kubectl with the given args. Return True if successful, False
    otherwise. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    # Deletions are a special case: remove requested resources immediately.
    if operation == 'delete':
        return kubectl_success(operation, '-f', manifest, '--now')
    # Guard against re-creating the same manifest multiple times: if the
    # definition is already present, assume creation succeeded earlier.
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Anything else is passed straight through to kubectl.
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    # First-time NRPE setup: record that initial configuration has happened,
    # then delegate to update_nrpe_config() to write the check definitions.
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    # (Re)write the NRPE checks for the worker's two systemd service units.
    monitored = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    nagios_hostname = nrpe.get_nagios_hostname()
    unit_name = nrpe.get_nagios_unit_name()
    checker = nrpe.NRPE(hostname=nagios_hostname)
    nrpe.add_init_service_checks(checker, monitored, unit_name)
    checker.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    remove_state('nrpe-external-master.initial-config')
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic, so use the charm-helpers code to drop the checks for the
    # worker's systemd service units.
    checker = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    for shortname in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        checker.remove_check(shortname=shortname)
def set_privileged():
    """Update the allow-privileged flag for kubelet.
    """
    privileged = hookenv.config('allow-privileged')
    # 'auto' resolves to true only when GPU support has been enabled.
    if privileged == 'auto':
        privileged = (
            'true' if is_state('kubernetes-worker.gpu.enabled') else 'false')
    hookenv.log('Setting {}={}'.format('allow-privileged', privileged))
    FlagManager('kubelet').add('allow-privileged', privileged)
    # Mirror the resolved value into a reactive state for other handlers.
    if privileged == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    # Flag the services for restart so the new privilege setting takes
    # effect, then clear the changed state so this handler doesn't re-fire.
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        # GPU workloads need privileged containers; tell the operator how
        # to opt in rather than enabling silently.
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but without running nvidia-smi
        # first, k8s reports the node as having 0 gpus (as shown by
        # `kubectl get nodes -o yaml`).
        check_call(['nvidia-smi'])
    except CalledProcessError as err:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(err)
        return

    # Pre-1.6 kubelets expose GPUs through a different flag.
    kubelet_opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('feature-gates', 'Accelerators=true')

    # Label the node so gpu workloads can be scheduled onto it.
    for label in ('gpu=true', 'cuda=true'):
        _apply_node_label(label, overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.
    This handler fires when we're running in gpu mode, and then the operator
    sets allow-privileged="false". Since we can no longer run privileged
    containers, we need to disable gpu mode.
    """
    hookenv.log('Disabling gpu mode')

    # Undo whichever version-specific kubelet flag enable_gpu() added.
    kubelet_opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        kubelet_opts.destroy('experimental-nvidia-gpus')
    else:
        kubelet_opts.remove('feature-gates', 'Accelerators=true')

    # Drop the scheduling labels added by enable_gpu().
    for label in ('gpu', 'cuda'):
        _apply_node_label(label, delete=True)

    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.

    Runs whenever both the gpu.enabled state and the kube-control relation
    are present, publishing the GPU flag over the relation.
    """
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.

    Counterpart to notify_master_gpu_enabled(); publishes a False GPU flag
    over the kube-control relation.
    """
    kube_control.set_gpu(False)
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.
    """
    # Block the unit with an actionable message that names this unit's
    # service, so the operator can paste the relate command directly.
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def _apply_node_label(label, delete=False, overwrite=False):
    ''' Invoke kubectl to apply node label changes '''
    hostname = gethostname()
    # TODO: Make this part of the kubectl calls instead of a special string
    if delete is True:
        # Deleting a label is spelled "kubectl label node <host> <key>-".
        key = label.split('=')[0]
        cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'.format(
            kubeconfig_path, hostname, key)
    else:
        cmd = 'kubectl --kubeconfig={0} label node {1} {2}'.format(
            kubeconfig_path, hostname, label)
        if overwrite:
            cmd += ' --overwrite'
    check_call(split(cmd))
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
| apache-2.0 |
nicolasnoble/grpc | src/python/grpcio_tests/tests/unit/thread_pool.py | 13 | 1193 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from concurrent import futures
class RecordingThreadPool(futures.ThreadPoolExecutor):
    """A thread pool that records if used."""

    def __init__(self, max_workers):
        # Work is delegated to an inner executor; this wrapper only records
        # whether submit() was ever called (deliberately no super().__init__,
        # since none of the parent's machinery is used directly).
        self._tp_executor = futures.ThreadPoolExecutor(max_workers=max_workers)
        self._lock = threading.Lock()
        self._was_used = False

    def submit(self, fn, *args, **kwargs):  # pylint: disable=arguments-differ
        """Record the use, then delegate to the inner executor."""
        with self._lock:
            self._was_used = True
        # Return the Future, as the Executor.submit contract requires; the
        # original discarded it, so callers could never get their result.
        return self._tp_executor.submit(fn, *args, **kwargs)

    def was_used(self):
        """Return True if submit() has ever been called."""
        with self._lock:
            return self._was_used
| apache-2.0 |
thaumos/ansible | lib/ansible/modules/cloud/amazon/ecs_task.py | 17 | 15285 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_task
short_description: run, start or stop a task in ecs
description:
- Creates or deletes instances of task definitions.
version_added: "2.0"
author: Mark Chance (@Java1Guy)
requirements: [ json, botocore, boto3 ]
options:
operation:
description:
- Which task operation to execute
required: True
choices: ['run', 'start', 'stop']
cluster:
description:
- The name of the cluster to run the task on
required: False
task_definition:
description:
- The task definition to start or run
required: False
overrides:
description:
- A dictionary of values to pass to the new instances
required: False
count:
description:
- How many new instances to start
required: False
task:
description:
- The task to stop
required: False
container_instances:
description:
- The list of container instances on which to deploy the task
required: False
started_by:
description:
- A value showing who or what started the task (for informational purposes)
required: False
network_configuration:
description:
- network configuration of the service. Only applicable for task definitions created with C(awsvpc) I(network_mode).
- I(network_configuration) has two keys, I(subnets), a list of subnet IDs to which the task is attached and I(security_groups),
a list of group names or group IDs for the task
version_added: 2.6
launch_type:
description:
- The launch type on which to run your service
required: false
version_added: 2.8
choices: ["EC2", "FARGATE"]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of run task
- name: Run task
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
count: 1
started_by: ansible_user
register: task_output
# Simple example of start task
- name: Start a task
ecs_task:
operation: start
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
container_instances:
- arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
started_by: ansible_user
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: RUN a task on Fargate
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
started_by: ansible_user
launch_type: FARGATE
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: Stop a task
ecs_task:
operation: stop
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
'''
RETURN = '''
task:
description: details about the task that was started
returned: success
type: complex
contains:
taskArn:
description: The Amazon Resource Name (ARN) that identifies the task.
returned: always
type: str
clusterArn:
description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
returned: only when details is true
type: str
taskDefinitionArn:
description: The Amazon Resource Name (ARN) of the task definition.
returned: only when details is true
type: str
containerInstanceArn:
description: The Amazon Resource Name (ARN) of the container running the task.
returned: only when details is true
type: str
overrides:
description: The container overrides set for this task.
returned: only when details is true
type: list of complex
lastStatus:
description: The last recorded status of the task.
returned: only when details is true
type: str
desiredStatus:
description: The desired status of the task.
returned: only when details is true
type: str
containers:
description: The container details.
returned: only when details is true
type: list of complex
startedBy:
description: The user who started the task.
returned: only when details is true
type: str
stoppedReason:
description: The reason why the task was stopped.
returned: only when details is true
type: str
createdAt:
description: The timestamp of when the task was created.
returned: only when details is true
type: str
startedAt:
description: The timestamp of when the task was started.
returned: only when details is true
type: str
stoppedAt:
description: The timestamp of when the task was stopped.
returned: only when details is true
type: str
launchType:
description: The launch type on which to run your task.
returned: always
type: str
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_ec2_security_group_ids_from_names
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
class EcsExecManager:
    """Handles ECS Tasks"""
    def __init__(self, module):
        # Keep the AnsibleModule handle plus boto3 clients: 'ecs' for task
        # operations and 'ec2' for subnet/security-group lookups.
        self.module = module
        self.ecs = module.client('ecs')
        self.ec2 = module.client('ec2')
    def format_network_configuration(self, network_config):
        """Translate the module's network_configuration parameter into the
        awsvpcConfiguration structure the ECS API expects.

        Security groups given by name (anything not starting with 'sg-') are
        resolved to group IDs within the VPC of the first listed subnet.
        Calls fail_json (which exits) when subnets are missing or the group
        lookup fails.
        """
        result = dict()
        if 'subnets' in network_config:
            result['subnets'] = network_config['subnets']
        else:
            self.module.fail_json(msg="Network configuration must include subnets")
        if 'security_groups' in network_config:
            groups = network_config['security_groups']
            if any(not sg.startswith('sg-') for sg in groups):
                try:
                    # Only the first subnet is inspected; assumes all subnets
                    # share one VPC -- TODO confirm for multi-VPC configs.
                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
                    groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't look up security groups")
            result['securityGroups'] = groups
        return dict(awsvpcConfiguration=result)
    def list_tasks(self, cluster_name, service_name, status):
        """Return the ARN of the first task in the family whose ARN ends with
        service_name, or None when no matching task is found."""
        response = self.ecs.list_tasks(
            cluster=cluster_name,
            family=service_name,
            desiredStatus=status
        )
        if len(response['taskArns']) > 0:
            for c in response['taskArns']:
                if c.endswith(service_name):
                    return c
        return None
    def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type):
        """Run `count` instances of a task definition and return the list of
        started tasks from the API response (the 'failures' list from the
        response is not surfaced)."""
        if overrides is None:
            overrides = dict()
        params = dict(cluster=cluster, taskDefinition=task_definition,
                      overrides=overrides, count=count, startedBy=startedBy)
        # Optional parameters are added only when the operator supplied them.
        if self.module.params['network_configuration']:
            params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
        if launch_type:
            params['launchType'] = launch_type
        try:
            response = self.ecs.run_task(**params)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't run task")
        # include tasks and failures
        return response['tasks']
    def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
        """Start a task on specific container instances and return the list
        of started tasks from the API response."""
        args = dict()
        # Every argument is optional to the API; pass only truthy values.
        if cluster:
            args['cluster'] = cluster
        if task_definition:
            args['taskDefinition'] = task_definition
        if overrides:
            args['overrides'] = overrides
        if container_instances:
            args['containerInstances'] = container_instances
        if startedBy:
            args['startedBy'] = startedBy
        if self.module.params['network_configuration']:
            args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
        try:
            response = self.ecs.start_task(**args)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't start task")
        # include tasks and failures
        return response['tasks']
    def stop_task(self, cluster, task):
        """Stop a task and return its description from the API response.

        NOTE(review): unlike run_task/start_task there is no ClientError
        handling here; API errors propagate as raw exceptions.
        """
        response = self.ecs.stop_task(cluster=cluster, task=task)
        return response['task']
    def ecs_api_handles_launch_type(self):
        """Return True when the installed botocore (>= 1.8.4) accepts the
        launchType keyword."""
        from distutils.version import LooseVersion
        # There doesn't seem to be a nice way to inspect botocore to look
        # for attributes (and launchType is not an explicit argument
        # to e.g. ecs.run_task, it's just passed as a keyword argument),
        # so gate on the library version instead.
        return LooseVersion(botocore.__version__) >= LooseVersion('1.8.4')
    def ecs_api_handles_network_configuration(self):
        """Return True when the installed botocore (>= 1.7.44) accepts the
        networkConfiguration keyword."""
        from distutils.version import LooseVersion
        # There doesn't seem to be a nice way to inspect botocore to look
        # for attributes (and networkConfiguration is not an explicit argument
        # to e.g. ecs.run_task, it's just passed as a keyword argument)
        return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
def main():
    """Entry point: validate parameters, then run/start/stop the ECS task."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE'])
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE', ['network_configuration'])])

    # Validate inputs. AnsibleModule populates every declared parameter
    # (missing ones become None), so the previous
    # "'x' not in module.params and module.params['x'] is None" guards could
    # never fire: the "not in" half was always False and the "and"
    # short-circuited. Plain "is None" is the correct check.
    if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    # Both optional features require newer botocore than the module minimum.
    if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
        module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')

    if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
        module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')

    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'],
                    module.params['launch_type'])
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by']
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # it exists, so we should delete it and mark changed.
                # return info about the cluster deleted
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'],
                    module.params['task']
                )
            results['changed'] = True

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
winklerand/pandas | pandas/io/msgpack/__init__.py | 26 | 1233 | # coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple('ExtType', 'code data')):
    """ExtType represents ext type in msgpack."""

    def __new__(cls, code, data):
        # msgpack ext types carry a small integer tag plus a raw bytes
        # payload; reject anything else before building the tuple.
        if not isinstance(code, int):
            raise TypeError("code must be int")
        if not isinstance(data, bytes):
            raise TypeError("data must be bytes")
        if code < 0 or code > 127:
            raise ValueError("code must be 0~127")
        return super(ExtType, cls).__new__(cls, code, data)
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
    """
    Pack object `o` and write it to `stream`

    See :class:`Packer` for options.
    """
    # Serialize in one shot and push the bytes to the stream.
    stream.write(Packer(**kwargs).pack(o))
def packb(o, **kwargs):
    """
    Pack object `o` and return packed bytes

    See :class:`Packer` for options.
    """
    packer = Packer(**kwargs)
    return packer.pack(o)
# Aliases for API compatibility with simplejson/marshal/pickle
# (load/loads deserialize, dump/dumps serialize).
load = unpack
loads = unpackb
dump = pack
dumps = packb
| bsd-3-clause |
lukw00/buck | third-party/py/unittest2/unittest2/__init__.py | 155 | 2406 | """
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', '__version__', 'collector']
__version__ = '0.5.1'
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from unittest2.collector import collector
from unittest2.result import TestResult
from unittest2.case import (
TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure
)
from unittest2.suite import BaseTestSuite, TestSuite
from unittest2.loader import (
TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases
)
from unittest2.main import TestProgram, main, main_
from unittest2.runner import TextTestRunner, TextTestResult
try:
from unittest2.signals import (
installHandler, registerResult, removeResult, removeHandler
)
except ImportError:
# Compatibility with platforms that don't have the signal module
pass
else:
__all__.extend(['installHandler', 'registerResult', 'removeResult',
'removeHandler'])
# deprecated
_TextTestResult = TextTestResult
__unittest = True | apache-2.0 |
skg-net/ansible | lib/ansible/modules/cloud/ovirt/ovirt_nic.py | 2 | 9235 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_nic
short_description: Module to manage network interfaces of Virtual Machines in oVirt/RHV
version_added: "2.3"
author:
- Ondra Machacek (@machacekondra)
description:
- Module to manage network interfaces of Virtual Machines in oVirt/RHV.
options:
name:
description:
- Name of the network interface to manage.
required: true
vm:
description:
- Name of the Virtual Machine to manage.
- You must provide either C(vm) parameter or C(template) parameter.
template:
description:
- Name of the template to manage.
- You must provide either C(vm) parameter or C(template) parameter.
version_added: "2.4"
state:
description:
- Should the Virtual Machine NIC be present/absent/plugged/unplugged.
choices: [ absent, plugged, present, unplugged ]
default: present
network:
description:
- Logical network to which the VM network interface should use,
by default Empty network is used if network is not specified.
profile:
description:
- Virtual network interface profile to be attached to VM network interface.
interface:
description:
- "Type of the network interface. For example e1000, pci_passthrough, rtl8139, rtl8139_virtio, spapr_vlan or virtio."
- "It's required parameter when creating the new NIC."
mac_address:
description:
- Custom MAC address of the network interface, by default it's obtained from MAC pool.
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Add NIC to VM
ovirt_nic:
state: present
vm: myvm
name: mynic
interface: e1000
mac_address: 00:1a:4a:16:01:56
profile: ovirtmgmt
network: ovirtmgmt
- name: Plug NIC to VM
ovirt_nic:
state: plugged
vm: myvm
name: mynic
- name: Unplug NIC from VM
ovirt_nic:
state: unplugged
vm: myvm
name: mynic
- name: Add NIC to template
ovirt_nic:
auth: "{{ ovirt_auth }}"
state: present
template: my_template
name: nic1
interface: virtio
profile: ovirtmgmt
network: ovirtmgmt
- name: Remove NIC from VM
ovirt_nic:
state: absent
vm: myvm
name: mynic
'''
RETURN = '''
id:
description: ID of the network interface which is managed
returned: On success if network interface is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
nic:
description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
returned: On success if network interface is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
class EntityNicsModule(BaseModule):
    """BaseModule subclass that builds and compares NIC entities for a VM
    or template."""

    def __init__(self, *args, **kwargs):
        super(EntityNicsModule, self).__init__(*args, **kwargs)
        # vNIC profile id, resolved by the caller from the profile/network
        # parameters. Plain attribute: the previous property/setter pair
        # added no validation or logic, so it was removed.
        self.vnic_id = None

    def build_entity(self):
        """Build the otypes.Nic from module parameters; optional pieces are
        omitted (None) when their parameter is unset."""
        return otypes.Nic(
            name=self._module.params.get('name'),
            interface=otypes.NicInterface(
                self._module.params.get('interface')
            ) if self._module.params.get('interface') else None,
            vnic_profile=otypes.VnicProfile(
                id=self.vnic_id,
            ) if self.vnic_id else None,
            mac=otypes.Mac(
                address=self._module.params.get('mac_address')
            ) if self._module.params.get('mac_address') else None,
        )

    def update_check(self, entity):
        """Return True when the existing NIC already matches the requested
        parameters. Templates have no MAC address to compare; returns None
        when neither 'vm' nor 'template' is set (treated as falsy)."""
        if self._module.params.get('vm'):
            return (
                equal(self._module.params.get('interface'), str(entity.interface)) and
                equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
                equal(self._module.params.get('mac_address'), entity.mac.address)
            )
        elif self._module.params.get('template'):
            return (
                equal(self._module.params.get('interface'), str(entity.interface)) and
                equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile))
            )
def main():
    """Ansible entry point: ensure the requested NIC state on a VM or template."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(type='str', default='present', choices=['absent', 'plugged', 'present', 'unplugged']),
        vm=dict(type='str'),
        template=dict(type='str'),
        name=dict(type='str', required=True),
        interface=dict(type='str'),
        profile=dict(type='str'),
        network=dict(type='str'),
        mac_address=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['vm', 'template']],
    )
    if module._name == 'ovirt_nics':
        module.deprecate("The 'ovirt_nics' module is being renamed 'ovirt_nic'", version=2.8)

    check_sdk(module)

    # Bind the names referenced by the finally clause before entering the try
    # block; otherwise a failure while popping 'auth' or opening the connection
    # would raise a NameError during cleanup that masks the original error.
    auth = None
    connection = None
    try:
        # Locate the service that manages the virtual machines and use it to
        # search for the NIC:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        entity_name = None
        if module.params.get('vm'):
            # Locate the VM, where we will manage NICs:
            entity_name = module.params.get('vm')
            collection_service = connection.system_service().vms_service()
        elif module.params.get('template'):
            entity_name = module.params.get('template')
            collection_service = connection.system_service().templates_service()

        # TODO: We have to modify the search_by_name function to accept raise_error=True/False,
        entity = search_by_name(collection_service, entity_name)
        if entity is None:
            raise Exception("Vm/Template '%s' was not found." % entity_name)

        service = collection_service.service(entity.id)
        # Link to the entity's cluster; resolved to a name below when needed.
        cluster_id = entity.cluster

        nics_service = service.nics_service()
        entitynics_module = EntityNicsModule(
            connection=connection,
            module=module,
            service=nics_service,
        )

        # Find vNIC id of the network interface (if any):
        profile = module.params.get('profile')
        if profile and module.params['network']:
            cluster_name = get_link_name(connection, cluster_id)
            dcs_service = connection.system_service().data_centers_service()
            dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
            networks_service = dcs_service.service(dc.id).networks_service()
            network = next(
                (n for n in networks_service.list()
                 if n.name == module.params['network']),
                None
            )
            if network is None:
                raise Exception(
                    "Network '%s' was not found in datacenter '%s'." % (
                        module.params['network'],
                        dc.name
                    )
                )
            for vnic in connection.system_service().vnic_profiles_service().list():
                if vnic.name == profile and vnic.network.id == network.id:
                    entitynics_module.vnic_id = vnic.id

        # Handle appropriate action:
        state = module.params['state']
        if state == 'present':
            ret = entitynics_module.create()
        elif state == 'absent':
            ret = entitynics_module.remove()
        elif state == 'plugged':
            # Ensure the NIC exists, then activate it if it is unplugged.
            entitynics_module.create()
            ret = entitynics_module.action(
                action='activate',
                action_condition=lambda nic: not nic.plugged,
                wait_condition=lambda nic: nic.plugged,
            )
        elif state == 'unplugged':
            # Ensure the NIC exists, then deactivate it if it is plugged.
            entitynics_module.create()
            ret = entitynics_module.action(
                action='deactivate',
                action_condition=lambda nic: nic.plugged,
                wait_condition=lambda nic: not nic.plugged,
            )

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if the connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
| gpl-3.0 |
BehavioralInsightsTeam/edx-platform | openedx/core/lib/xblock_builtin/xblock_discussion/xblock_discussion/__init__.py | 13 | 10830 | # -*- coding: utf-8 -*-
"""
Discussion XBlock
"""
import logging
import urllib
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from django.utils.translation import get_language_bidi
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Scope, String, UNIQUE_ID
from web_fragments.fragment import Fragment
from xblockutils.resources import ResourceLoader
from xblockutils.studio_editable import StudioEditableXBlockMixin
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.xblock_builtin import get_css_dependencies, get_js_dependencies
from xmodule.raw_module import RawDescriptor
from xmodule.xml_module import XmlParserMixin
log = logging.getLogger(__name__)
loader = ResourceLoader(__name__) # pylint: disable=invalid-name
def _(text):
    """
    A noop underscore function that marks strings for extraction.

    Translation-extraction tools (gettext/i18n tooling) collect strings
    wrapped in ``_()``; returning the text unchanged keeps runtime behavior
    identical.
    """
    return text
@XBlock.needs('user')  # pylint: disable=abstract-method
@XBlock.needs('i18n')
class DiscussionXBlock(XBlock, StudioEditableXBlockMixin, XmlParserMixin):
    """
    Provides a discussion forum that is inline with other content in the courseware.

    NOTE(review): this file uses Python 2 idioms (``dict.iteritems``,
    ``urllib.urlencode``) — it targets a Python 2 runtime.
    """

    # Discussion blocks are excluded from course-completion tracking.
    completion_mode = XBlockCompletionMode.EXCLUDED

    # Stable identifier tying this block to its forum topic.
    discussion_id = String(scope=Scope.settings, default=UNIQUE_ID)
    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        default="Discussion",
        scope=Scope.settings
    )
    discussion_category = String(
        display_name=_("Category"),
        default=_("Week 1"),
        help=_(
            "A category name for the discussion. "
            "This name appears in the left pane of the discussion forum for the course."
        ),
        scope=Scope.settings
    )
    discussion_target = String(
        display_name=_("Subcategory"),
        default="Topic-Level Student-Visible Label",
        help=_(
            "A subcategory name for the discussion. "
            "This name appears in the left pane of the discussion forum for the course."
        ),
        scope=Scope.settings
    )
    sort_key = String(scope=Scope.settings)

    # Fields editable via the Studio settings editor (StudioEditableXBlockMixin).
    editable_fields = ["display_name", "discussion_category", "discussion_target"]

    has_author_view = True  # Tells Studio to use author_view

    # support for legacy OLX format - consumed by XmlParserMixin.load_metadata
    metadata_translations = dict(RawDescriptor.metadata_translations)
    metadata_translations['id'] = 'discussion_id'
    metadata_translations['for'] = 'discussion_target'

    @property
    def course_key(self):
        """
        :return: int course id

        NB: The goal is to move this XBlock out of edx-platform, and so we use
        scope_ids.usage_id instead of runtime.course_id so that the code will
        continue to work with workbench-based testing.
        """
        return getattr(self.scope_ids.usage_id, 'course_key', None)

    @property
    def django_user(self):
        """
        Returns django user associated with user currently interacting
        with the XBlock.

        Returns None when no 'user' service is available in the runtime.
        """
        user_service = self.runtime.service(self, 'user')
        if not user_service:
            return None
        return user_service._django_user  # pylint: disable=protected-access

    @staticmethod
    def vendor_js_dependencies():
        """
        Returns list of vendor JS files that this XBlock depends on.

        The helper function that it uses to obtain the list of vendor JS files
        works in conjunction with the Django pipeline to ensure that in development mode
        the files are loaded individually, but in production just the single bundle is loaded.
        """
        return get_js_dependencies('discussion_vendor')

    @staticmethod
    def js_dependencies():
        """
        Returns list of JS files that this XBlock depends on.

        The helper function that it uses to obtain the list of JS files
        works in conjunction with the Django pipeline to ensure that in development mode
        the files are loaded individually, but in production just the single bundle is loaded.
        """
        return get_js_dependencies('discussion')

    @staticmethod
    def css_dependencies():
        """
        Returns list of CSS files that this XBlock depends on.

        The helper function that it uses to obtain the list of CSS files
        works in conjunction with the Django pipeline to ensure that in development mode
        the files are loaded individually, but in production just the single bundle is loaded.

        Picks the RTL stylesheet bundle when the active language is
        right-to-left.
        """
        if get_language_bidi():
            return get_css_dependencies('style-inline-discussion-rtl')
        else:
            return get_css_dependencies('style-inline-discussion')

    def add_resource_urls(self, fragment):
        """
        Adds URLs for JS and CSS resources that this XBlock depends on to `fragment`.
        """
        # Head dependencies
        for vendor_js_file in self.vendor_js_dependencies():
            fragment.add_resource_url(staticfiles_storage.url(vendor_js_file), "application/javascript", "head")

        for css_file in self.css_dependencies():
            fragment.add_css_url(staticfiles_storage.url(css_file))

        # Body dependencies
        for js_file in self.js_dependencies():
            fragment.add_javascript_url(staticfiles_storage.url(js_file))

    def has_permission(self, permission):
        """
        Encapsulates lms specific functionality, as `has_permission` is not
        importable outside of lms context, namely in tests.

        :param user:
        :param str permission: Permission
        :rtype: bool
        """
        # normal import causes the xmodule_assets command to fail due to circular import - hence importing locally
        from django_comment_client.permissions import has_permission

        return has_permission(self.django_user, permission, self.course_key)

    def student_view(self, context=None):
        """
        Renders student view for LMS.
        """
        fragment = Fragment()

        self.add_resource_urls(fragment)

        login_msg = ''

        # NOTE(review): django_user can be None when no user service exists
        # (see the property above); this branch assumes an LMS runtime where
        # the service is always present.
        if not self.django_user.is_authenticated:
            qs = urllib.urlencode({
                'course_id': self.course_key,
                'enrollment_action': 'enroll',
                'email_opt_in': False,
            })
            login_msg = Text(_("You are not signed in. To view the discussion content, {sign_in_link} or "
                               "{register_link}, and enroll in this course.")).format(
                sign_in_link=HTML('<a href="{url}">{sign_in_label}</a>').format(
                    sign_in_label=_('sign in'),
                    url='{}?{}'.format(reverse('signin_user'), qs),
                ),
                register_link=HTML('<a href="/{url}">{register_label}</a>').format(
                    register_label=_('register'),
                    url='{}?{}'.format(reverse('register_user'), qs),
                ),
            )

        context = {
            'discussion_id': self.discussion_id,
            'display_name': self.display_name if self.display_name else _("Discussion"),
            'user': self.django_user,
            'course_id': self.course_key,
            'discussion_category': self.discussion_category,
            'discussion_target': self.discussion_target,
            'can_create_thread': self.has_permission("create_thread"),
            'can_create_comment': self.has_permission("create_comment"),
            'can_create_subcomment': self.has_permission("create_sub_comment"),
            'login_msg': login_msg,
        }

        fragment.add_content(self.runtime.render_template('discussion/_discussion_inline.html', context))
        fragment.initialize_js('DiscussionInlineBlock')

        return fragment

    def author_view(self, context=None):  # pylint: disable=unused-argument
        """
        Renders author view for Studio.
        """
        fragment = Fragment()
        fragment.add_content(self.runtime.render_template(
            'discussion/_discussion_inline_studio.html',
            {'discussion_id': self.discussion_id}
        ))
        return fragment

    def student_view_data(self):
        """
        Returns a JSON representation of the student_view of this XBlock.
        """
        return {'topic_id': self.discussion_id}

    @classmethod
    def parse_xml(cls, node, runtime, keys, id_generator):
        """
        Parses OLX into XBlock.

        This method is overridden here to allow parsing legacy OLX, coming from discussion XModule.
        XBlock stores all the associated data, fields and children in a XML element inlined into vertical XML file
        XModule stored only minimal data on the element included into vertical XML and used a dedicated "discussion"
        folder in OLX to store fields and children. Also, some info was put into "policy.json" file.

        If no external data sources are found (file in "discussion" folder), it is exactly equivalent to base method
        XBlock.parse_xml. Otherwise this method parses file in "discussion" folder (known as definition_xml), applies
        policy.json and updates fields accordingly.
        """
        block = super(DiscussionXBlock, cls).parse_xml(node, runtime, keys, id_generator)

        cls._apply_translations_to_node_attributes(block, node)
        cls._apply_metadata_and_policy(block, node, runtime)

        return block

    @classmethod
    def _apply_translations_to_node_attributes(cls, block, node):
        """
        Applies metadata translations for attributes stored on an inlined XML element.

        Maps legacy attribute names (e.g. 'id', 'for') onto the current field
        names via `metadata_translations`.
        """
        for old_attr, target_attr in cls.metadata_translations.iteritems():
            if old_attr in node.attrib and hasattr(block, target_attr):
                setattr(block, target_attr, node.attrib[old_attr])

    @classmethod
    def _apply_metadata_and_policy(cls, block, node, runtime):
        """
        Attempt to load definition XML from "discussion" folder in OLX, than parse it and update block fields
        """
        try:
            definition_xml, _ = cls.load_definition_xml(node, runtime, block.scope_ids.def_id)
        except Exception as err:  # pylint: disable=broad-except
            # Missing definition file means the OLX is already in XBlock
            # (inline) format - nothing more to do.
            log.info(
                "Exception %s when trying to load definition xml for block %s - assuming XBlock export format",
                err,
                block
            )
            return

        metadata = cls.load_metadata(definition_xml)
        cls.apply_policy(metadata, runtime.get_policy(block.scope_ids.usage_id))

        for field_name, value in metadata.iteritems():
            if field_name in block.fields:
                setattr(block, field_name, value)
| agpl-3.0 |
Kri-7-q/glimpse_client-1 | 3rdparty/breakpad/src/tools/gyp/test/mac/gyptest-framework-headers.py | 344 | 1103 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that mac_framework_headers works properly.
"""

import TestGyp

import sys

# The mac_framework_headers feature only exists on OS X; the test is a no-op
# (and still passes) on other platforms because pass_test() is never reached.
if sys.platform == 'darwin':
  # TODO(thakis): Make this work with ninja, make. http://crbug.com/129013
  test = TestGyp.TestGyp(formats=['xcode'])

  CHDIR = 'framework-headers'
  test.run_gyp('test.gyp', chdir=CHDIR)

  # Test that headers are installed for frameworks
  test.build('test.gyp', 'test_framework_headers_framework', chdir=CHDIR)

  test.built_file_must_exist(
      'TestFramework.framework/Versions/A/TestFramework', chdir=CHDIR)
  test.built_file_must_exist(
      'TestFramework.framework/Versions/A/Headers/myframework.h', chdir=CHDIR)

  # Test that headers are installed for static libraries.
  test.build('test.gyp', 'test_framework_headers_static', chdir=CHDIR)

  test.built_file_must_exist('libTestLibrary.a', chdir=CHDIR)

  test.built_file_must_exist('include/myframework.h', chdir=CHDIR)

  test.pass_test()
| bsd-3-clause |
edx/edx-ora | controller/migrations/0031_auto__del_notificationsseen__chg_field_submission_xqueue_submission_id.py | 1 | 9854 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0031.

    Forwards: drops the NotificationsSeen table and makes
    Submission.xqueue_submission_id a unique CharField(128).
    Backwards: restores both changes.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Deleting model 'NotificationsSeen'
        db.delete_table('controller_notificationsseen')

        # Changing field 'Submission.xqueue_submission_id'
        db.alter_column('controller_submission', 'xqueue_submission_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128))
        # Adding unique constraint on 'Submission', fields ['xqueue_submission_id']
        db.create_unique('controller_submission', ['xqueue_submission_id'])

    def backwards(self, orm):
        """Revert the migration."""
        # Removing unique constraint on 'Submission', fields ['xqueue_submission_id']
        db.delete_unique('controller_submission', ['xqueue_submission_id'])

        # Adding model 'NotificationsSeen'
        db.create_table('controller_notificationsseen', (
            ('notification_type', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('location', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('student_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('controller', ['NotificationsSeen'])

        # Changing field 'Submission.xqueue_submission_id'
        db.alter_column('controller_submission', 'xqueue_submission_id', self.gf('django.db.models.fields.CharField')(max_length=1024))

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history (auto-generated; do not edit by hand).
    models = {
        'controller.grader': {
            'Meta': {'object_name': 'Grader'},
            'confidence': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '9'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feedback': ('django.db.models.fields.TextField', [], {}),
            'grader_id': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1024'}),
            'grader_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_calibration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'status_code': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.Submission']"})
        },
        'controller.message': {
            'Meta': {'object_name': 'Message'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'grader': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.Grader']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'message_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'originator': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'recipient': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'controller.rubric': {
            'Meta': {'object_name': 'Rubric'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'finished_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grader': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.Grader']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rubric_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'controller.rubricitem': {
            'Meta': {'object_name': 'RubricItem'},
            'comment': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'finished_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_number': ('django.db.models.fields.IntegerField', [], {}),
            'max_score': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.Rubric']"}),
            'score': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
            'short_text': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1024'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'controller.rubricoption': {
            'Meta': {'object_name': 'RubricOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_number': ('django.db.models.fields.IntegerField', [], {}),
            'points': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '10', 'decimal_places': '2'}),
            'rubric_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.RubricItem']"}),
            'short_text': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'controller.submission': {
            'Meta': {'object_name': 'Submission'},
            'answer': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'control_fields': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'duplicate_submission_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'grader_settings': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'has_been_duplicate_checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_display': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'is_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_plagiarized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128', 'db_index': 'True'}),
            'max_score': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'next_grader_type': ('django.db.models.fields.CharField', [], {'default': "u'NA'", 'max_length': '2'}),
            'posted_results_back_to_queue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'preferred_grader_type': ('django.db.models.fields.CharField', [], {'default': "u'NA'", 'max_length': '2'}),
            'previous_grader_type': ('django.db.models.fields.CharField', [], {'default': "u'NA'", 'max_length': '2'}),
            'problem_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'prompt': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'rubric': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'skip_basic_checks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'student_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'student_response': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'student_submission_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'xqueue_queue_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128'}),
            'xqueue_submission_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'xqueue_submission_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1024'})
        }
    }

    complete_apps = ['controller']
octobot-dev/pulpo-forms-django | statistics/CheckboxStatistics.py | 2 | 1212 | from pulpo_forms.statistics.serializers import ListStatisticsSerializer
class CheckboxStatistics():
    """Aggregates the answers submitted for a checkbox field.

    Attributes computed in __init__:
        options          -- option labels, in input order
        total_per_option -- selection count per option (parallel to options)
        total_filled     -- number of non-empty answers
        total_not_filled -- number of empty answers
    """

    def __init__(self, data_list, options):
        """Count how often each option was selected.

        data_list -- raw answers; a non-empty answer is a '#'-separated
                     string of option ids (e.g. "1#3"), an empty string
                     means the field was left blank.
        options   -- list of dicts with at least 'id' and 'label' keys.

        Raises Exception when an answer references an unknown option id.
        """
        self.options = [option["label"] for option in options]
        self.total_per_option = [0] * len(options)
        self.total_filled = 0
        self.total_not_filled = 0
        # Map option id -> position once, so every datum is counted in O(1)
        # instead of re-scanning the options list linearly per selection.
        position_by_id = {option["id"]: pos for pos, option in enumerate(options)}
        for data in data_list:
            if data == "":
                self.total_not_filled += 1
                continue
            self.total_filled += 1
            for selected in data.split("#"):
                try:
                    self.total_per_option[position_by_id[int(selected)]] += 1
                except KeyError:
                    raise Exception("Data does not match with any option")

    def getSerializedData(self):
        """Return the statistics serialized through ListStatisticsSerializer."""
        return ListStatisticsSerializer(self).data
| apache-2.0 |
alvin319/CarnotKE | jyhton/lib-python/2.7/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# Fetch the C-implemented codec object for this encoding from _codecs_iso2022.
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points delegated to the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte base class drives the C codec referenced here.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte base class drives the C codec referenced here.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-based decoding; behavior supplied entirely by the base classes.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-based encoding; behavior supplied entirely by the base classes.
    codec = codec
def getregentry():
    """Return the CodecInfo record that registers this codec with `codecs`."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
rwboyer/marilyn-project | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/html.py | 94 | 31067 | # -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import os.path
import io
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
# Translation table mapping each HTML-special character to its entity.
_escape_html_table = {
    ord(char): entity
    for char, entity in [
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
    ]
}


def escape_html(text, table=_escape_html_table):
    """Escape &, <, > as well as single and double quotes for HTML."""
    return text.translate(table)
def get_random_id():
    """Return a random 40-character hex id for javascript fields."""
    from random import random
    from time import time
    try:
        from hashlib import sha1 as sha
    except ImportError:
        # Pre-hashlib Pythons shipped a standalone `sha` module.
        import sha
        sha = sha.new
    # hashlib digests operate on bytes: encode the seed string explicitly
    # (passing a str to sha1() raises TypeError on Python 3).
    return sha(('%s|%s' % (random(), time())).encode('ascii')).hexdigest()
def _get_ttype_class(ttype):
    """Return the CSS class name for a token type.

    When *ttype* is not in STANDARD_TYPES, walk up its parent chain until a
    known type is found, appending one '-<subtype>' segment per level so
    custom subtypes still map to a deterministic class name.
    """
    fname = STANDARD_TYPES.get(ttype)
    if fname:
        return fname
    aname = ''
    while fname is None:
        # ttype[-1] is the last path component of the token type name.
        aname = '-' + ttype[-1] + aname
        ttype = ttype.parent
        fname = STANDARD_TYPES.get(ttype)
    return fname + aname
# Base stylesheet skeleton; '%(styledefs)s' is substituted with the generated
# token style rules (note the escaped %% in 'line-height' for %-formatting).
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''

# Full-document wrapper used with the 'full' option when styles are inlined.
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''

# Variant header that links an external stylesheet instead of inlining it.
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''

# Closing tags matching either header above.
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`anchorlines` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
*New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
wrapping table will have a CSS class of this string plus ``'table'``,
the default is accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``). *New in
Pygments 0.11.*
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file. *New in Pygments 0.6.*
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
*New in Pygments 1.1.*
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 0.11.*
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_syntax_defs` method given]) (default: ``False``). *New in
Pygments 0.6.*
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks. *New in Pygments
0.7.*
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines. *New in Pygments 0.9.*
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript. *New in Pygments 1.6.*
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
*New in Pygments 1.6.*
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
*New in Pygments 1.6.*
**Subclassing the HTML formatter**
*New in Pygments 0.7.*
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
If the `nowrap` option is set, the generator is then iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'  # human-readable name of this formatter
aliases = ['html']  # names accepted by get_formatter_by_name()
filenames = ['*.html', '*.htm']  # output file patterns this formatter matches
def __init__(self, **options):
    """
    Initialize the formatter from keyword *options*; see the class
    docstring for the full list of recognized options.
    """
    Formatter.__init__(self, **options)
    self.title = self._decodeifneeded(self.title)
    self.nowrap = get_bool_opt(options, 'nowrap', False)
    self.noclasses = get_bool_opt(options, 'noclasses', False)
    self.classprefix = options.get('classprefix', '')
    self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
    self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
    self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
    self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
    self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
    self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
    self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))

    if self.tagsfile:
        if not ctags:
            # bug fix: error message grammar ("must to be" -> "must be")
            raise RuntimeError('The "ctags" package must be installed '
                               'to be able to use the "tagsfile" feature.')
        self._ctags = ctags.CTags(self.tagsfile)

    # self.linenos: 0 = no line numbers, 1 = table, 2 = inline
    linenos = options.get('linenos', False)
    if linenos == 'inline':
        self.linenos = 2
    elif linenos:
        # compatibility with <= 0.7: any other true value means 'table'
        self.linenos = 1
    else:
        self.linenos = 0
    self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
    self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
    self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
    self.nobackground = get_bool_opt(options, 'nobackground', False)
    self.lineseparator = options.get('lineseparator', '\n')
    self.lineanchors = options.get('lineanchors', '')
    self.linespans = options.get('linespans', '')
    self.anchorlinenos = options.get('anchorlinenos', False)
    self.hl_lines = set()
    for lineno in get_list_opt(options, 'hl_lines', []):
        try:
            self.hl_lines.add(int(lineno))
        except ValueError:
            # silently ignore hl_lines entries that are not integers
            pass

    self._create_stylesheet()
def _get_css_class(self, ttype):
    """Return the CSS class for token type *ttype*, prefixed with the
    ``classprefix`` option, or ``''`` when the type has no class."""
    base = _get_ttype_class(ttype)
    return self.classprefix + base if base else ''
def _create_stylesheet(self):
    # Build two lookup tables from the style definitions:
    #   ttype2class: token type  -> CSS class name
    #   class2style: class name  -> (inline CSS, token type, hierarchy depth)
    t2c = self.ttype2class = {Token: ''}
    c2s = self.class2style = {}
    for ttype, ndef in self.style:
        name = self._get_css_class(ttype)
        style = ''
        if ndef['color']:
            style += 'color: #%s; ' % ndef['color']
        if ndef['bold']:
            style += 'font-weight: bold; '
        if ndef['italic']:
            style += 'font-style: italic; '
        if ndef['underline']:
            style += 'text-decoration: underline; '
        if ndef['bgcolor']:
            style += 'background-color: #%s; ' % ndef['bgcolor']
        if ndef['border']:
            style += 'border: 1px solid #%s; ' % ndef['border']
        if style:
            t2c[ttype] = name
            # save len(ttype) to enable ordering the styles by
            # hierarchy (necessary for CSS cascading rules!)
            # style[:-2] strips the trailing '; '
            c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
    """
    Return CSS style definitions for the classes produced by the current
    highlighting style. ``arg`` can be a string or list of selectors to
    insert before the token type classes.
    """
    if arg is None:
        # default: scope the rules under the configured cssclass, if the
        # user explicitly provided one
        arg = ('cssclass' in self.options and '.'+self.cssclass or '')
    if isinstance(arg, str):
        args = [arg]
    else:
        args = list(arg)

    def prefix(cls):
        # build a selector of the form "<arg> .<cls>" for every prefix
        if cls:
            cls = '.' + cls
        tmp = []
        for arg in args:
            tmp.append((arg and arg + ' ' or '') + cls)
        return ', '.join(tmp)

    # sort by token hierarchy depth so more specific classes come later
    # (required for correct CSS cascading)
    styles = [(level, ttype, cls, style)
              for cls, (style, ttype, level) in self.class2style.items()
              if cls and style]
    styles.sort()
    lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
             for (level, ttype, cls, style) in styles]
    if arg and not self.nobackground and \
       self.style.background_color is not None:
        # emit an overall background rule before the token rules
        text_style = ''
        if Text in self.ttype2class:
            text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
        lines.insert(0, '%s { background: %s;%s }' %
                     (prefix(''), self.style.background_color, text_style))
    if self.style.highlight_color is not None:
        # rule for highlighted lines (see the hl_lines option)
        lines.insert(0, '%s.hll { background-color: %s }' %
                     (prefix(''), self.style.highlight_color))
    return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
    # Wrap the stream in a complete HTML document.  If ``cssfile`` is
    # set, the styles are written to that external file (relative to the
    # output file's directory when possible); otherwise they are embedded
    # in the document header.
    if self.cssfile:
        if os.path.isabs(self.cssfile):
            # it's an absolute filename
            cssfilename = self.cssfile
        else:
            try:
                filename = outfile.name
                if not filename or filename[0] == '<':
                    # pseudo files, e.g. name == '<fdopen>'
                    raise AttributeError
                cssfilename = os.path.join(os.path.dirname(filename),
                                           self.cssfile)
            except AttributeError:
                print('Note: Cannot determine output file name, ' \
                      'using current directory as base for the CSS file name', file=sys.stderr)
                cssfilename = self.cssfile
        # write CSS file only if noclobber_cssfile isn't given as an option.
        try:
            if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
                cf = open(cssfilename, "w")
                cf.write(CSSFILE_TEMPLATE %
                         {'styledefs': self.get_style_defs('body')})
                cf.close()
        except IOError as err:
            # annotate the error with context before re-raising
            err.strerror = 'Error writing CSS file: ' + err.strerror
            raise
        yield 0, (DOC_HEADER_EXTERNALCSS %
                  dict(title = self.title,
                       cssfile = self.cssfile,
                       encoding = self.encoding))
    else:
        yield 0, (DOC_HEADER %
                  dict(title = self.title,
                       styledefs = self.get_style_defs('body'),
                       encoding = self.encoding))
    # pass the body through unchanged, then close the document
    for t, line in inner:
        yield t, line
    yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
    # Render line numbers and code side by side as a two-cell table.
    # The whole inner stream must be buffered first, because the number
    # of lines has to be known before the number column can be built.
    dummyoutfile = io.StringIO()
    lncount = 0
    for t, line in inner:
        if t:
            lncount += 1
        dummyoutfile.write(line)

    fl = self.linenostart
    mw = len(str(lncount + fl - 1))  # width of the widest line number
    sp = self.linenospecial
    st = self.linenostep
    la = self.lineanchors
    aln = self.anchorlinenos
    nocls = self.noclasses
    if sp:
        # every sp-th number gets the "special" CSS class; numbers not on
        # a linenostep boundary are left blank
        lines = []
        for i in range(fl, fl+lncount):
            if i % st == 0:
                if i % sp == 0:
                    if aln:
                        lines.append('<a href="#%s-%d" class="special">%*d</a>' %
                                     (la, i, mw, i))
                    else:
                        lines.append('<span class="special">%*d</span>' % (mw, i))
                else:
                    if aln:
                        lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
            else:
                lines.append('')
        ls = '\n'.join(lines)
    else:
        lines = []
        for i in range(fl, fl+lncount):
            if i % st == 0:
                if aln:
                    lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
                else:
                    lines.append('%*d' % (mw, i))
            else:
                lines.append('')
        ls = '\n'.join(lines)

    # in case you wonder about the seemingly redundant <div> here: since the
    # content in the other cell also is wrapped in a div, some browsers in
    # some configurations seem to mess up the formatting...
    if nocls:
        yield 0, ('<table class="%stable">' % self.cssclass +
                  '<tr><td><div class="linenodiv" '
                  'style="background-color: #f0f0f0; padding-right: 10px">'
                  '<pre style="line-height: 125%">' +
                  ls + '</pre></div></td><td class="code">')
    else:
        yield 0, ('<table class="%stable">' % self.cssclass +
                  '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                  ls + '</pre></div></td><td class="code">')
    yield 0, dummyoutfile.getvalue()
    yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
    # need a list of lines since we need the width of a single number :(
    lines = list(inner)
    sp = self.linenospecial
    st = self.linenostep
    num = self.linenostart
    mw = len(str(len(lines) + num - 1))  # width of the widest line number

    # Numbers not on a linenostep boundary are rendered as blanks of the
    # same width (``num%st and ' ' or num``) to keep the column aligned.
    if self.noclasses:
        if sp:
            for t, line in lines:
                if num%sp == 0:
                    # "special" line (every sp-th) gets a yellow background
                    style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
                else:
                    style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
                yield 1, '<span style="%s">%*s</span> ' % (
                    style, mw, (num%st and ' ' or num)) + line
                num += 1
        else:
            for t, line in lines:
                yield 1, ('<span style="background-color: #f0f0f0; '
                          'padding: 0 5px 0 5px">%*s</span> ' % (
                          mw, (num%st and ' ' or num)) + line)
                num += 1
    elif sp:
        for t, line in lines:
            yield 1, '<span class="lineno%s">%*s</span> ' % (
                num%sp == 0 and ' special' or '', mw,
                (num%st and ' ' or num)) + line
            num += 1
    else:
        for t, line in lines:
            yield 1, '<span class="lineno">%*s</span> ' % (
                mw, (num%st and ' ' or num)) + line
            num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
i = self.linenostart - 1 # subtract 1 since we have to increment i
# *before* yielding
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
    """
    Just format the tokens, without any wrapping tags.
    Yield individual lines.
    """
    nocls = self.noclasses
    lsep = self.lineseparator
    # for <span style=""> lookup only
    getcls = self.ttype2class.get
    c2s = self.class2style
    escape_table = _escape_html_table
    tagsfile = self.tagsfile

    # lspan: span markup the current (unfinished) line was opened with;
    # line:  HTML accumulated for the current line so far
    lspan = ''
    line = ''
    for ttype, value in tokensource:
        if nocls:
            # inline-styles mode: walk up the token hierarchy until a
            # styled ancestor is found
            cclass = getcls(ttype)
            while cclass is None:
                ttype = ttype.parent
                cclass = getcls(ttype)
            cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
        else:
            cls = self._get_css_class(ttype)
            cspan = cls and '<span class="%s">' % cls or ''

        # HTML-escape the value and split it on newlines; a token may
        # span several output lines
        parts = value.translate(escape_table).split('\n')

        if tagsfile and ttype in Token.Name:
            # link name tokens to their ctags definition, if known
            filename, linenumber = self._lookup_ctag(value)
            if linenumber:
                base, filename = os.path.split(filename)
                if base:
                    base += '/'
                filename, extension = os.path.splitext(filename)
                url = self.tagurlformat % {'path': base, 'fname': filename,
                                           'fext': extension}
                parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                    (url, self.lineanchors, linenumber, parts[0])
                parts[-1] = parts[-1] + "</a>"

        # for all but the last line
        for part in parts[:-1]:
            if line:
                if lspan != cspan:
                    line += (lspan and '</span>') + cspan + part + \
                        (cspan and '</span>') + lsep
                else: # both are the same
                    line += part + (lspan and '</span>') + lsep
                yield 1, line
                line = ''
            elif part:
                yield 1, cspan + part + (cspan and '</span>') + lsep
            else:
                yield 1, lsep
        # for the last line (kept open -- it may be continued by the
        # next token)
        if line and parts[-1]:
            if lspan != cspan:
                line += (lspan and '</span>') + cspan + parts[-1]
                lspan = cspan
            else:
                line += parts[-1]
        elif parts[-1]:
            line = cspan + parts[-1]
            lspan = cspan
        # else we neither have to open a new span nor set lspan

    if line:
        # flush the trailing unfinished line
        yield 1, line + (lspan and '</span>') + lsep
def _lookup_ctag(self, token):
    """Look *token* up in the ctags index; return ``(filename, linenumber)``
    or ``(None, None)`` when the token is unknown."""
    entry = ctags.TagEntry()
    if not self._ctags.find(entry, token, 0):
        return None, None
    return entry['file'], entry['lineNumber']
def _highlight_lines(self, tokensource):
"""
Highlighted the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
    """
    Wrap ``source`` (a generator yielding individual lines) in the
    default ``<div>``/``<pre>`` pair.  Subclasses may override this to
    change the surrounding markup; see the class docstring for `format`.
    """
    wrapped = self._wrap_pre(source)
    return self._wrap_div(wrapped)
def format_unencoded(self, tokensource, outfile):
    """
    The formatting process uses several nested generators; which of
    them are used is determined by the user's options.

    Each generator should take at least one argument, ``inner``,
    and wrap the pieces of text generated by this.

    Always yield 2-tuples: (code, text). If "code" is 1, the text
    is part of the original tokensource being highlighted, if it's
    0, the text is some piece of wrapping. This makes it possible to
    use several different wrappers that process the original source
    linewise, e.g. line number generators.
    """
    source = self._format_lines(tokensource)
    if self.hl_lines:
        source = self._highlight_lines(source)
    if not self.nowrap:
        # the order of the wrappers below is significant: inner wrappers
        # act on raw source lines, outer ones on already-wrapped markup
        if self.linenos == 2:
            source = self._wrap_inlinelinenos(source)
        if self.lineanchors:
            source = self._wrap_lineanchors(source)
        if self.linespans:
            source = self._wrap_linespans(source)
        source = self.wrap(source, outfile)
        if self.linenos == 1:
            # table line numbers must enclose the div/pre wrapping
            source = self._wrap_tablelinenos(source)
        if self.full:
            source = self._wrap_full(source, outfile)

    for t, piece in source:
        outfile.write(piece)
| mit |
lazyparser/FuzzManager | FTB/ConfigurationFiles.py | 3 | 1922 | #!/usr/bin/env python
# encoding: utf-8
'''
ConfigurationFiles -- Generic class used in FuzzManager to read one or more configuration files
@author: Christian Holler (:decoder)
@license:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
@contact: choller@mozilla.com
'''
# Ensure print() compatibility with Python 3
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import sys
class ConfigurationFiles():
    """Read one or more configuration files and expose their "Main" and
    "Metadata" sections as plain dictionaries."""

    def __init__(self, configFiles):
        """Parse *configFiles* (a list of file names; may be empty/None)."""
        self.mainConfig = {}
        self.metadataConfig = {}

        if not configFiles:
            return

        self.parser = configparser.ConfigParser()
        # Make sure keys are kept case-sensitive
        self.parser.optionxform = str
        self.parser.read(configFiles)
        self.mainConfig = self.getSectionMap("Main")
        self.metadataConfig = self.getSectionMap("Metadata")

        # Produce warnings for unrecognized sections to make debugging
        # easier -- especially "main" vs. "Main" is hard to spot.
        leftover = [s for s in self.parser.sections()
                    if s not in ("Main", "Metadata")]
        if leftover:
            print("Warning: Ignoring the following config file sections: %s"
                  % " ".join(leftover), file=sys.stderr)

    def getSectionMap(self, section):
        """Return the options of *section* as a dict ({} when missing)."""
        try:
            names = self.parser.options(section)
        except configparser.NoSectionError:
            return {}
        return {name: self.parser.get(section, name) for name in names}
kulawczukmarcin/mypox | pox/log/color.py | 46 | 5311 | # Copyright 2011 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: Not platform independent -- uses VT escape codes
# Magic sequence used to introduce a command or color
MAGIC = "@@@"

# Colors for log levels (values are keys of COLORS, optionally preceded
# by @@@-separated commands, e.g. 'blink@@@RED')
LEVEL_COLORS = {
  'DEBUG': 'CYAN',
  'INFO': 'GREEN',
  'WARNING': 'YELLOW',
  'ERROR': 'RED',
  'CRITICAL': 'blink@@@RED',
}

# Will get set to True if module is initialized
enabled = False

# Gets set to True if we should strip special sequences but
# not actually try to colorize
_strip_only = False

import logging
import sys

# Name to (intensity, base_value) (more colors added later)
COLORS = {
  'black' : (0,0),
  'red' : (0,1),
  'green' : (0,2),
  'yellow' : (0,3),
  'blue' : (0,4),
  'magenta' : (0,5),
  'cyan' : (0,6),
  'gray' : (0,7),
  'darkgray' : (1,0),
  'pink' : (1,1),
  'white' : (1,7),
}

# Add intense/bold colors (names in capitals)
for _c in [_n for _n,_v in COLORS.items() if _v[0] == 0]:
  COLORS[_c.upper()] = (1,COLORS[_c][1])

# Command name to SGR parameter (negative values are handled specially
# by _proc)
COMMANDS = {
  'reset' : 0,
  'bold' : 1,
  'dim' : 2,
  'bright' : 1,
  'dull' : 2,
  'bright:' : 1,
  'dull:' : 2,
  'blink' : 5,
  'BLINK' : 6,
  'invert' : 7,
  'bg:' : -1, # Special
  'level' : -2, # Special -- color of current level
  'normal' : 22,
  'underline' : 4,
  'nounderline' : 24,
}

# Control Sequence Introducer
CSI = "\033["
def _color (color, msg):
  """ Colorizes the given text """
  # Fix: the trailing reset sequence was previously passed through
  # ``.lower()``, a no-op on the generated escape code ("\033[0m") that
  # only obscured intent -- removed.
  return _proc(MAGIC + color) + msg + _proc(MAGIC + 'reset')
def _proc (msg, level_color = "DEBUG"):
  """
  Do some replacements on the text

  Interprets @@@-introduced color/command sequences in *msg* and replaces
  them with ANSI escape codes (or strips them when _strip_only is set).
  *level_color* is the log level name used for the special 'level' command.
  """
  msg = msg.split(MAGIC)
  #print "proc:",msg
  r = ''
  i = 0
  cmd = False
  while i < len(msg):
    m = msg[i]
    #print i,m
    i += 1
    if cmd:
      # Find the longest command name prefixing this chunk.
      # (.items() instead of the Python-2-only .iteritems(), so the
      # module also works under Python 3)
      best = None
      bestlen = 0
      for k,v in COMMANDS.items():
        if len(k) > bestlen:
          if m.startswith(k):
            best = (k,v)
            bestlen = len(k)
      special = None
      if best is not None and best[0].endswith(':'):
        # Commands ending in ':' (bg:, bright:, dull:) modify the
        # color name that follows
        special = best
        m = m[bestlen:]
        best = None
        bestlen = 0
      for k,v in COLORS.items():
        if len(k) > bestlen:
          if m.startswith(k):
            best = (k,v)
            bestlen = len(k)
      if best is not None:
        #print "COMMAND", best
        m = m[bestlen:]
        if type(best[1]) is tuple:
          # Color
          brightness,color = best[1]
          if special is not None:
            if special[1] == -1:
              # bg: -- shift to the background color range
              brightness = None
              color += 10
          color += 30
          if not _strip_only:
            r += CSI
            if brightness is not None:
              r += str(brightness) + ";"
            r += str(color) + "m"
        elif not _strip_only:
          # Command
          if best[1] == -2:
            # 'level' -- recurse with the current level's color spec
            r += _proc(MAGIC + LEVEL_COLORS.get(level_color, ""), level_color)
          else:
            r += CSI + str(best[1]) + "m"
    cmd = True
    r += m
  return r
def launch (entire=False):
  """
  If --entire then the whole message is color-coded, otherwise just the
  log level.

  Also turns on interpretation of some special sequences in the log
  format string.  For example, try:
   log --format="%(levelname)s: @@@bold%(message)s@@@normal" log.color
  """
  global enabled
  if enabled: return

  from pox.core import core
  log = core.getLogger()

  windows_hack = False

  # Try to work on Windows (VT escapes need colorama there)
  if sys.platform == "win32":
    try:
      from colorama import init
      windows_hack = True
      init()
    except:
      log.info("You need colorama if you want color logging on Windows")
      global _strip_only
      # no colorama: strip the @@@ sequences instead of colorizing
      _strip_only = True

  from pox.core import _default_log_handler as dlf
  if not dlf:
    log.warning("Color logging disabled -- no default logger found")
    return
  #if not hasattr(dlf, 'formatter'):
  #  log.warning("Color logging disabled -- no formatter found")
  #  return
  #if not hasattr(dlf.formatter, '_fmt'):
  #  log.warning("Color logging disabled -- formatter unrecognized")
  #  return

  # Monkeypatch in a new format function...
  old_format = dlf.format
  if entire:
    # colorize the whole formatted message with the level's color
    def new_format (record):
      msg = _proc(old_format(record), record.levelname)
      color = LEVEL_COLORS.get(record.levelname)
      if color is None:
        return msg
      return _color(color, msg)
  else:
    # colorize only the levelname; restore it afterwards so other
    # handlers see the unmodified record
    def new_format (record):
      color = LEVEL_COLORS.get(record.levelname)
      oldlevelname = record.levelname
      if color is not None:
        record.levelname = _color(color, record.levelname)
      r = _proc(old_format(record), oldlevelname)
      record.levelname = oldlevelname
      return r
  dlf.format = new_format

  if windows_hack:
    # only enable if we could swap the handler's stream for the
    # colorama-wrapped one
    if hasattr(dlf, "stream"):
      if dlf.stream is sys.__stderr__:
        dlf.stream = sys.stderr
        enabled = True
  else:
    enabled = True
| apache-2.0 |
ClimbsRocks/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model

n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)

latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))

# Fix: use integer (floor) division for the split indices -- ``n / 2``
# is a float under Python 3 and floats are not valid slice bounds.
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]

print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS

# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)

# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")

plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")

# 2) Off diagonal plot components 1 vs 2 for X and Y
# (weak correlation expected: components are designed to be orthogonal)
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
          % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())

plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
          % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2

n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5

pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)

###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1

n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))

###############################################################################
# CCA (PLS mode B with symmetric deflation)

cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# Fix: transform with the freshly fitted CCA model -- the original code
# mistakenly reused ``plsca`` (the PLSCanonical estimator) here.
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
netvigator/myPyPacks | pyPacks/Dict/Extend.py | 2 | 6012 | #!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# dict functions Extend
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/licenses/gpl.html
#
# Copyright 2004-2012 Rick Graves
#
class returnValueOrArg( dict ):
    """
    A dict whose getVorA() method falls back to the lookup key itself.

    (The name is short for "return value or argument".)
    """

    def getVorA( self, k ):
        """
        Return self[k] when k is a key, otherwise return k unchanged.

        (The name is short for "get value or argument".)
        """
        return self.get( k, k )
class DoubleDictClass( dict ):
    #
    """
    For fast lookups of keys and values.
    Implemented by having a second dict with keys as values and values as keys.
    This code assumes the values are unique.
    If the above does not apply, this class needs updating.
    """
    #
    def __init__( self, dThis = {} ):
        #
        # dThis is only read, never mutated, so the mutable default is safe
        self.update( dThis )
        #
        self.UpdateReverseFromDict()
        #
    #
    def Update( self, uKey, uValue ):
        #
        # bug fix: remove the stale reverse entry when an existing key is
        # re-mapped, otherwise dReverse would keep pointing the key's old
        # value back at the key
        if uKey in self and self[ uKey ] in self.dReverse:
            #
            del self.dReverse[ self[ uKey ] ]
            #
        #
        self[ uKey ] = uValue
        #
        self.dReverse[ uValue ] = uKey
        #
    def UpdateReverseFromDict( self ):
        #
        # rebuild the value -> key map from scratch
        from Dict.Get import getReverseDictGotUniqueItems
        #
        self.dReverse = getReverseDictGotUniqueItems( self )
        #
    def hasValue( self, uValue ):
        #
        return uValue in self.dReverse
        #
    def hasSomething( self, uSomething ):
        #
        # True when uSomething is either a key or a value
        return uSomething in self or uSomething in self.dReverse
        #
    def hasKey( self, uSomething ):
        #
        return uSomething in self
        #
    def getKeyFromValue( self, uValue ):
        #
        return self.dReverse.get( uValue )
        #
    def getValueFromKey( self, uKey ): # just a wrapper for get()
        #
        return self.get( uKey )
class getClosestClass( dict ): # not used anywhere
    #
    """Returns key and value for key or value that is close to
    the look up value -- look up value can be shorter than actual key.
    Example, look up value abc will find key abcde.
    Known to work with string keys, may work with other types also.
    Example application is class Countries in getMamboMirrors.
    Here is the rub: all values must be unique."""
    #
    def __init__( self, dLookIn ):
        #
        from Dict.Get import getItemIter, getValueList
        #
        self.update( dLookIn )
        #
        self.dReverse = {}
        #
        for sKey, sValue in getItemIter( self ):
            #
            self.dReverse[ sValue ] = sKey # if you have the value, look up the key
            #
        #
        # lValues is kept sorted so getValue4Key can bisect into it
        self.lValues = getValueList( self )
        #
        self.lValues.sort()
        #
    def getValue4Key( self, sLook4This ):
        #
        import bisect
        #
        uValue = None
        sKey = None
        #
        if sLook4This in self:
            # exact key match
            sKey = sLook4This
            #
            uValue = self[ sLook4This ]
            #
        elif sLook4This in self.dReverse:
            # exact value match
            uValue = sLook4This
            #
            sKey = self.dReverse[ sLook4This ]
            #
        else:
            # prefix/substring match: bisect finds the first value sorting
            # after the probe; accept it when the probe occurs in it
            iNext = bisect.bisect( self.lValues, sLook4This )
            #
            # bisect = bisect_right
            #
            if iNext < len( self.lValues ) and \
               sLook4This in self.lValues[ iNext ]:
                #
                uValue = self.lValues[ iNext ]
                #
                sKey = self.dReverse[ uValue ]
                #
        return sKey, uValue
if __name__ == "__main__":
    #
    # self-test harness: exercises the classes above and reports any
    # failures via sayTestResult
    lProblems = []
    #
    from Collect.Test import AllMeet
    from Dict.Get import getKeyIter, getItemIter, getValueIter
    from Utils.Result import sayTestResult
    #
    dTest = dict( a = 1, b = 2, c = 3, d = 4, e = 5, f = 6, g = 7, h = 8 )
    #
    dReverse = DoubleDictClass( dTest )
    #
    # round-trip check: value -> key -> value must come back unchanged
    def isRightValue( value ): return dReverse[ dReverse.getKeyFromValue( value ) ] == value
    #
    if not AllMeet( getValueIter( dTest ), dReverse.hasValue ) or \
       not AllMeet( getValueIter( dTest ), dReverse.hasSomething ) or \
       not AllMeet( getKeyIter( dTest ), dReverse.hasKey ) or \
       not AllMeet( getKeyIter( dTest ), dReverse.hasSomething ) or \
       not AllMeet( getValueIter( dTest ), isRightValue ):
        #
        lProblems.append( 'DoubleDictClass()' )
        #
    #
    dClose = getClosestClass( dTest )
    #
    setItems = frozenset( getItemIter( dTest ) )
    #
    def GotItem( t ): return t in setItems
    #
    # getValue4Key returns a (key, value) tuple, so membership is checked
    # against the frozenset of items
    def hasItemForKey( key ): return GotItem( dClose.getValue4Key( key ) )
    #
    if not AllMeet( getKeyIter( dTest ), hasItemForKey ):
        #
        lProblems.append( 'GetClosestClass()' )
        #
    #
    dTest = returnValueOrArg()
    #
    dTest[ 'toast' ] = 'beans on toast'
    #
    if dTest.getVorA( 'toast' ) != 'beans on toast':
        #
        lProblems.append( 'returnValueOrArg() has key' )
        #
    #
    if dTest.getVorA( 'spam' ) != 'spam':
        #
        lProblems.append( 'returnValueOrArg() does not have key' )
        #
    #
sayTestResult( lProblems ) | gpl-2.0 |
shujunqiao/cocos2d-python | cocos/rect.py | 4 | 11068 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
class Rect(object):
    '''Define a rectangular area.

    Many convenience handles and other properties are also defined - all of
    which may be assigned to which will result in altering the position
    and sometimes dimensions of the Rect:

    - top -- y pixel extent
    - bottom -- y pixel extent
    - left -- x pixel extent
    - right -- x pixel extent
    - position -- (x, y) of bottom-left corner pixel
    - origin -- (x, y) of bottom-left corner pixel
    - center -- (x, y) of center pixel
    - topleft -- (x, y) of top-left corner pixel
    - topright -- (x, y) of top-right corner pixel
    - bottomleft -- (x, y) of bottom-left corner pixel
    - bottomright -- (x, y) of bottom-right corner pixel
    - midtop -- (x, y) of middle of top side pixel
    - midbottom -- (x, y) of middle of bottom side pixel
    - midleft -- (x, y) of middle of left side pixel
    - midright -- (x, y) of middle of right side pixel
    - size -- (width, height) of rect

    The Rect area includes the bottom and left borders but not the top and
    right borders.
    '''
    def __init__(self, x, y, width, height):
        '''Create a Rect with the bottom-left corner at (x, y) and
        dimensions (width, height).
        '''
        self._x, self._y = x, y
        self._width, self._height = width, height

    def __nonzero__(self):
        # Truthy only when the rect has a non-zero area (Python 2 hook).
        return bool(self.width and self.height)

    # Python 3 consults __bool__, not __nonzero__; alias it so degenerate
    # (zero-area) rects are falsy on both versions, as the doctests in
    # intersect() expect.  The file already imports from __future__ for
    # 2/3 compatibility.
    __bool__ = __nonzero__

    def __repr__(self):
        return 'Rect(xy=%.4g,%.4g; wh=%.4g,%.4g)'%(self.x, self.y,
                                                   self.width, self.height)

    def __eq__(self, other):
        '''Compare the two rects.

        >>> r1 = Rect(0, 0, 10, 10)
        >>> r1 == Rect(0, 0, 10, 10)
        True
        >>> r1 == Rect(1, 0, 10, 10)
        False
        >>> r1 == Rect(0, 1, 10, 10)
        False
        >>> r1 == Rect(0, 0, 11, 10)
        False
        >>> r1 == Rect(0, 0, 10, 11)
        False
        '''
        return (self.x == other.x and self.y == other.y and
                self.width == other.width and self.height == other.height)

    # py3 compatiblity: obj that defines __eq__ needs to define __hash__ to be
    # hashable, and we need that class RectCell(Rect, Cell) be hashable
    __hash__ = object.__hash__

    def __ne__(self, other):
        '''Compare the two rects.

        >>> r1 = Rect(0, 0, 10, 10)
        >>> r1 != Rect(0, 0, 10, 10)
        False
        >>> r1 != Rect(1, 0, 10, 10)
        True
        >>> r1 != Rect(0, 1, 10, 10)
        True
        >>> r1 != Rect(0, 0, 11, 10)
        True
        >>> r1 != Rect(0, 0, 10, 11)
        True
        '''
        return not (self == other)

    def copy(self):
        '''Return a new Rect (or subclass) with the same position and size.'''
        return self.__class__(self.x, self.y, self.width, self.height)

    # The following four properties will most likely be overridden in a
    # subclass, so every accessor below goes through .x/.y/.width/.height
    # rather than touching the underscore slots directly.
    def set_x(self, value): self._x = value
    x = property(lambda self: self._x, set_x)

    def set_y(self, value): self._y = value
    y = property(lambda self: self._y, set_y)

    def set_width(self, value): self._width = value
    width = property(lambda self: self._width, set_width)

    def set_height(self, value): self._height = value
    height = property(lambda self: self._height, set_height)

    def contains(self, x, y):
        '''Return boolean whether the point defined by x, y is inside the
        rect area.
        '''
        if x < self.x or x > self.x + self.width: return False
        if y < self.y or y > self.y + self.height: return False
        return True

    def intersects(self, other):
        '''Return boolean whether the "other" rect (an object with .x, .y,
        .width and .height attributes) overlaps this Rect in any way.
        '''
        if self.x + self.width < other.x: return False
        if other.x + other.width < self.x: return False
        if self.y + self.height < other.y: return False
        if other.y + other.height < self.y: return False
        return True

    def clippedBy(self, other):
        '''Determine whether this rect is clipped by the other rect.

        >>> r1 = Rect(0, 0, 10, 10)
        >>> r2 = Rect(1, 1, 9, 9)
        >>> r2.clippedBy(r1)    # r2 fits inside r1
        False
        >>> r1.clippedBy(r2)    # r1 is clipped by r2
        True
        >>> r2 = Rect(1, 1, 11, 11)
        >>> r1.intersect(r2)
        Rect(xy=1,1; wh=9,9)
        >>> r1.clippedBy(r2)
        True
        >>> r2.intersect(r1)
        Rect(xy=1,1; wh=9,9)
        >>> r2.clippedBy(r1)
        True
        >>> r2 = Rect(11, 11, 1, 1)
        >>> r1.clippedBy(r2)
        True
        '''
        # Fix: the previous code tested self.intersects(other) (returning
        # True on *any* overlap, contradicting the doctests above) and then
        # read an undefined name `i` (NameError).  The intersection must be
        # computed and bound first.
        i = self.intersect(other)
        if i is None: return True           # no overlap at all
        # We are clipped iff the visible (intersected) part is smaller
        # than this rect in any dimension or shifted off our corner.
        if i.x > self.x: return True
        if i.y > self.y: return True
        if i.width < self.width: return True
        if i.height < self.height: return True
        return False

    def intersect(self, other):
        '''Find the intersection of two Rects.

        Returns None when the overlap has zero area.

        >>> r1 = Rect(0, 51, 200, 17)
        >>> r2 = Rect(0, 64, 200, 55)
        >>> r1.intersect(r2)
        Rect(xy=0,64; wh=200,4)
        >>> r1 = Rect(0, 64, 200, 55)
        >>> r2 = Rect(0, 0, 200, 17)
        >>> print(r1.intersect(r2))
        None
        >>> r1 = Rect(10, 10, 10, 10)
        >>> r2 = Rect(20, 20, 10, 10)
        >>> print(r1.intersect(r2))
        None
        >>> bool(Rect(0, 0, 1, 1))
        True
        >>> bool(Rect(0, 0, 1, 0))
        False
        >>> bool(Rect(0, 0, 0, 1))
        False
        >>> bool(Rect(0, 0, 0, 0))
        False
        '''
        s_tr_x, s_tr_y = self.topright
        o_tr_x, o_tr_y = other.topright
        # Bottom-left of the overlap is the max of the bottom-lefts;
        # top-right is the min of the top-rights.
        bl_x = max(self.x, other.x)
        bl_y = max(self.y, other.y)
        tr_x = min(s_tr_x, o_tr_x)
        tr_y = min(s_tr_y, o_tr_y)
        w, h = max(0, tr_x-bl_x), max(0, tr_y-bl_y)
        if not w or not h:
            return None
        return self.__class__(bl_x, bl_y, w, h)

    def set_position(self, value): self._x, self._y = value
    position = property(lambda self: (self._x, self._y), set_position)

    def set_size(self, value): self._width, self._height = value
    size = property(lambda self: (self._width, self._height), set_size)

    def get_origin(self): return self.x, self.y
    def set_origin(self, origin): self.x, self.y = origin
    origin = property(get_origin, set_origin)

    # r/w, in pixels, y extent
    def get_top(self): return self.y + self.height
    def set_top(self, y): self.y = y - self.height
    top = property(get_top, set_top)

    def get_bottom(self): return self.y
    def set_bottom(self, y): self.y = y
    bottom = property(get_bottom, set_bottom)

    # r/w, in pixels, x extent
    def get_left(self): return self.x
    def set_left(self, x): self.x = x
    left = property(get_left, set_left)

    def get_right(self): return self.x + self.width
    def set_right(self, x): self.x = x - self.width
    right = property(get_right, set_right)

    def get_center(self):
        return (self.x + self.width//2, self.y + self.height//2)
    def set_center(self, center):
        x, y = center
        # Fix: use integer // 2 like get_center and every other midpoint
        # setter; the old `self.height//2.0` silently made the y
        # coordinate a float for integer-sized rects.
        self.position = (x - self.width//2, y - self.height//2)
    center = property(get_center, set_center)

    def get_midtop(self):
        return (self.x + self.width//2, self.y + self.height)
    def set_midtop(self, midtop):
        x, y = midtop
        self.position = (x - self.width//2, y - self.height)
    midtop = property(get_midtop, set_midtop)

    def get_midbottom(self):
        return (self.x + self.width//2, self.y)
    def set_midbottom(self, midbottom):
        x, y = midbottom
        self.position = (x - self.width//2, y)
    midbottom = property(get_midbottom, set_midbottom)

    def get_midleft(self):
        return (self.x, self.y + self.height//2)
    def set_midleft(self, midleft):
        x, y = midleft
        self.position = (x, y - self.height//2)
    midleft = property(get_midleft, set_midleft)

    def get_midright(self):
        return (self.x + self.width, self.y + self.height//2)
    def set_midright(self, midright):
        x, y = midright
        self.position = (x - self.width, y - self.height//2)
    midright = property(get_midright, set_midright)

    def get_topleft(self):
        return (self.x, self.y + self.height)
    def set_topleft(self, position):
        x, y = position
        self.position = (x, y - self.height)
    topleft = property(get_topleft, set_topleft)

    def get_topright(self):
        return (self.x + self.width, self.y + self.height)
    def set_topright(self, position):
        x, y = position
        self.position = (x - self.width, y - self.height)
    topright = property(get_topright, set_topright)

    def get_bottomright(self):
        return (self.x + self.width, self.y)
    def set_bottomright(self, position):
        x, y = position
        self.position = (x - self.width, y)
    bottomright = property(get_bottomright, set_bottomright)

    def get_bottomleft(self):
        return (self.x, self.y)
    def set_bottomleft(self, position):
        self.x, self.y = position
    bottomleft = property(get_bottomleft, set_bottomleft)
| bsd-3-clause |
alu042/edx-platform | cms/djangoapps/contentstore/views/tests/test_course_updates.py | 20 | 13921 | """
unit tests for course_info views and models.
"""
import json
from mock import patch
from django.test.utils import override_settings
from contentstore.models import PushNotificationConfig
from contentstore.tests.test_course_settings import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_usage_url
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore.django import modulestore
class CourseUpdateTest(CourseTestCase):
    """CRUD and regression tests for the course updates (course_info) views.

    NOTE(review): these tests build server-side state through sequential
    requests, so statement order within each test is significant.
    """
    def create_update_url(self, provided_id=None, course_key=None):
        """Build the course_info_update_handler URL, optionally scoped to a
        specific update id and/or a different course."""
        if course_key is None:
            course_key = self.course.id
        kwargs = {'provided_id': str(provided_id)} if provided_id else None
        return reverse_course_url('course_info_update_handler', course_key, kwargs=kwargs)
    # The do all and end all of unit test cases.
    def test_course_update(self):
        """Go through each interface and ensure it works."""
        def get_response(content, date):
            """
            Helper method for making call to server and returning response.
            Does not supply a provided_id.
            """
            payload = {'content': content, 'date': date}
            url = self.create_update_url()
            resp = self.client.ajax_post(url, payload)
            self.assertContains(resp, '', status_code=200)
            return json.loads(resp.content)
        resp = self.client.get_html(
            reverse_course_url('course_info_handler', self.course.id)
        )
        self.assertContains(resp, 'Course Updates', status_code=200)
        init_content = '<iframe width="560" height="315" src="http://www.youtube.com/embed/RocY-Jd93XU" frameborder="0">'
        content = init_content + '</iframe>'
        payload = get_response(content, 'January 8, 2013')
        self.assertHTMLEqual(payload['content'], content)
        first_update_url = self.create_update_url(provided_id=payload['id'])
        content += '<div>div <p>p<br/></p></div>'
        payload['content'] = content
        # POST requests were coming in w/ these header values causing an error; so, repro error here
        resp = self.client.ajax_post(
            first_update_url, payload, HTTP_X_HTTP_METHOD_OVERRIDE="PUT", REQUEST_METHOD="POST"
        )
        self.assertHTMLEqual(content, json.loads(resp.content)['content'],
                             "iframe w/ div")
        # refetch using provided id
        refetched = self.client.get_json(first_update_url)
        self.assertHTMLEqual(
            content, json.loads(refetched.content)['content'], "get w/ provided id"
        )
        # now put in an evil update
        content = '<ol/>'
        payload = get_response(content, 'January 11, 2013')
        self.assertHTMLEqual(content, payload['content'], "self closing ol")
        course_update_url = self.create_update_url()
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        self.assertTrue(len(payload) == 2)
        # try json w/o required fields
        self.assertContains(
            self.client.ajax_post(course_update_url, {'garbage': 1}),
            'Failed to save', status_code=400
        )
        # test an update with text in the tail of the header
        content = 'outside <strong>inside</strong> after'
        payload = get_response(content, 'June 22, 2000')
        self.assertHTMLEqual(content, payload['content'], "text outside tag")
        # now try to update a non-existent update
        content = 'blah blah'
        payload = {'content': content, 'date': 'January 21, 2013'}
        self.assertContains(
            self.client.ajax_post(course_update_url + '9', payload),
            'Failed to save', status_code=400
        )
        # update w/ malformed html
        content = '<garbage tag No closing brace to force <span>error</span>'
        payload = {'content': content,
                   'date': 'January 11, 2013'}
        self.assertContains(
            self.client.ajax_post(course_update_url, payload),
            '<garbage'
        )
        # set to valid html which would break an xml parser
        content = "<p><br><br></p>"
        payload = get_response(content, 'January 11, 2013')
        self.assertHTMLEqual(content, payload['content'])
        # now try to delete a non-existent update
        self.assertContains(self.client.delete(course_update_url + '19'), "delete", status_code=400)
        # now delete a real update
        content = 'blah blah'
        payload = get_response(content, 'January 28, 2013')
        this_id = payload['id']
        self.assertHTMLEqual(content, payload['content'], "single iframe")
        # first count the entries
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        before_delete = len(payload)
        url = self.create_update_url(provided_id=this_id)
        resp = self.client.delete(url)
        payload = json.loads(resp.content)
        self.assertTrue(len(payload) == before_delete - 1)
    def test_course_updates_compatibility(self):
        '''
        Test that course updates doesn't break on old data (content in 'data' field).
        Note: new data will save as list in 'items' field.
        '''
        # get the updates and populate 'data' field with some data.
        location = self.course.id.make_usage_key('course_info', 'updates')
        course_updates = modulestore().create_item(
            self.user.id,
            location.course_key,
            location.block_type,
            block_id=location.block_id
        )
        update_date = u"January 23, 2014"
        update_content = u"Hello world!"
        update_data = u"<ol><li><h2>" + update_date + "</h2>" + update_content + "</li></ol>"
        course_updates.data = update_data
        modulestore().update_item(course_updates, self.user.id)
        # test getting all updates list
        course_update_url = self.create_update_url()
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        self.assertEqual(payload, [{u'date': update_date, u'content': update_content, u'id': 1}])
        self.assertTrue(len(payload) == 1)
        # test getting single update item
        first_update_url = self.create_update_url(provided_id=payload[0]['id'])
        resp = self.client.get_json(first_update_url)
        payload = json.loads(resp.content)
        self.assertEqual(payload, {u'date': u'January 23, 2014', u'content': u'Hello world!', u'id': 1})
        self.assertHTMLEqual(update_date, payload['date'])
        self.assertHTMLEqual(update_content, payload['content'])
        # test that while updating it converts old data (with string format in 'data' field)
        # to new data (with list format in 'items' field) and respectively updates 'data' field.
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items, [])
        # now try to update first update item
        update_content = 'Testing'
        payload = {'content': update_content, 'date': update_date}
        resp = self.client.ajax_post(
            course_update_url + '1', payload, HTTP_X_HTTP_METHOD_OVERRIDE="PUT", REQUEST_METHOD="POST"
        )
        self.assertHTMLEqual(update_content, json.loads(resp.content)['content'])
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items, [{u'date': update_date, u'content': update_content, u'id': 1}])
        # course_updates 'data' field should not update automatically
        self.assertEqual(course_updates.data, '')
        # test delete course update item (soft delete)
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items, [{u'date': update_date, u'content': update_content, u'id': 1}])
        # now try to delete first update item
        resp = self.client.delete(course_update_url + '1')
        self.assertEqual(json.loads(resp.content), [])
        # confirm that course update is soft deleted ('status' flag set to 'deleted') in db
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items,
                         [{u'date': update_date, u'content': update_content, u'id': 1, u'status': 'deleted'}])
        # now try to get deleted update
        resp = self.client.get_json(course_update_url + '1')
        payload = json.loads(resp.content)
        self.assertEqual(payload.get('error'), u"Course update not found.")
        self.assertEqual(resp.status_code, 404)
        # now check that course update don't munges html
        update_content = u"""<problem>
<p></p>
<multiplechoiceresponse>
<pre><problem>
    <p></p></pre>
<div><foo>bar</foo></div>"""
        payload = {'content': update_content, 'date': update_date}
        resp = self.client.ajax_post(
            course_update_url, payload, REQUEST_METHOD="POST"
        )
        self.assertHTMLEqual(update_content, json.loads(resp.content)['content'])
    def test_no_ol_course_update(self):
        '''Test trying to add to a saved course_update which is not an ol.'''
        # get the updates and set to something wrong
        location = self.course.id.make_usage_key('course_info', 'updates')
        modulestore().create_item(
            self.user.id,
            location.course_key,
            location.block_type,
            block_id=location.block_id
        )
        course_updates = modulestore().get_item(location)
        course_updates.data = 'bad news'
        modulestore().update_item(course_updates, self.user.id)
        init_content = '<iframe width="560" height="315" src="http://www.youtube.com/embed/RocY-Jd93XU" frameborder="0">'
        content = init_content + '</iframe>'
        payload = {'content': content, 'date': 'January 8, 2013'}
        course_update_url = self.create_update_url()
        resp = self.client.ajax_post(course_update_url, payload)
        payload = json.loads(resp.content)
        self.assertHTMLEqual(payload['content'], content)
        # now confirm that the bad news and the iframe make up single update
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        self.assertTrue(len(payload) == 1)
    def post_course_update(self, send_push_notification=False):
        """
        Posts an update to the course
        """
        course_update_url = self.create_update_url(course_key=self.course.id)
        # create a course via the view handler
        self.client.ajax_post(course_update_url)
        content = u"Sample update"
        payload = {'content': content, 'date': 'January 8, 2013'}
        if send_push_notification:
            payload['push_notification_selected'] = True
        resp = self.client.ajax_post(course_update_url, payload)
        # check that response status is 200 not 400
        self.assertEqual(resp.status_code, 200)
        payload = json.loads(resp.content)
        self.assertHTMLEqual(payload['content'], content)
    @patch("contentstore.push_notification.send_push_course_update")
    def test_post_course_update(self, mock_push_update):
        """
        Test that a user can successfully post on course updates and handouts of a course
        """
        self.post_course_update()
        # check that push notifications are not sent
        self.assertFalse(mock_push_update.called)
        updates_location = self.course.id.make_usage_key('course_info', 'updates')
        self.assertTrue(isinstance(updates_location, UsageKey))
        self.assertEqual(updates_location.name, u'updates')
        # check posting on handouts
        handouts_location = self.course.id.make_usage_key('course_info', 'handouts')
        course_handouts_url = reverse_usage_url('xblock_handler', handouts_location)
        content = u"Sample handout"
        payload = {'data': content}
        resp = self.client.ajax_post(course_handouts_url, payload)
        # check that response status is 200 not 500
        self.assertEqual(resp.status_code, 200)
        payload = json.loads(resp.content)
        self.assertHTMLEqual(payload['data'], content)
    @patch("contentstore.push_notification.send_push_course_update")
    def test_notifications_enabled_but_not_requested(self, mock_push_update):
        # Global flag on, but the poster did not opt in: no push is sent.
        PushNotificationConfig(enabled=True).save()
        self.post_course_update()
        self.assertFalse(mock_push_update.called)
    @patch("contentstore.push_notification.send_push_course_update")
    def test_notifications_enabled_and_sent(self, mock_push_update):
        # Global flag on and the poster opted in: push is sent.
        PushNotificationConfig(enabled=True).save()
        self.post_course_update(send_push_notification=True)
        self.assertTrue(mock_push_update.called)
    @override_settings(PARSE_KEYS={"APPLICATION_ID": "TEST_APPLICATION_ID", "REST_API_KEY": "TEST_REST_API_KEY"})
    @patch("contentstore.push_notification.Push")
    def test_notifications_sent_to_parse(self, mock_parse_push):
        # One alert per mobile platform is expected, hence call_count == 2.
        PushNotificationConfig(enabled=True).save()
        self.post_course_update(send_push_notification=True)
        self.assertEquals(mock_parse_push.alert.call_count, 2)
    @override_settings(PARSE_KEYS={"APPLICATION_ID": "TEST_APPLICATION_ID", "REST_API_KEY": "TEST_REST_API_KEY"})
    @patch("contentstore.push_notification.log_exception")
    @patch("contentstore.push_notification.Push")
    def test_notifications_error_from_parse(self, mock_parse_push, mock_log_exception):
        # A ParseError from the push service must be logged, not propagated.
        PushNotificationConfig(enabled=True).save()
        from parse_rest.core import ParseError
        mock_parse_push.alert.side_effect = ParseError
        self.post_course_update(send_push_notification=True)
        self.assertTrue(mock_log_exception.called)
| agpl-3.0 |
luispedro/jug | jug/backends/file_keepalive_monitor.py | 1 | 2508 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2017, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
file_store_monitor : part of file_store to keep locks alive during execution
'''
from os import utime, getppid, kill
from sys import argv
from time import sleep
def parent_gone_or_changed(pid):
    """Return True when `pid` is no longer our live parent process.

    Covers three cases: we were reparented (parent died and/or we were
    disowned), we now run under init (PID 1), or `pid` no longer accepts
    signal 0 (i.e. the process is gone)."""
    ppid_now = getppid()
    if ppid_now != pid or ppid_now == 1:
        # A different parent means parent died and/or we got disowned;
        # running under init (PID 1) also means we were disowned.
        return True
    # Signal 0 performs the permission/existence check without delivering
    # anything: an OSError means the parent process is dead.
    try:
        kill(pid, 0)
    except OSError:
        return True
    return False
def main():
    """Keep the lock file named in argv[1] fresh while our parent lives.

    Wakes every 5 seconds to check on the parent; touches the lock's
    mtime only once per 60 wake-ups (i.e. every 5 minutes) to limit IO.
    Exits when the parent goes away or the lock file disappears."""
    lock_path = argv[1]
    watched_parent = getppid()
    # 60 ticks of 5 seconds each == one utime() every 5 minutes
    ticks_per_touch = 60
    ticks_left = ticks_per_touch
    while True:
        # self check every 5 seconds
        sleep(5)
        # We die if our parent went away
        if parent_gone_or_changed(watched_parent):
            break
        # but only update the lock once every 5 minutes for IO reasons
        ticks_left -= 1
        if ticks_left <= 0:
            ticks_left = ticks_per_touch
            try:
                utime(lock_path, None)
            except OSError:
                # Lock is no longer available
                break
if __name__ == '__main__':
    # Entry point: spawned by file_store with the lock path as argv[1].
    main()
| mit |
Simounet/Rhythmbox-Ampache | AmpacheConfigDialog.py | 1 | 1194 | import gtk, gtk.glade
class AmpacheConfigDialog(object):
    """Preferences dialog for the Ampache plugin, built from a Glade file
    and backed by a simple get/set config object."""

    def __init__(self, glade_file, config):
        self.gladexml = gtk.glade.XML(glade_file)
        self.config = config
        self.config_dialog = self.gladexml.get_widget("preferences_dialog")
        # Text entries, pre-filled from the stored configuration.
        self.url = self._config_entry("url_entry", "url")
        self.username = self._config_entry("username_entry", "username")
        self.password = self._config_entry("password_entry", "password")
        self.password.set_visibility(False)
        self.config_dialog.connect("response", self.dialog_response)

    def _config_entry(self, widget_name, config_key):
        # Fetch a text-entry widget and seed it with the saved config value.
        entry = self.gladexml.get_widget(widget_name)
        entry.set_text(self.config.get(config_key))
        return entry

    def get_dialog(self):
        """Return the underlying gtk dialog widget."""
        return self.config_dialog

    def dialog_response(self, dialog, response):
        """Persist the entries on OK; always hide the dialog afterwards."""
        if response == gtk.RESPONSE_OK:
            self.config.set("url", self.url.get_text())
            self.config.set("username", self.username.get_text())
            self.config.set("password", self.password.get_text())
        elif response not in (gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT):
            print("unexpected response type in dialog_response")
        self.config_dialog.hide()
| gpl-2.0 |
amanharitsh123/zulip | scripts/lib/zulip_tools.py | 1 | 10249 | #!/usr/bin/env python3
import argparse
import datetime
import errno
import hashlib
import logging
import os
import pwd
import re
import shutil
import subprocess
import sys
import time
import json
if False:
from typing import Sequence, Set, Text, Any
DEPLOYMENTS_DIR = "/home/zulip/deployments"
LOCK_DIR = os.path.join(DEPLOYMENTS_DIR, "lock")
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'
# Color codes
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BLACKONYELLOW = '\x1b[0;30;43m'
WHITEONRED = '\x1b[0;37;41m'
BOLDRED = '\x1B[1;31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
BLUE = '\x1b[34m'
MAGENTA = '\x1b[35m'
CYAN = '\x1b[36m'
def parse_cache_script_args(description):
    # type: (Text) -> argparse.Namespace
    """Parse the CLI options shared by the cache-purging scripts."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--threshold", dest="threshold_days", type=int, default=14,
        nargs="?", metavar="<days>",
        help="Any cache which is not in use by a deployment not older "
             "than threshold days(current installation in dev) and older "
             "than threshold days will be deleted. (defaults to 14)")
    parser.add_argument(
        "--dry-run", dest="dry_run", action="store_true",
        help="If specified then script will only print the caches that it "
             "will delete/keep back. It will not delete any cache.")
    parser.add_argument(
        "--verbose", dest="verbose", action="store_true",
        help="If specified then script will print a detailed report of "
             "what is being will deleted/kept back.")
    parsed = parser.parse_args()
    # A dry run always prints the detailed report; that's its whole point.
    if parsed.dry_run:
        parsed.verbose = True
    return parsed
def get_deployment_version(extract_path):
    # type: (str) -> str
    """Extract ZULIP_VERSION from an unpacked release tarball.

    Scans extract_path for `zulip-server*` directories and returns the
    first version string found in a contained version.py; falls back to
    '0.0.0' when nothing matches."""
    for entry in os.listdir(extract_path):
        entry_path = os.path.join(extract_path, entry)
        if not (entry.startswith('zulip-server') and os.path.isdir(entry_path)):
            continue
        with open(os.path.join(entry_path, 'version.py')) as fh:
            match = re.search('ZULIP_VERSION = "(.*)"', fh.read())
        if match:
            return match.groups()[0]
    return '0.0.0'
def is_invalid_upgrade(current_version, new_version):
    # type: (str, str) -> bool
    """True when jumping from <= 1.3.10 straight past 1.4.3 (unsupported).

    NOTE(review): the comparisons are lexicographic on the raw version
    strings, matching the original behavior.
    """
    return new_version > '1.4.3' and current_version <= '1.3.10'
def subprocess_text_output(args):
    # type: (Sequence[str]) -> str
    """Run a command and return its stdout as stripped text."""
    output = subprocess.check_output(args, universal_newlines=True)
    return output.strip()
def su_to_zulip():
    # type: () -> None
    # Drop root privileges to the "zulip" user.  Order matters: setgid must
    # run while we still have root, before setuid drops that capability.
    pwent = pwd.getpwnam("zulip")
    os.setgid(pwent.pw_gid)
    os.setuid(pwent.pw_uid)
    # HOME becomes the parent of the deployments tree (i.e. /home/zulip).
    os.environ['HOME'] = os.path.abspath(os.path.join(DEPLOYMENTS_DIR, '..'))
def make_deploy_path():
    # type: () -> str
    """Return a fresh timestamped directory path under DEPLOYMENTS_DIR."""
    now = datetime.datetime.now()
    return os.path.join(DEPLOYMENTS_DIR, now.strftime(TIMESTAMP_FORMAT))
if __name__ == '__main__':
    # Allow shell scripts to invoke selected helpers:
    #   zulip_tools.py make_deploy_path
    cmd = sys.argv[1]
    if cmd == 'make_deploy_path':
        print(make_deploy_path())
def mkdir_p(path):
    # type: (str) -> None
    """Create `path` and any missing parents, like `mkdir -p`.

    Succeeds silently if `path` already exists as a directory; raises
    OSError (FileExistsError) if it exists as something else — the same
    behavior as the old EAFP dance this replaces.
    """
    # This file targets Python 3 (see shebang), so os.makedirs can handle
    # the "already exists" case directly.
    os.makedirs(path, exist_ok=True)
def get_deployment_lock(error_rerun_script):
    # type: (str) -> None
    # Acquire the global deployment lock by creating LOCK_DIR; mkdir is
    # atomic on POSIX, so at most one deployment can hold it.  Retries for
    # up to 5 minutes, then prints operator instructions and exits.
    start_time = time.time()
    got_lock = False
    while time.time() - start_time < 300:
        try:
            os.mkdir(LOCK_DIR)
            got_lock = True
            break
        except OSError:
            # Someone else holds the lock (or it is stale): wait and retry.
            print(WARNING + "Another deployment in progress; waiting for lock... " +
                  "(If no deployment is running, rmdir %s)" % (LOCK_DIR,) + ENDC)
            sys.stdout.flush()
            time.sleep(3)
    if not got_lock:
        print(FAIL + "Deployment already in progress. Please run\n" +
              "  %s\n" % (error_rerun_script,) +
              "manually when the previous deployment finishes, or run\n" +
              "  rmdir %s\n" % (LOCK_DIR,) +
              "if the previous deployment crashed." +
              ENDC)
        sys.exit(1)
def release_deployment_lock():
    # type: () -> None
    # Counterpart to get_deployment_lock(): removing the lock directory
    # lets the next waiting deployment proceed.
    shutil.rmtree(LOCK_DIR)
def run(args, **kwargs):
    # type: (Sequence[str], **Any) -> None
    """subprocess.check_call wrapper that echoes the command being run (in
    the style of `set -x`) and prints a loud banner on failure before
    re-raising the CalledProcessError."""
    # Join once, up front.  The old code re-evaluated " ".join(args) in the
    # except branch *after* args may have been replaced by a single string
    # for shell=True, which space-separated every character of the command
    # in the error banner.
    printable_cmd = " ".join(args)
    # Output what we're doing in the `set -x` style
    print("+ %s" % (printable_cmd,))
    if kwargs.get('shell'):
        # With shell=True we can only pass string to Popen
        args = printable_cmd
    try:
        subprocess.check_call(args, **kwargs)
    except subprocess.CalledProcessError:
        print()
        print(WHITEONRED + "Error running a subcommand of %s: %s" % (sys.argv[0], printable_cmd) +
              ENDC)
        print(WHITEONRED + "Actual error output for the subcommand is just above this." +
              ENDC)
        print()
        raise
def log_management_command(cmd, log_path):
    # type: (Text, Text) -> None
    """Append a timestamped "Ran '<cmd>'" record to log_path, creating the
    containing directory on first use.

    NOTE(review): each call adds a new FileHandler to the shared
    "zulip.management" logger, matching the original behavior."""
    log_dir = os.path.dirname(log_path)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    handler = logging.FileHandler(log_path)
    handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    logger = logging.getLogger("zulip.management")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("Ran '%s'" % (cmd,))
def get_environment():
    # type: () -> Text
    """Classify where we are running: 'prod' (deployments tree present),
    'travis' (CI env var set), otherwise 'dev'."""
    if os.path.exists(DEPLOYMENTS_DIR):
        return "prod"
    return "travis" if os.environ.get("TRAVIS") else "dev"
def get_recent_deployments(threshold_days):
    # type: (int) -> Set[Text]
    """Return deployment directories younger than threshold_days (judged by
    their timestamped names), plus any non-timestamped deployment
    directories and /root/zulip when it exists."""
    recent = set()
    cutoff = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
    for dir_name in os.listdir(DEPLOYMENTS_DIR):
        full_path = os.path.join(DEPLOYMENTS_DIR, dir_name)
        if not os.path.isdir(full_path):
            # Skip things like uwsgi sockets, symlinks, etc.
            continue
        if not os.path.exists(os.path.join(full_path, "zerver")):
            # Skip things like "lock" that aren't actually a deployment directory
            continue
        try:
            if datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT) >= cutoff:
                recent.add(full_path)
        except ValueError:
            # Always include deployments whose name is not in the format of a timestamp.
            recent.add(full_path)
    if os.path.exists("/root/zulip"):
        recent.add("/root/zulip")
    return recent
def get_threshold_timestamp(threshold_days):
    # type: (int) -> int
    """Return the Unix timestamp of `threshold_days` days before now."""
    cutoff = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
    return int(time.mktime(cutoff.utctimetuple()))
def get_caches_to_be_purged(caches_dir, caches_in_use, threshold_days):
    # type: (Text, Set[Text], int) -> Set[Text]
    """Return the caches under caches_dir that are safe to delete.

    A cache is purgeable only when it is (1) not in use by the current
    installation, (2) not in use by any deployment newer than
    threshold_days, (3) not in use by /root/zulip (caches_in_use encodes
    1-3), and (4) itself older than threshold_days."""
    threshold_timestamp = get_threshold_timestamp(threshold_days)
    caches_to_purge = set()
    for cache_dir_base in os.listdir(caches_dir):
        cache_dir = os.path.join(caches_dir, cache_dir_base)
        if cache_dir in caches_in_use:
            # Never purge a cache which is in use.
            continue
        if os.path.getctime(cache_dir) < threshold_timestamp:
            caches_to_purge.add(cache_dir)
    return caches_to_purge
def purge_unused_caches(caches_dir, caches_in_use, cache_type, args):
    # type: (Text, Set[Text], Text, argparse.Namespace) -> None
    # Split the contents of `caches_dir` into purge/keep sets and hand them
    # to may_be_perform_purging, which honors args.dry_run / args.verbose.
    all_caches = set(os.path.join(caches_dir, entry)
                     for entry in os.listdir(caches_dir))
    purge_set = get_caches_to_be_purged(caches_dir, caches_in_use,
                                        args.threshold_days)
    keep_set = all_caches - purge_set
    may_be_perform_purging(purge_set, keep_set, cache_type,
                           args.dry_run, args.verbose)
    if args.verbose:
        print("Done!")
def generate_sha1sum_emoji(zulip_path):
    # type: (Text) -> Text
    # Hash the emoji build inputs so that a change to any of them (or to the
    # pinned `emoji-datasource` npm version) invalidates the success stamp.
    emoji_dir = os.path.join(zulip_path, 'tools', 'setup', 'emoji')
    digest = hashlib.sha1()
    for name in ['emoji_map.json', 'build_emoji', 'emoji_setup_utils.py']:
        with open(os.path.join(emoji_dir, name), 'rb') as source_file:
            digest.update(source_file.read())
    # Take into account the version of `emoji-datasource` package while
    # generating success stamp.
    package_file_path = os.path.join(zulip_path, 'package.json')
    with open(package_file_path, 'r') as package_file:
        dependency_data = json.load(package_file)['dependencies']
    if 'emoji-datasource' in dependency_data:
        datasource_version = dependency_data['emoji-datasource'].encode('utf-8')
    else:
        datasource_version = b"0"
    digest.update(datasource_version)
    return digest.hexdigest()
def may_be_perform_purging(dirs_to_purge, dirs_to_keep, dir_type, dry_run, verbose):
    # type: (Set[Text], Set[Text], Text, bool, bool) -> None
    # Delete the selected directories (or, in dry-run mode, only report what
    # would happen) and optionally list the directories being kept.
    if dry_run:
        print("Performing a dry run...")
    else:
        print("Cleaning unused %ss..." % (dir_type,))
    for target in dirs_to_purge:
        if verbose:
            print("Cleaning unused %s: %s" % (dir_type, target))
        if not dry_run:
            # These trees may be root-owned, hence sudo rm -rf.
            subprocess.check_call(["sudo", "rm", "-rf", target])
    for kept in dirs_to_keep:
        if verbose:
            print("Keeping used %s: %s" % (dir_type, kept))
| apache-2.0 |
hgl888/chromium-crosswalk | tools/telemetry/telemetry/core/platform/__init__.py | 5 | 11324 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging as real_logging
import os
import sys
from telemetry.core import discover
from telemetry.core.platform import network_controller
from telemetry.core.platform import platform_backend as platform_backend_module
from telemetry.core.platform import tracing_controller
from telemetry.core import util
_host_platform = None
# Remote platform is a dictionary from device ids to remote platform instances.
_remote_platforms = {}
def _InitHostPlatformIfNeeded():
  """Lazily creates the module-level host Platform singleton."""
  global _host_platform
  if _host_platform:
    return
  # Pick the first discovered backend class that claims to run on the host.
  host_backend = None
  for backend_class in _IterAllPlatformBackendClasses():
    if backend_class.IsPlatformBackendForHost():
      host_backend = backend_class()
      break
  if not host_backend:
    raise NotImplementedError()
  _host_platform = Platform(host_backend)
def GetHostPlatform():
  """Returns the (lazily created) Platform for the machine telemetry runs on."""
  _InitHostPlatformIfNeeded()
  return _host_platform
def _IterAllPlatformBackendClasses():
  """Yields every PlatformBackend subclass discoverable under this package."""
  backend_dir = os.path.dirname(os.path.realpath(__file__))
  discovered = discover.DiscoverClasses(
      backend_dir, util.GetTelemetryDir(),
      platform_backend_module.PlatformBackend)
  return discovered.itervalues()
def GetPlatformForDevice(device, finder_options, logging=real_logging):
  """Returns (creating and caching it if needed) a platform for the device.
  Args:
    device: a device.Device instance.
  """
  # Reuse a previously built platform for this device, keyed by its guid.
  if device.guid in _remote_platforms:
    return _remote_platforms[device.guid]
  try:
    for backend_class in _IterAllPlatformBackendClasses():
      if not backend_class.SupportsDevice(device):
        continue
      _remote_platforms[device.guid] = (
          backend_class.CreatePlatformForDevice(device, finder_options))
      return _remote_platforms[device.guid]
    # No discovered backend supports this device.
    return None
  except Exception:
    logging.error('Fail to create platform instance for %s.', device.name)
    raise
class Platform(object):
  """The platform that the target browser is running on.
  Provides a limited interface to interact with the platform itself, where
  possible. It's important to note that platforms may not provide a specific
  API, so check with IsFooBar() for availability.
  """
  def __init__(self, platform_backend):
    # Every public method below delegates to this backend.
    self._platform_backend = platform_backend
    self._platform_backend.InitPlatformBackend()
    # The backend keeps a back-reference to this wrapper.
    self._platform_backend.SetPlatform(self)
    self._network_controller = network_controller.NetworkController(
        self._platform_backend.network_controller_backend)
    self._tracing_controller = tracing_controller.TracingController(
        self._platform_backend.tracing_controller_backend)
  @property
  def is_host_platform(self):
    # True when this Platform is the one telemetry itself is running on.
    return self == GetHostPlatform()
  @property
  def network_controller(self):
    """Control network settings and servers to simulate the Web."""
    return self._network_controller
  @property
  def tracing_controller(self):
    """The TracingController attached to this platform."""
    return self._tracing_controller
  def CanMonitorThermalThrottling(self):
    """Platforms may be able to detect thermal throttling.
    Some fan-less computers go into a reduced performance mode when their heat
    exceeds a certain threshold. Performance tests in particular should use this
    API to detect if this has happened and interpret results accordingly.
    """
    return self._platform_backend.CanMonitorThermalThrottling()
  def IsThermallyThrottled(self):
    """Returns True if the device is currently thermally throttled."""
    return self._platform_backend.IsThermallyThrottled()
  def HasBeenThermallyThrottled(self):
    """Returns True if the device has been thermally throttled."""
    return self._platform_backend.HasBeenThermallyThrottled()
  def GetDeviceTypeName(self):
    """Returns a string description of the Platform device, or None.
    Examples: Nexus 7, Nexus 6, Desktop"""
    return self._platform_backend.GetDeviceTypeName()
  def GetArchName(self):
    """Returns a string description of the Platform architecture.
    Examples: x86_64 (posix), AMD64 (win), armeabi-v7a, x86"""
    return self._platform_backend.GetArchName()
  def GetOSName(self):
    """Returns a string description of the Platform OS.
    Examples: WIN, MAC, LINUX, CHROMEOS"""
    return self._platform_backend.GetOSName()
  def GetOSVersionName(self):
    """Returns a logically sortable, string-like description of the Platform OS
    version.
    Examples: VISTA, WIN7, LION, MOUNTAINLION"""
    return self._platform_backend.GetOSVersionName()
  def GetOSVersionNumber(self):
    """Returns an integer description of the Platform OS major version.
    Examples: On Mac, 13 for Mavericks, 14 for Yosemite."""
    return self._platform_backend.GetOSVersionNumber()
  def CanFlushIndividualFilesFromSystemCache(self):
    """Returns true if the disk cache can be flushed for specific files."""
    return self._platform_backend.CanFlushIndividualFilesFromSystemCache()
  def FlushEntireSystemCache(self):
    """Flushes the OS's file cache completely.
    This function may require root or administrator access."""
    return self._platform_backend.FlushEntireSystemCache()
  def FlushSystemCacheForDirectory(self, directory):
    """Flushes the OS's file cache for the specified directory.
    Any files or directories inside |directory| matching a name in the
    |ignoring| list will be skipped.
    This function does not require root or administrator access."""
    return self._platform_backend.FlushSystemCacheForDirectory(directory)
  def FlushDnsCache(self):
    """Flushes the OS's DNS cache completely.
    This function may require root or administrator access."""
    return self._platform_backend.FlushDnsCache()
  def LaunchApplication(self, application, parameters=None,
                        elevate_privilege=False):
    """Launches the given |application| with a list of |parameters| on the OS.
    Set |elevate_privilege| to launch the application with root or admin rights.
    Returns:
      A popen style process handle for host platforms.
    """
    return self._platform_backend.LaunchApplication(
        application, parameters, elevate_privilege=elevate_privilege)
  def IsApplicationRunning(self, application):
    """Returns whether an application is currently running."""
    return self._platform_backend.IsApplicationRunning(application)
  def CanLaunchApplication(self, application):
    """Returns whether the platform can launch the given application."""
    return self._platform_backend.CanLaunchApplication(application)
  def InstallApplication(self, application):
    """Installs the given application."""
    return self._platform_backend.InstallApplication(application)
  def CanCaptureVideo(self):
    """Returns a bool indicating whether the platform supports video capture."""
    return self._platform_backend.CanCaptureVideo()
  def StartVideoCapture(self, min_bitrate_mbps):
    """Starts capturing video.
    Outer framing may be included (from the OS, browser window, and webcam).
    Args:
      min_bitrate_mbps: The minimum capture bitrate in MegaBits Per Second.
          The platform is free to deliver a higher bitrate if it can do so
          without increasing overhead.
    Raises:
      ValueError if the required |min_bitrate_mbps| can't be achieved.
    """
    return self._platform_backend.StartVideoCapture(min_bitrate_mbps)
  def StopVideoCapture(self):
    """Stops capturing video.
    Returns:
      A telemetry.core.video.Video object.
    """
    return self._platform_backend.StopVideoCapture()
  def CanMonitorPower(self):
    """Returns True iff power can be monitored asynchronously via
    StartMonitoringPower() and StopMonitoringPower().
    """
    return self._platform_backend.CanMonitorPower()
  def CanMeasurePerApplicationPower(self):
    """Returns True if the power monitor can measure power for the target
    application in isolation. False if power measurement is for full system
    energy consumption."""
    return self._platform_backend.CanMeasurePerApplicationPower()
  def StartMonitoringPower(self, browser):
    """Starts monitoring power utilization statistics.
    Args:
      browser: The browser to monitor.
    """
    # Callers must check CanMonitorPower() first.
    assert self._platform_backend.CanMonitorPower()
    self._platform_backend.StartMonitoringPower(browser)
  def StopMonitoringPower(self):
    """Stops monitoring power utilization and returns stats
    Returns:
      None if power measurement failed for some reason, otherwise a dict of
      power utilization statistics containing: {
        # An identifier for the data provider. Allows to evaluate the precision
        # of the data. Example values: monsoon, powermetrics, ds2784
        'identifier': identifier,
        # The instantaneous power (voltage * current) reading in milliwatts at
        # each sample.
        'power_samples_mw': [mw0, mw1, ..., mwN],
        # The full system energy consumption during the sampling period in
        # milliwatt hours. May be estimated by integrating power samples or may
        # be exact on supported hardware.
        'energy_consumption_mwh': mwh,
        # The target application's energy consumption during the sampling period
        # in milliwatt hours. Should be returned iff
        # CanMeasurePerApplicationPower() return true.
        'application_energy_consumption_mwh': mwh,
        # A platform-specific dictionary of additional details about the
        # utilization of individual hardware components.
        component_utilization: {
          # Platform-specific data not attributed to any particular hardware
          # component.
          whole_package: {
            # Device-specific onboard temperature sensor.
            'average_temperature_c': c,
            ...
          }
          ...
        }
      }
    """
    return self._platform_backend.StopMonitoringPower()
  def CanMonitorNetworkData(self):
    """Returns true if network data can be retrieved, false otherwise."""
    return self._platform_backend.CanMonitorNetworkData()
  def GetNetworkData(self, browser):
    """Get current network data.
    Returns:
      Tuple of (sent_data, received_data) in kb if data can be found,
      None otherwise.
    """
    # Only meaningful for a browser running on this very platform.
    assert browser.platform == self
    return self._platform_backend.GetNetworkData(browser)
  def IsCooperativeShutdownSupported(self):
    """Indicates whether CooperativelyShutdown, below, is supported.
    It is not necessary to implement it on all platforms."""
    return self._platform_backend.IsCooperativeShutdownSupported()
  def CooperativelyShutdown(self, proc, app_name):
    """Cooperatively shut down the given process from subprocess.Popen.
    Currently this is only implemented on Windows. See
    crbug.com/424024 for background on why it was added.
    Args:
      proc: a process object returned from subprocess.Popen.
      app_name: on Windows, is the prefix of the application's window
          class name that should be searched for. This helps ensure
          that only the application's windows are closed.
    Returns True if it is believed the attempt succeeded.
    """
    return self._platform_backend.CooperativelyShutdown(proc, app_name)
| bsd-3-clause |
gibiansky/tensorflow | tensorflow/python/kernel_tests/pooling_ops_3d_test.py | 29 | 11774 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class PoolingTest(test.TestCase):
  """Forward-value and gradient tests for the 3D avg/max pooling ops."""
  def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
                    expected):
    """Verifies the output values of the pooling function.
    Args:
      pool_func: Function to be called: co.MaxPool, co.AvgPool.
      input_sizes: Input tensor dimensions.
      window: Tuple of kernel dims: planes, rows, cols.
      strides: Tuple of strides for dims: planes, rows, cols.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.test_session(use_gpu=True) as sess:
      t = constant_op.constant(x, shape=input_sizes)
      # ksize/strides are in NDHWC layout: batch and channel dims are 1.
      t = pool_func(
          t,
          ksize=[1, window[0], window[1], window[2], 1],
          strides=[1, strides[0], strides[1], strides[2], 1],
          padding=padding)
      vals = sess.run(t)
    # Verifies values.
    actual = vals.flatten()
    self.assertAllClose(expected, actual)
  def testAvgPool3dValidPadding(self):
    expected_output = [20.5, 21.5, 22.5]
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 3, 3, 3, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="VALID",
        expected=expected_output)
  def testAvgPool3dSamePadding(self):
    expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 2, 2, 4, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="SAME",
        expected=expected_output)
  def testAvgPool3dSamePaddingDifferentStrides(self):
    expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 5, 8, 1, 1],
        window=(1, 2, 3),
        strides=(2, 3, 1),
        padding="SAME",
        expected=expected_output)
  def testMaxPool3dValidPadding(self):
    expected_output = [40.0, 41.0, 42.0]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 3, 3, 3, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="VALID",
        expected=expected_output)
  def testMaxPool3dSamePadding(self):
    expected_output = [31., 32., 33., 34., 35., 36.]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 2, 2, 3, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="SAME",
        expected=expected_output)
  def testMaxPool3dSamePaddingDifferentStrides(self):
    expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 5, 8, 1, 1],
        window=(1, 2, 3),
        strides=(2, 3, 1),
        padding="SAME",
        expected=expected_output)
    # Test pooling on a larger input, with different stride and kernel
    # size for the 'z' dimension.
    # Simulate max pooling in numpy to get the expected output.
    input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
    input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
                        mode="constant")
    expected_output = input_data[:, 1::2, 1::2, :]
    expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
    expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
    expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 5, 27, 27, 64],
        window=(1, 2, 2),
        strides=(1, 2, 2),
        padding="SAME",
        expected=expected_output.flatten())
  def testKernelSmallerThanStride(self):
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 3, 3, 3, 1],
        window=[1, 1, 1],
        strides=[2, 2, 2],
        padding="SAME",
        expected=[1, 3, 7, 9, 19, 21, 25, 27])
    self._VerifyValues(
        nn_ops.max_pool3d,
        input_sizes=[1, 7, 7, 7, 1],
        window=[2, 2, 2],
        strides=[3, 3, 3],
        padding="VALID",
        expected=[58, 61, 79, 82, 205, 208, 226, 229])
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 3, 3, 3, 1],
        window=[1, 1, 1],
        strides=[2, 2, 2],
        padding="SAME",
        expected=[1, 3, 7, 9, 19, 21, 25, 27])
    self._VerifyValues(
        nn_ops.avg_pool3d,
        input_sizes=[1, 7, 7, 7, 1],
        window=[2, 2, 2],
        strides=[3, 3, 3],
        padding="VALID",
        expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
  def _ConstructAndTestGradient(self,
                                pool_func,
                                input_sizes,
                                output_sizes,
                                window,
                                strides,
                                padding,
                                x_init_value=None):
    """Verifies the gradients of the avg pooling function.
    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool,
        or the Lua version.
      input_sizes: Input tensor dimensions.
      output_sizes: Output tensor dimensions.
      window: Tuple of kernel dims: planes, rows, cols.
      strides: Tuple of strides for dims: planes, rows, cols.
      padding: Padding type.
      x_init_value: Values to be passed to the gradient checker.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.test_session(use_gpu=True):
      input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
      err_margin = 1e-3
      if pool_func == nn_ops.avg_pool3d:
        func_name = "avg_pool3d"
      else:
        # Max pool needs distinct input values so argmax (and thus the
        # gradient routing) is well defined.
        if x_init_value is None:
          x_init_value = np.asfarray(
              np.arange(1, total_size + 1),
              dtype=np.float32).reshape(input_sizes)
        func_name = "max_pool3d"
      t = pool_func(
          input_tensor,
          ksize=[1, window[0], window[1], window[2], 1],
          strides=[1, strides[0], strides[1], strides[2], 1],
          padding=padding,
          name=func_name)
      err = gradient_checker.compute_gradient_error(
          input_tensor,
          input_sizes,
          t,
          output_sizes,
          x_init_value=x_init_value,
          delta=1e-2)
    print("%s gradient error = " % func_name, err)
    self.assertLess(err, err_margin)
  # The gradient tests below are named test<Op>Grad<Padding><window>_<stride>…
  def testMaxPoolGradValidPadding1_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[1, 3, 3, 3, 1],
        output_sizes=[1, 3, 3, 3, 1],
        window=(1, 1, 1),
        strides=(1, 1, 1),
        padding="VALID")
  def testMaxPoolGradValidPadding2_1_6_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[2, 3, 3, 6, 3],
        output_sizes=[2, 2, 2, 5, 3],
        window=(2, 2, 2),
        strides=(1, 1, 1),
        padding="VALID")
  def testMaxPoolGradValidPadding2_1_7_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[2, 3, 5, 7, 3],
        output_sizes=[2, 2, 4, 6, 3],
        window=(2, 2, 2),
        strides=(1, 1, 1),
        padding="VALID")
  def testMaxPoolGradValidPadding2_2_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[2, 2, 2, 2, 3],
        output_sizes=[2, 1, 1, 1, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="VALID")
  def testMaxPoolGradSamePadding1_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[2, 3, 2, 4, 1],
        output_sizes=[2, 3, 2, 4, 1],
        window=(1, 1, 1),
        strides=(1, 1, 1),
        padding="SAME")
  def testMaxPoolGradSamePadding2_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[2, 3, 2, 4, 1],
        output_sizes=[2, 3, 2, 4, 1],
        window=(2, 2, 2),
        strides=(1, 1, 1),
        padding="SAME")
  def testMaxPoolGradSamePadding2_2_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[2, 5, 2, 4, 3],
        output_sizes=[2, 3, 1, 2, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="SAME")
  def testMaxPoolGradSamePadding3_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.max_pool3d,
        input_sizes=[1, 3, 3, 7, 1],
        output_sizes=[1, 3, 3, 7, 1],
        window=(3, 3, 3),
        strides=(1, 1, 1),
        padding="SAME")
  def testAvgPoolGradValidPadding1_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[2, 3, 3, 3, 3],
        output_sizes=[2, 3, 3, 3, 3],
        window=(1, 1, 1),
        strides=(1, 1, 1),
        padding="VALID")
  def testAvgPoolGradValidPadding2_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[2, 3, 3, 3, 3],
        output_sizes=[2, 2, 2, 2, 3],
        window=(2, 2, 2),
        strides=(1, 1, 1),
        padding="VALID")
  def testAvgPoolGradValidPadding2_2_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[2, 2, 2, 2, 3],
        output_sizes=[2, 1, 1, 1, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="VALID")
  def testAvgPoolGradSamePadding1_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[2, 3, 2, 4, 3],
        output_sizes=[2, 3, 2, 4, 3],
        window=(1, 1, 1),
        strides=(1, 1, 1),
        padding="SAME")
  def testAvgPoolGradSamePadding2_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[1, 2, 2, 2, 1],
        output_sizes=[1, 2, 2, 2, 1],
        window=(2, 2, 2),
        strides=(1, 1, 1),
        padding="SAME")
  def testAvgPoolGradSamePadding2_2_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[2, 5, 2, 4, 3],
        output_sizes=[2, 3, 1, 2, 3],
        window=(2, 2, 2),
        strides=(2, 2, 2),
        padding="SAME")
  def testAvgPoolGradSamePadding3_1_3d(self):
    self._ConstructAndTestGradient(
        nn_ops.avg_pool3d,
        input_sizes=[1, 3, 6, 7, 1],
        output_sizes=[1, 3, 6, 7, 1],
        window=(3, 3, 3),
        strides=(1, 1, 1),
        padding="SAME")
# Script entry point: run every test case in this module.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
microcom/odoo | addons/website_form/models/models.py | 27 | 4653 | import itertools
from openerp import tools
from openerp import models, fields, api
MAGIC_FIELDS = ["id", "create_uid", "create_date", "write_uid", "write_date", "__last_update"]
class website_form_config(models.Model):
    # Per-website switch: when enabled, form submissions also record metadata.
    _inherit = 'website'
    website_form_enable_metadata = fields.Boolean('Write metadata',help="Enable writing metadata on form submit.")
class website_form_model(models.Model):
    """Extends ir.model with the opt-in flags that let a model be used as a
    website form target, plus helpers computing which fields a form may write."""
    _name = 'ir.model'
    _inherit = 'ir.model'
    website_form_access = fields.Boolean('Allowed to use in forms', help='Enable the form builder feature for this model.')
    website_form_default_field_id = fields.Many2one('ir.model.fields', 'Field for custom form data', domain="[('model', '=', model), ('ttype', '=', 'text')]", help="Specify the field wich will contain meta and custom form fields datas.")
    website_form_label = fields.Char("Label for form action", help="Form action label. Ex: crm.lead could be 'Send an e-mail' and project.issue could be 'Create an Issue'.")
    def _all_inherited_model_ids(self):
        # Ids of this model plus, recursively, of every model it _inherits.
        return list(itertools.chain(
            [self.id],
            *(m._all_inherited_model_ids() for m in self.inherited_model_ids)
        ))
    def _get_form_writable_fields(self):
        """
        Restriction of "authorized fields" (fields which can be used in the
        form builders) to fields which have actually been opted into form
        builders and are writable. By default no field is writable by the
        form builder.
        """
        excluded = {
            field.name
            for field in self.env['ir.model.fields'].sudo().search([
                ('model_id', 'in', self._all_inherited_model_ids()),
                ('website_form_blacklisted', '=', True)
            ])
        }
        return {
            k: v for k, v in self.get_authorized_fields().iteritems()
            if k not in excluded
        }
    @api.multi
    def get_authorized_fields(self):
        """Return the fields_get() of the target model, minus _inherits link
        fields, readonly fields and magic columns; fields with a default are
        marked as not required."""
        model = self.env[self.model]
        fields_get = model.fields_get()
        # Drop the many2one link fields used for delegation inheritance.
        for key, val in model._inherits.iteritems():
            fields_get.pop(val,None)
        # Unrequire fields with default values
        default_values = model.default_get(fields_get.keys())
        for field in [f for f in fields_get if f in default_values]:
            fields_get[field]['required'] = False
        # Remove readonly and magic fields
        # (py2: fields_get.keys() is a list, so deleting while looping is safe)
        for field in fields_get.keys():
            if fields_get[field]['readonly'] or field in MAGIC_FIELDS:
                del fields_get[field]
        return fields_get
class website_form_model_fields(models.Model):
    """ fields configuration for form builder """
    _name = 'ir.model.fields'
    _inherit = 'ir.model.fields'
    def init(self, cr):
        # set all existing unset website_form_blacklisted fields to ``true``
        # (so that we can use it as a whitelist rather than a blacklist)
        cr.execute('UPDATE ir_model_fields'
                   ' SET website_form_blacklisted=true'
                   ' WHERE website_form_blacklisted IS NULL')
        # add an SQL-level default value on website_form_blacklisted so that
        # pure-SQL ir.model.field creations (e.g. in _field_create) generate
        # the right default value for a whitelist (aka fields should be
        # blacklisted by default)
        cr.execute('ALTER TABLE ir_model_fields '
                   ' ALTER COLUMN website_form_blacklisted SET DEFAULT true')
    @api.model
    def formbuilder_whitelist(self, model, fields):
        """
        :param str model: name of the model on which to whitelist fields
        :param list(str) fields: list of fields to whitelist on the model
        :return: nothing of import
        """
        # postgres does *not* like ``in [EMPTY TUPLE]`` queries
        if not fields: return False
        # only allow users who can change the website structure
        if not self.env['res.users'].has_group('base.group_website_designer'):
            return False
        # the ORM only allows writing on custom fields and will trigger a
        # registry reload once that's happened. We want to be able to
        # whitelist non-custom fields and the registry reload absolutely
        # isn't desirable, so go with a method and raw SQL
        self.env.cr.execute(
            "UPDATE ir_model_fields"
            " SET website_form_blacklisted=false"
            " WHERE model=%s AND name in %s", (model, tuple(fields)))
        return True
    # Whitelist flag: True (the default) means the field is NOT usable in forms.
    website_form_blacklisted = fields.Boolean(
        'Blacklisted in web forms', default=True, select=True, # required=True,
        help='Blacklist this field for web forms'
    )
| agpl-3.0 |
galak/zephyr | scripts/kconfig/hardenconfig.py | 6 | 2260 | #!/usr/bin/env python3
# Copyright (c) 2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import csv
import os
from kconfiglib import standard_kconfig
def hardenconfig(kconf):
    """Load the current Kconfig configuration, compare it against the
    hardened baseline CSV, and print the comparison results."""
    kconf.load_config()
    hardened_csv = os.path.join(os.environ['ZEPHYR_BASE'],
                                'scripts', 'kconfig', 'hardened.csv')
    display_results(compare_with_hardened_conf(kconf, hardened_csv))
class Option:
    """One hardening recommendation and how the current config compares."""
    def __init__(self, name, recommended, current=None, symbol=None):
        self.name = name
        self.recommended = recommended
        self.current = current
        self.symbol = symbol
        # 'NA' when the symbol is absent from the Kconfig tree,
        # otherwise PASS/FAIL depending on whether the value matches.
        if current is None:
            self.result = 'NA'
        else:
            self.result = 'PASS' if recommended == current else 'FAIL'
def compare_with_hardened_conf(kconf, hardened_kconf_filename):
    """Read the hardened-baseline CSV and build one Option per row,
    looking each symbol up in the loaded Kconfig tree."""
    options = []
    with open(hardened_kconf_filename) as csvfile:
        for row in csv.reader(csvfile):
            if len(row) <= 1:
                # Skip blank or malformed rows.
                continue
            name, recommended = row[0], row[1]
            try:
                symbol = kconf.syms[name]
                current = symbol.str_value
            except KeyError:
                # Symbol not known to this Kconfig tree.
                symbol = None
                current = None
            options.append(Option(name=name, current=current,
                                  recommended=recommended, symbol=symbol))
    return options
def display_results(options):
    """Print a table of failed hardening checks for visible symbols."""
    # header
    print('{:^50}|{:^13}|{:^20}'.format('name', 'current', 'recommended'), end='')
    print('||{:^28}\n'.format('check result'), end='')
    print('=' * 116)
    # Only failing, visible options are printed to keep the output readable.
    # TODO: add command line option to show all results
    for opt in options:
        # Check result first so opt.symbol is never touched for 'NA' rows.
        if opt.result != 'FAIL' or opt.symbol.visibility == 0:
            continue
        print('CONFIG_{:<43}|{:^13}|{:^20}'.format(
            opt.name, opt.current, opt.recommended), end='')
        print('||{:^28}\n'.format(opt.result), end='')
    print()
def main():
    # Entry point: parse standard kconfiglib arguments and run the checks.
    hardenconfig(standard_kconfig())
if __name__ == '__main__':
    main()
| apache-2.0 |
SeanCameronConklin/aima-python | submissions/Ottenlips/mygames.py | 18 | 5079 | from games import Game
from math import nan, isnan
from queue import PriorityQueue
from copy import deepcopy
from utils import isnumber
from grading.util import print_table
class GameState:
    """Snapshot of a Star29 game: whose turn it is, the last position
    chosen, the values currently on the board, and the running score."""
    def __init__(self, to_move, position, board, label=None):
        self.to_move = to_move      # name of the player about to move
        self.position = position    # last position/value chosen
        self.board = board          # values currently available to pick
        self.label = label          # optional human-readable name
        self.scores = {'S': 0}      # shared running total of chosen values
    def __str__(self):
        # Fall back to the default object repr when no label was supplied.
        # (Fixed: compare against None with `is`, not `==`.)
        if self.label is None:
            return super(GameState, self).__str__()
        return self.label
class Move:
    # Wraps a (position, value) pair representing a single move.
    def __init__(self, p, v):
        self.position = p
        self.value = v
    # NOTE(review): the statement below sits at class-body level where `self`
    # is undefined, so defining this class raises NameError; GameState also
    # requires `position` and `board` arguments.  It looks like it was meant
    # to live inside __init__ with full arguments -- confirm intent and fix.
    self.initial = GameState(to_move='Player One')
    def pv(self):
        # Return the move as a (position, value) tuple.
        return self.position, self.value
    # def getValue(self):
class Star29(Game):
    """
    An implementation of ThinkAhead
    """
    def __init__(self, state):
        self.initial = state
        # Remember the original five values; result() uses them to decide
        # which pair of moves each chosen value opens up.
        self.startingBoard = state.board
    def actions(self, state):
        # The legal moves are simply the values currently on the board.
        p = state.position
        return state.board
    # defines the order of play
    def opponent(self, player):
        # Fixed: the second branch used to re-test 'Player One', so
        # opponent('Player Two') returned None and play never alternated
        # back to Player One.
        if player == 'Player One':
            return 'Player Two'
        if player == 'Player Two':
            return 'Player One'
        return None
    def result(self, state, move):
        """Apply `move` (a picked value): advance the turn, replace the
        board with the pair of values that pick opens up, and add the value
        to the shared running score."""
        p = move
        currMover = state.to_move
        nextMover = self.opponent(currMover)
        value = move
        newState = deepcopy(state)
        newState.to_move = nextMover
        newState.position = p
        index = newState.board.index(value)
        # Each starting value opens up a fixed pair of successor values
        # (the star adjacency of the original five-value board).
        if value == self.startingBoard[0]:
            newState.board = (self.startingBoard[2], self.startingBoard[3])
        if value == self.startingBoard[1]:
            newState.board = (self.startingBoard[3], self.startingBoard[4])
        if value == self.startingBoard[2]:
            newState.board = (self.startingBoard[4], self.startingBoard[0])
        if value == self.startingBoard[3]:
            newState.board = (self.startingBoard[0], self.startingBoard[1])
        if value == self.startingBoard[4]:
            newState.board = (self.startingBoard[1], self.startingBoard[2])
        newState.scores['S'] += value
        self.lastMove = newState.position
        return newState
    def utility(self, state, player):
        "if player goes over 29 they loose"
        if player == 'Player One' and state.scores['S'] == 29:
            return -1
        if state.scores['S'] <= 28:
            # NOTE(review): `range(2, 5) in state.board` tests whether the
            # range *object* is an element of the board, which is never true
            # for a board of ints, so this branch always returns 1.  Probably
            # meant any(v in state.board for v in range(2, 5)) -- confirm
            # intent before changing the game's behavior.
            if range(2,5) in state.board:
                return -1
            else:
                return 1
        return 0
    def terminal_test(self, state):
        "A state is terminal if it is over 29."
        if state.scores['S'] >= 29:
            return 1
        return 0
    def display(self, state):
        # Two-item boards are mid-game choices; otherwise show the opening
        # five-value star layout.
        if(len(state.board)==2):
            print("--> "+str(state.board[0])+" or --> "+ str(state.board[1]))
        else:
            print("First move "+"\n " + str(state.board[0])+"\n"+str(state.board[1])+" "+str(state.board[4])+"\n"+str(state.board[2])+" "+str(state.board[3]))
        print('Score: ' + str(state.scores))
# Pre-built GameState fixtures at increasing running scores, the Star29
# game instance built from the opening state, and the mapping consumed by
# the grading harness.
full_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'full'
)
full_game.scores = {'S':0}
next_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 5'
)
next_game.scores = {'S':5}
next_game2 = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 10'
)
next_game2.scores = {'S':10}
mid_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 15'
)
mid_game.scores = {'S':15}
almost_done_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'S 20'
)
almost_done_game.scores = {'S':20}
won_game = GameState(
    to_move = 'Player One',
    position = 0,
    board=[1,2,3,4,5],
    label = 'wonS27'
)
won_game.scores = {'S':27}
thinkA = Star29(full_game)
myGames = {
    thinkA: [
        full_game,
        next_game,
        next_game2,
        mid_game,
        almost_done_game,
        won_game
    ]
}
Zerschmetterling91/three.js | utils/converters/fbx/convert_to_threejs.py | 213 | 77684 | # @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
import re
import json
import types
import shutil
# #####################################################
# Globals
# #####################################################
# Conversion options.  These module-level flags are presumably set by the
# command-line driver, which is outside this chunk -- TODO confirm.
option_triangulate = True      # tessellate mesh/nurb/patch attributes (see triangulate_scene)
option_textures = True         # export texture definitions (see generate_texture_dict)
option_copy_textures = True    # assumed: copy texture image files to the output -- confirm
option_prefix = True           # prefix exported names with 'Object_<uid>_' / 'Material_<uid>_' etc.
option_geometry = False        # geometry-only export: bake vertices into global space
option_forced_y_up = False     # assumed: force a Y-up axis system -- confirm
option_default_camera = False  # assumed: add a default camera to the output -- confirm
option_default_light = False   # assumed: add a default light to the output -- confirm
option_pretty_print = False    # human-friendly JSON via NoIndent/ChunkedIndent wrappers

converter = None               # FBX converter used by triangulate_node_hierarchy
inputFolder = ""               # source asset folder (see replace_inFolder2OutFolder)
outputFolder = ""              # destination folder (see replace_OutFolder2inFolder)
# #####################################################
# Pretty Printing Hacks
# #####################################################
# Force an array to be printed fully on a single line
class NoIndent(object):
    """Wrapper that makes the pretty-printer emit a whole list on one line."""

    def __init__(self, value, separator = ','):
        self.separator = separator
        self.value = value

    def encode(self):
        """Render the wrapped list as '[ a,b,... ]', or None when empty."""
        if not self.value:
            return None
        joined = self.separator.join(str(item) for item in self.value)
        return '[ %s ]' % joined
# Force an array into chunks rather than printing each element on a new line
class ChunkedIndent(object):
    """Wrapper that makes the pretty-printer break a flat array into
    fixed-size chunks (one chunk per output line) instead of one element
    per line."""

    def __init__(self, value, chunk_size = 15, force_rounding = False):
        self.value = value
        self.size = chunk_size
        self.force_rounding = force_rounding

    def encode(self):
        """Return a list of '{CHUNK}a, b, ...' strings, or None when empty.

        The '{CHUNK}' sentinel is stripped later by a regex pass
        (executeRegexHacks) so the chunks become raw array contents.
        """
        if not self.value:
            return None
        if self.force_rounding:
            render = lambda f: str(round(f, 6))
        else:
            render = str
        chunks = []
        for start in range(0, len(self.value), self.size):
            piece = self.value[start:start + self.size]
            chunks.append('{CHUNK}%s' % ', '.join(render(f) for f in piece))
        return chunks
# This custom encoder looks for instances of NoIndent or ChunkedIndent.
# When it finds
class CustomEncoder(json.JSONEncoder):
    # JSON encoder that understands the NoIndent/ChunkedIndent pretty-print
    # wrappers; anything else falls through to the stock JSONEncoder (which
    # raises TypeError for non-serializable objects).
    def default(self, obj):
        if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
            return obj.encode()
        else:
            return json.JSONEncoder.default(self, obj)
def executeRegexHacks(output_string):
    """Post-process pretty-printed JSON text: turn the quoted placeholder
    strings produced by NoIndent/ChunkedIndent into literal arrays, undo the
    '0metadata'/'zchildren' key-ordering hacks, and insert blank lines for
    readability.  Returns the cleaned string."""
    substitutions = (
        # turn strings of arrays into arrays (remove the double quotes)
        (':\s*\"(\[.*\])\"', r': \1'),
        ('(\n\s*)\"(\[.*\])\"', r'\1\2'),
        ('(\n\s*)\"{CHUNK}(.*)\"', r'\1\2'),
        # replace '0metadata' with metadata
        ('0metadata', r'metadata'),
        # replace 'zchildren' with children
        ('zchildren', r'children'),
        # add an extra newline after '"children": {'
        ('(children.*{\s*\n)', r'\1\n'),
        # add an extra newline after '},'
        ('},\s*\n', r'},\n\n'),
        # add an extra newline after '\n\s*],'
        ('(\n\s*)],\s*\n', r'\1],\n\n'),
    )
    for pattern, replacement in substitutions:
        output_string = re.sub(pattern, replacement, output_string)
    return output_string
# #####################################################
# Object Serializers
# #####################################################
# FbxVector2 is not JSON serializable
def serializeVector2(v, round_vector = False):
    """Make a 2-component (Fbx) vector JSON-serializable.

    NaN/Inf components are zeroed in place (JSON cannot represent them);
    values are rounded to 5 places when rounding or pretty printing is on,
    and the result is a plain list -- or a NoIndent wrapper so the pair
    stays on one line in pretty output.
    """
    # JSON does not support NaN or Inf
    for i in range(2):
        if math.isnan(v[i]) or math.isinf(v[i]):
            v[i] = 0
    if round_vector or option_pretty_print:
        v = (round(v[0], 5), round(v[1], 5))
    if option_pretty_print:
        return NoIndent([v[0], v[1]], ', ')
    return [v[0], v[1]]
# FbxVector3 is not JSON serializable
def serializeVector3(v, round_vector = False):
    """Make a 3-component (Fbx) vector JSON-serializable.

    NaN/Inf components are zeroed in place, values are rounded to 5 places
    when requested or when pretty printing, and the result is a plain list
    (or a single-line NoIndent wrapper in pretty output).
    """
    # JSON does not support NaN or Inf
    for i in range(3):
        if math.isnan(v[i]) or math.isinf(v[i]):
            v[i] = 0
    if round_vector or option_pretty_print:
        v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
    if option_pretty_print:
        return NoIndent([v[0], v[1], v[2]], ', ')
    return [v[0], v[1], v[2]]
# FbxVector4 is not JSON serializable
def serializeVector4(v, round_vector = False):
    """Make a 4-component (Fbx) vector JSON-serializable.

    NaN/Inf components are zeroed in place, values are rounded to 5 places
    when requested or when pretty printing, and the result is a plain list
    (or a single-line NoIndent wrapper in pretty output).
    """
    # JSON does not support NaN or Inf
    for i in range(4):
        if math.isnan(v[i]) or math.isinf(v[i]):
            v[i] = 0
    if round_vector or option_pretty_print:
        v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
    if option_pretty_print:
        return NoIndent([v[0], v[1], v[2], v[3]], ', ')
    return [v[0], v[1], v[2], v[3]]
# #####################################################
# Helpers
# #####################################################
def getRadians(v):
    """Convert the first three components of *v* from degrees to radians."""
    return tuple((v[i] * math.pi) / 180 for i in range(3))
def getHex(c):
    """Pack an RGB color with float components in [0, 1] into a 0xRRGGBB int."""
    red = int(c[0] * 255)
    green = int(c[1] * 255)
    blue = int(c[2] * 255)
    return (red << 16) + (green << 8) + blue
def setBit(value, position, on):
    """Return *value* with the bit at *position* set (on truthy) or cleared."""
    mask = 1 << position
    if on:
        return value | mask
    return value & ~mask
def generate_uvs(uv_layers):
    """Flatten each UV layer into [u0, v0, u1, v1, ...].

    When pretty printing, each flat layer is wrapped in ChunkedIndent so the
    floats are emitted in fixed-size chunks rather than one per line.
    """
    layers = []
    for uvs in uv_layers:
        flat = []
        for uv in uvs:
            flat.extend([uv[0], uv[1]])
        layers.append(ChunkedIndent(flat) if option_pretty_print else flat)
    return layers
# #####################################################
# Object Name Helpers
# #####################################################
def hasUniqueName(o, class_id):
    """Return True when no *other* object of *class_id* in o's scene shares
    o's name.  The object itself is skipped by comparing unique ids."""
    scene = o.GetScene()
    own_name = o.GetName()
    own_id = o.GetUniqueID()
    for index in range(scene.GetSrcObjectCount(class_id)):
        candidate = scene.GetSrcObject(class_id, index)
        if candidate.GetUniqueID() == own_id:
            continue
        if candidate.GetName() == own_name:
            return False
    return True
def getObjectName(o, force_prefix = False):
    """Return the export name for scene node *o*, or '' when o is None.

    An 'Object_<uid>_' prefix is added when prefixing is globally enabled,
    when the caller forces it, or when the raw node name is not unique
    among FbxNode objects in the scene.
    """
    if not o:
        return ""
    name = o.GetName()
    uid = o.GetUniqueID()
    if not force_prefix:
        force_prefix = not hasUniqueName(o, FbxNode.ClassId)
    prefix = "Object_%s_" % uid if (option_prefix or force_prefix) else ""
    return prefix + name
def getMaterialName(o, force_prefix = False):
    """Return the export name for a material.

    A 'Material_<uid>_' prefix is added when prefixing is globally enabled,
    forced by the caller, or the raw name collides with another surface
    material in the scene.
    """
    name = o.GetName()
    uid = o.GetUniqueID()
    if not force_prefix:
        force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
    prefix = "Material_%s_" % uid if (option_prefix or force_prefix) else ""
    return prefix + name
def getTextureName(t, force_prefix = False):
    """Return the export name for a texture.

    File textures are named after their file's basename without extension;
    other textures use their node name.  The sentinel '_empty_' counts as
    no name.  A 'Texture_<uid>_' prefix is applied when prefixing is
    requested; with an empty base name the prefix's trailing underscore is
    dropped.
    """
    if type(t) is FbxFileTexture:
        source_file = t.GetFileName()
        base = os.path.splitext(os.path.basename(source_file))[0]
    else:
        base = t.GetName()
    if base == "_empty_":
        base = ""
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Texture_%s_" % t.GetUniqueID()
        if not base:
            prefix = prefix[:-1]
    return prefix + base
def getMtlTextureName(texture_name, texture_id, force_prefix = False):
    """Return a texture name for MTL-sourced textures: the file name minus
    its extension, optionally prefixed with 'Texture_<id>_'."""
    base = os.path.splitext(texture_name)[0]
    if option_prefix or force_prefix:
        return "Texture_%s_" % texture_id + base
    return base
def getPrefixedName(o, prefix):
    """Return '<prefix>_<uid>_<name>' for object *o*."""
    tagged = (prefix + '_%s_') % o.GetUniqueID()
    return tagged + o.GetName()
# #####################################################
# Triangulation
# #####################################################
def triangulate_node_hierarchy(node):
    """Recursively triangulate every mesh/nurb/nurbs-surface/patch attribute
    in the hierarchy rooted at *node* (in place, via the global converter)."""
    attribute = node.GetNodeAttribute()
    if attribute:
        triangulable = (
            FbxNodeAttribute.eMesh,
            FbxNodeAttribute.eNurbs,
            FbxNodeAttribute.eNurbsSurface,
            FbxNodeAttribute.ePatch,
        )
        if attribute.GetAttributeType() in triangulable:
            converter.TriangulateInPlace(node)
    for child_index in range(node.GetChildCount()):
        triangulate_node_hierarchy(node.GetChild(child_index))
def triangulate_scene(scene):
    """Triangulate every top-level node hierarchy in *scene*."""
    root = scene.GetRootNode()
    if not root:
        return
    for child_index in range(root.GetChildCount()):
        triangulate_node_hierarchy(root.GetChild(child_index))
# #####################################################
# Generate Material Object
# #####################################################
def generate_texture_bindings(material_property, material_params):
    """Record the texture(s) connected to one material property in
    *material_params* under the matching three.js parameter name.

    Both layered textures and plain texture connections are handled; when
    several textures are connected to the same channel, later ones simply
    overwrite earlier ones in the params dict.
    """
    # FBX to Three.js texture types
    binding_types = {
        "DiffuseColor": "map",
        "DiffuseFactor": "diffuseFactor",
        "EmissiveColor": "emissiveMap",
        "EmissiveFactor": "emissiveFactor",
        "AmbientColor": "lightMap", # "ambientMap",
        "AmbientFactor": "ambientFactor",
        "SpecularColor": "specularMap",
        "SpecularFactor": "specularFactor",
        "ShininessExponent": "shininessExponent",
        "NormalMap": "normalMap",
        "Bump": "bumpMap",
        "TransparentColor": "transparentMap",
        "TransparencyFactor": "transparentFactor",
        "ReflectionColor": "reflectionMap",
        "ReflectionFactor": "reflectionFactor",
        "DisplacementColor": "displacementMap",
        "VectorDisplacementColor": "vectorDisplacementMap"
    }

    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        # textures are always referenced by their prefixed id
                        texture_id = getTextureName(texture, True)
                        material_params[binding_types[str(material_property.GetName())]] = texture_id
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_id = getTextureName(texture, True)
                    material_params[binding_types[str(material_property.GetName())]] = texture_id
def generate_material_object(material):
    """Convert an FBX surface material into a three.js material descriptor.

    Hardware-shader (HLSL/CGFX) materials are not supported and only emit a
    warning; Lambert and Phong surfaces map onto three.js parameter
    dictionaries; anything unrecognized falls back to a mid-grey
    MeshLambertMaterial.  Returns {'type': ..., 'parameters': {...}}.
    """
    # Get the implementation to see if it's a hardware shader.
    implementation = GetImplementation(material, "ImplementationHLSL")
    if not implementation:
        implementation = GetImplementation(material, "ImplementationCGFX")

    material_params = None
    material_type = None

    if implementation:
        print("Shader materials are not supported")

    elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
        ambient = getHex(material.Ambient.Get())
        diffuse = getHex(material.Diffuse.Get())
        emissive = getHex(material.Emissive.Get())

        # FBX stores transparency, three.js expects opacity; a computed
        # opacity of 0 is treated as an authoring artifact and exported as
        # fully opaque.
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        transparent = False
        reflectivity = 1

        material_type = 'MeshBasicMaterial'
        # material_type = 'MeshLambertMaterial'

        material_params = {
            'color' : diffuse,
            'ambient' : ambient,
            'emissive' : emissive,
            'reflectivity' : reflectivity,
            'transparent' : transparent,
            'opacity' : opacity
        }

    elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
        ambient = getHex(material.Ambient.Get())
        diffuse = getHex(material.Diffuse.Get())
        emissive = getHex(material.Emissive.Get())
        specular = getHex(material.Specular.Get())

        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        shininess = material.Shininess.Get()
        transparent = False
        reflectivity = 1
        bumpScale = 1

        material_type = 'MeshPhongMaterial'

        material_params = {
            'color' : diffuse,
            'ambient' : ambient,
            'emissive' : emissive,
            'specular' : specular,
            'shininess' : shininess,
            'bumpScale' : bumpScale,
            'reflectivity' : reflectivity,
            'transparent' : transparent,
            'opacity' : opacity
        }

    else:
        # BUGFIX: this was a Python 2 print *statement*; every other message
        # in this file uses the function form, which also works on Python 3.
        print("Unknown type of Material %s" % getMaterialName(material))

    # default to Lambert Material if the current Material type cannot be handled
    if not material_type:
        ambient = getHex((0,0,0))
        diffuse = getHex((0.5,0.5,0.5))
        emissive = getHex((0,0,0))
        opacity = 1
        transparent = False
        reflectivity = 1

        material_type = 'MeshLambertMaterial'

        material_params = {
            'color' : diffuse,
            'ambient' : ambient,
            'emissive' : emissive,
            'reflectivity' : reflectivity,
            'transparent' : transparent,
            'opacity' : opacity
        }

    if option_textures:
        # attach texture bindings for every FBX texture channel
        texture_count = FbxLayerElement.sTypeTextureCount()
        for texture_index in range(texture_count):
            material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
            generate_texture_bindings(material_property, material_params)

    material_params['wireframe'] = False
    material_params['wireframeLinewidth'] = 1

    return {
        'type' : material_type,
        'parameters' : material_params
    }
def generate_proxy_material_object(node, material_names):
    """Wrap a list of sub-material names in a three.js MeshFaceMaterial
    descriptor.  three.js meshes take a single material, so multi-material
    meshes get one of these proxies holding the sub-material names."""
    return {
        'type' : 'MeshFaceMaterial',
        'parameters' : {
            'materials' : material_names
        }
    }
# #####################################################
# Find Scene Materials
# #####################################################
def extract_materials_from_node(node, material_dict):
    """Add a proxy (multi-)material entry to *material_dict* when *node*'s
    mesh uses more than one material.

    Single-material meshes contribute nothing here; plain materials are
    collected scene-wide in generate_material_dict.  The proxy is keyed by
    the *node*'s forced-prefix material name.
    """
    name = node.GetName()
    mesh = node.GetNodeAttribute()

    node = None  # NOTE: the parameter is deliberately rebound; from here on
                 # 'node' is the mesh's owning node, not the argument
    if mesh:
        node = mesh.GetNode()
        if node:
            material_count = node.GetMaterialCount()

    material_names = []
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                #Materials are in an undefined external table
                continue
            for i in range(material_count):
                material = node.GetMaterial(i)
                material_names.append(getMaterialName(material))

    if material_count > 1:
        proxy_material = generate_proxy_material_object(node, material_names)
        # NOTE(review): the proxy name is derived from the *node*, not a
        # material, using a forced prefix -- presumably to keep proxy names
        # distinct from the real material names; confirm against callers.
        proxy_name = getMaterialName(node, True)
        material_dict[proxy_name] = proxy_material
def generate_materials_from_hierarchy(node, material_dict):
    """Depth-first walk over *node*'s hierarchy, collecting proxy materials
    from every mesh node into *material_dict*."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        if attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
            extract_materials_from_node(node, material_dict)
    for child_index in range(node.GetChildCount()):
        generate_materials_from_hierarchy(node.GetChild(child_index), material_dict)
def generate_material_dict(scene):
    """Build {material_name: material_object} for every surface material in
    *scene*, then add one proxy multi-material per mesh node that uses more
    than one material (three.js meshes accept a single material, so multiple
    materials are expressed as sub-materials of a proxy)."""
    material_dict = {}

    # all plain materials in the scene
    for index in range(scene.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)):
        material = scene.GetSrcObject(FbxSurfaceMaterial.ClassId, index)
        material_dict[getMaterialName(material)] = generate_material_object(material)

    # proxy materials for multi-material meshes
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_materials_from_hierarchy(root.GetChild(child_index), material_dict)

    return material_dict
# #####################################################
# Generate Texture Object
# #####################################################
def generate_texture_object(texture):
    """Build the three.js texture descriptor for an FBX texture.

    'url' is the file's basename, 'fullpath' the original path; repeat and
    filter settings are fixed defaults and only the UV translation is taken
    from the texture.
    TODO: extract more texture properties (wrap modes, scale, rotation).
    """
    # fetched once and reused below (the original fetched it twice and also
    # read the unused wrap modes)
    offset = texture.GetUVTranslation()

    if type(texture) is FbxFileTexture:
        url = texture.GetFileName()
    else:
        url = getTextureName( texture )

    # strip the directory part by hand; the path comes straight out of the
    # FBX file and may use either '/' or '\\' separators
    index = url.rfind( '/' )
    if index == -1:
        index = url.rfind( '\\' )
    filename = url[ index+1 : len(url) ]

    output = {
        'url': filename,
        'fullpath': url,
        'repeat': serializeVector2( (1,1) ),
        'offset': serializeVector2( offset ),
        'magFilter': 'LinearFilter',
        'minFilter': 'LinearMipMapLinearFilter',
        'anisotropy': True
    }

    return output
# #####################################################
# Replace Texture input path to output
# #####################################################
def replace_inFolder2OutFolder(url):
    """Rebase *url* from the global input folder onto the output folder.
    Paths that do not contain the input folder are returned unchanged."""
    marker = url.find(inputFolder)
    if marker == -1:
        return url
    return outputFolder + url[marker + len(inputFolder):]
# #####################################################
# Replace Texture output path to input
# #####################################################
def replace_OutFolder2inFolder(url):
    """Rebase *url* from the global output folder back onto the input
    folder.  Paths that do not contain the output folder are unchanged."""
    marker = url.find(outputFolder)
    if marker == -1:
        return url
    return inputFolder + url[marker + len(outputFolder):]
# #####################################################
# Find Scene Textures
# #####################################################
def extract_material_textures(material_property, texture_dict):
    """Collect every texture connected to one material property into
    *texture_dict*, keyed by the texture's forced-prefix name.

    Mirrors generate_texture_bindings but records full texture objects
    instead of parameter-name bindings.
    """
    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        texture_object = generate_texture_object(texture)
                        texture_name = getTextureName( texture, True )
                        texture_dict[texture_name] = texture_object
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_object = generate_texture_object(texture)
                    texture_name = getTextureName( texture, True )
                    texture_dict[texture_name] = texture_object
def extract_textures_from_node(node, texture_dict):
    """Collect texture objects from every material attached to *node*'s
    mesh, across all texture channel types."""
    mesh = node.GetNodeAttribute()

    #for all materials attached to this mesh
    material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
    for material_index in range(material_count):
        material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
        if not material:
            continue
        #go through all the possible textures types
        for texture_index in range(FbxLayerElement.sTypeTextureCount()):
            channel = FbxLayerElement.sTextureChannelNames(texture_index)
            extract_material_textures(material.FindProperty(channel), texture_dict)
def generate_textures_from_hierarchy(node, texture_dict):
    """Depth-first walk over *node*'s hierarchy, collecting textures from
    every mesh node into *texture_dict*."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        if attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
            extract_textures_from_node(node, texture_dict)
    for child_index in range(node.GetChildCount()):
        generate_textures_from_hierarchy(node.GetChild(child_index), texture_dict)
def generate_texture_dict(scene):
    """Build {texture_name: texture_object} for *scene*; empty when texture
    export is disabled via option_textures."""
    if not option_textures:
        return {}
    texture_dict = {}
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_textures_from_hierarchy(root.GetChild(child_index), texture_dict)
    return texture_dict
# #####################################################
# Extract Fbx SDK Mesh Data
# #####################################################
def extract_fbx_vertex_positions(mesh):
    """Return the mesh control-point positions as a list of [x, y, z].

    The node's FBX geometric transform (an offset that applies to the
    geometry only, not to children) is baked in when present; with
    option_geometry the node's full global transform is baked in as well,
    since scene nodes are not exported in that mode.
    """
    control_points_count = mesh.GetControlPointsCount()
    control_points = mesh.GetControlPoints()

    positions = []
    for i in range(control_points_count):
        tmp = control_points[i]
        tmp = [tmp[0], tmp[1], tmp[2]]
        positions.append(tmp)

    node = mesh.GetNode()
    if node:
        t = node.GeometricTranslation.Get()
        t = FbxVector4(t[0], t[1], t[2], 1)
        r = node.GeometricRotation.Get()
        r = FbxVector4(r[0], r[1], r[2], 1)
        s = node.GeometricScaling.Get()
        s = FbxVector4(s[0], s[1], s[2], 1)

        # only build the geometric transform when it is not the identity
        hasGeometricTransform = False
        if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
           r[0] != 0 or r[1] != 0 or r[2] != 0 or \
           s[0] != 1 or s[1] != 1 or s[2] != 1:
            hasGeometricTransform = True

        if hasGeometricTransform:
            geo_transform = FbxMatrix(t,r,s)
        else:
            geo_transform = FbxMatrix()

        transform = None

        if option_geometry:
            # FbxMeshes are local to their node, we need the vertices in global space
            # when scene nodes are not exported
            transform = node.EvaluateGlobalTransform()
            transform = FbxMatrix(transform) * geo_transform
        elif hasGeometricTransform:
            transform = geo_transform

        if transform:
            for i in range(len(positions)):
                v = positions[i]
                position = FbxVector4(v[0], v[1], v[2])
                position = transform.MultNormalize(position)
                positions[i] = [position[0], position[1], position[2]]

    return positions
def extract_fbx_vertex_normals(mesh):
    """Return (normal_values, normal_indices) for the first normal layer.

    normal_values is a list of [x, y, z] normals (transformed like the
    vertex positions, but with translation removed and renormalized);
    normal_indices holds, per polygon, the per-vertex indices into
    normal_values.  Only the first layer is returned because three.js
    supports a single layer of normals.
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.

    layered_normal_indices = []
    layered_normal_values = []

    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()

    for l in range(mesh.GetLayerCount()):
        mesh_normals = mesh.GetLayer(l).GetNormals()
        if not mesh_normals:
            continue

        normals_array = mesh_normals.GetDirectArray()
        normals_count = normals_array.GetCount()

        if normals_count == 0:
            continue

        normal_indices = []
        normal_values = []

        # values
        for i in range(normals_count):
            normal = normals_array.GetAt(i)
            normal = [normal[0], normal[1], normal[2]]
            normal_values.append(normal)

        node = mesh.GetNode()
        if node:
            t = node.GeometricTranslation.Get()
            t = FbxVector4(t[0], t[1], t[2], 1)
            r = node.GeometricRotation.Get()
            r = FbxVector4(r[0], r[1], r[2], 1)
            s = node.GeometricScaling.Get()
            s = FbxVector4(s[0], s[1], s[2], 1)

            # only build the geometric transform when it is not the identity
            hasGeometricTransform = False
            if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
               r[0] != 0 or r[1] != 0 or r[2] != 0 or \
               s[0] != 1 or s[1] != 1 or s[2] != 1:
                hasGeometricTransform = True

            if hasGeometricTransform:
                geo_transform = FbxMatrix(t,r,s)
            else:
                geo_transform = FbxMatrix()

            transform = None

            if option_geometry:
                # FbxMeshes are local to their node, we need the vertices in global space
                # when scene nodes are not exported
                transform = node.EvaluateGlobalTransform()
                transform = FbxMatrix(transform) * geo_transform
            elif hasGeometricTransform:
                transform = geo_transform

            if transform:
                # zero the translation row so normals are only rotated and
                # scaled, then renormalize after transforming
                t = FbxVector4(0,0,0,1)
                transform.SetRow(3, t)

                for i in range(len(normal_values)):
                    n = normal_values[i]
                    normal = FbxVector4(n[0], n[1], n[2])
                    normal = transform.MultNormalize(normal)
                    normal.Normalize()
                    normal = [normal[0], normal[1], normal[2]]
                    normal_values[i] = normal

        # indices
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_normals = []

            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)

                # mapping mode is by control points. The mesh should be smooth and soft.
                # we can get normals by retrieving each control point
                if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:

                    # reference mode is direct, the normal index is same as vertex index.
                    # get normals by the index of control vertex
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(control_point_index)

                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(control_point_index)
                        poly_normals.append(index)

                # mapping mode is by polygon-vertex.
                # we can get normals by retrieving polygon-vertex.
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:

                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(vertexId)

                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(vertexId)
                        poly_normals.append(index)

                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported normal mapping mode for polygon vertex")

                vertexId += 1

            normal_indices.append(poly_normals)

        layered_normal_values.append(normal_values)
        layered_normal_indices.append(normal_indices)

    normal_values = []
    normal_indices = []

    # Three.js only supports one layer of normals
    if len(layered_normal_values) > 0:
        normal_values = layered_normal_values[0]
        normal_indices = layered_normal_indices[0]

    return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
    """Return (color_values, color_indices) for the first vertex-color layer.

    color_values is a list of [r, g, b, a] colors; color_indices holds, per
    polygon, the per-vertex indices into color_values.  Only the first
    layer is kept because three.js supports a single layer of colors.
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.

    layered_color_indices = []
    layered_color_values = []

    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()

    for l in range(mesh.GetLayerCount()):
        mesh_colors = mesh.GetLayer(l).GetVertexColors()
        if not mesh_colors:
            continue

        colors_array = mesh_colors.GetDirectArray()
        colors_count = colors_array.GetCount()

        if colors_count == 0:
            continue

        color_indices = []
        color_values = []

        # values
        for i in range(colors_count):
            color = colors_array.GetAt(i)
            color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
            color_values.append(color)

        # indices
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_colors = []

            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)

                if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(control_point_index)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(control_point_index)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(vertexId)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(vertexId)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported color mapping mode for polygon vertex")

                vertexId += 1

            color_indices.append(poly_colors)

        layered_color_indices.append( color_indices )
        layered_color_values.append( color_values )

    color_values = []
    color_indices = []

    # Three.js only supports one layer of colors
    if len(layered_color_values) > 0:
        color_values = layered_color_values[0]
        color_indices = layered_color_indices[0]

    '''
    # The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
    # This causes most models to receive incorrect vertex colors
    if len(color_values) == 0:
        color = mesh.Color.Get()
        color_values = [[color[0], color[1], color[2]]]
        color_indices = []
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            color_indices.append([0] * poly_size)
    '''

    return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
    """Return (layered_uv_values, layered_uv_indices) for ALL uv layers.

    Unlike the normal/color extractors, every layer is returned: values is
    a list (per layer) of [u, v] pairs, and indices is a list (per layer)
    of per-polygon lists of indices into that layer's values.
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.

    layered_uv_indices = []
    layered_uv_values = []

    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()

    for l in range(mesh.GetLayerCount()):
        mesh_uvs = mesh.GetLayer(l).GetUVs()
        if not mesh_uvs:
            continue

        uvs_array = mesh_uvs.GetDirectArray()
        uvs_count = uvs_array.GetCount()

        if uvs_count == 0:
            continue

        uv_indices = []
        uv_values = []

        # values
        for i in range(uvs_count):
            uv = uvs_array.GetAt(i)
            uv = [uv[0], uv[1]]
            uv_values.append(uv)

        # indices
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_uvs = []

            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)

                if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_uvs.append(control_point_index)
                    elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
                        poly_uvs.append(index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    # NOTE(review): unlike the normal/color paths, eDirect also
                    # reads the index array here -- confirm this matches the
                    # FBX SDK's behavior for direct per-polygon-vertex UVs
                    uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
                       mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        poly_uvs.append(uv_texture_index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported uv mapping mode for polygon vertex")

                vertexId += 1

            uv_indices.append(poly_uvs)

        layered_uv_values.append(uv_values)
        layered_uv_indices.append(uv_indices)

    return layered_uv_values, layered_uv_indices
# #####################################################
# Process Mesh Geometry
# #####################################################
def generate_normal_key(normal):
    """Hashable dedup key for a normal: the three components rounded to 6
    decimal places."""
    return tuple(round(component, 6) for component in normal[:3])
def generate_color_key(color):
    # Colors are deduplicated by their packed 0xRRGGBB value, so any colors
    # that quantize to the same 8-bit-per-channel value share one entry.
    return getHex(color)
def generate_uv_key(uv):
    """Hashable dedup key for a UV pair: both components rounded to 6
    decimal places."""
    return tuple(round(component, 6) for component in (uv[0], uv[1]))
def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
    """Merge one mesh's uv layers into the running per-layer dedup state.

    dest_uvs[i] maps rounded-uv key -> assigned output index for layer i,
    and counts[i] is the next free index in that layer; both are extended
    in place when the source has more layers than seen so far.  The updated
    counts list is returned.
    """
    source_layer_count = len(source_uvs)
    for layer_index in range(source_layer_count):
        dest_layer_count = len(dest_uvs)

        # first time this layer index is seen: start a fresh dictionary
        if dest_layer_count <= layer_index:
            dest_uv_layer = {}
            count = 0
            dest_uvs.append(dest_uv_layer)
            counts.append(count)
        else:
            dest_uv_layer = dest_uvs[layer_index]
            count = counts[layer_index]

        source_uv_layer = source_uvs[layer_index]

        for uv in source_uv_layer:
            key = generate_uv_key(uv)
            if key not in dest_uv_layer:
                dest_uv_layer[key] = count
                count += 1

        counts[layer_index] = count

    return counts
def generate_unique_normals_dictionary(mesh_list):
    """Build {rounded-normal key: output index}, merged across all meshes
    with duplicates removed (first occurrence wins)."""
    normals_dictionary = {}
    next_index = 0

    # Merge meshes, remove duplicate data
    for mesh in mesh_list:
        normal_values, _ = extract_fbx_vertex_normals(mesh)
        for normal in normal_values:
            key = generate_normal_key(normal)
            if key not in normals_dictionary:
                normals_dictionary[key] = next_index
                next_index += 1

    return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
    """Build {packed-color key: output index}, merged across all meshes
    with duplicates removed (first occurrence wins)."""
    colors_dictionary = {}
    next_index = 0

    # Merge meshes, remove duplicate data
    for mesh in mesh_list:
        color_values, _ = extract_fbx_vertex_colors(mesh)
        for color in color_values:
            key = generate_color_key(color)
            if key not in colors_dictionary:
                colors_dictionary[key] = next_index
                next_index += 1

    return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
    """Build, per uv layer, a {rounded-uv key: output index} dictionary
    merged across all meshes with duplicates removed."""
    uvs_dictionary_layers = []
    nuvs_list = []

    # Merge meshes, remove duplicate data
    for mesh in mesh_list:
        uv_values, _ = extract_fbx_vertex_uvs(mesh)
        if len(uv_values) > 0:
            nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)

    return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
    """Return the normal keys ordered by their assigned output index."""
    ordered = sorted(normals_dictionary.items(), key = operator.itemgetter(1))
    return [key for key, _ in ordered]
def generate_colors_from_dictionary(colors_dictionary):
    """Return the packed color values ordered by their assigned index."""
    ordered = sorted(colors_dictionary.items(), key = operator.itemgetter(1))
    return [key for key, _ in ordered]
def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
    """Return, per layer, the uv keys ordered by their assigned index."""
    uv_values = []
    for uvs_dictionary in uvs_dictionary_layers:
        ordered = sorted(uvs_dictionary.items(), key = operator.itemgetter(1))
        uv_values.append([key for key, _ in ordered])
    return uv_values
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
    """Translate one polygon's local normal indices into merged-dictionary indices.

    Returns [] when the mesh carries no per-polygon normal data.
    """
    if not mesh_normal_indices:
        return []
    remapped = []
    for local_index in mesh_normal_indices[poly_index]:
        normal_key = generate_normal_key(mesh_normal_values[local_index])
        remapped.append(normals_to_indices[normal_key])
    return remapped
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
    """Translate one polygon's local color indices into merged-dictionary indices.

    Returns [] when the mesh carries no per-polygon color data.
    """
    if not mesh_color_indices:
        return []
    remapped = []
    for local_index in mesh_color_indices[poly_index]:
        color_key = generate_color_key(mesh_color_values[local_index])
        remapped.append(colors_to_indices[color_key])
    return remapped
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
    """Translate one polygon's local uv indices into merged-dictionary indices.

    Returns [] when the mesh carries no uv data for this layer.
    """
    if not mesh_uv_indices:
        return []
    remapped = []
    for local_index in mesh_uv_indices[poly_index]:
        uv_key = generate_uv_key(mesh_uv_values[local_index])
        remapped.append(uvs_to_indices[uv_key])
    return remapped
def process_mesh_vertices(mesh_list):
    """Concatenate the vertex positions of all meshes into one list.

    Returns (vertices, vertex_offset_list) where vertex_offset_list[i] is the
    index at which mesh i's vertices begin in the merged list (one trailing
    entry equals the total count).

    Cleanup: removed the unused `node = mesh.GetNode()` local and the
    redundant `mesh_vertices[:]` copy — extend() already iterates the list.
    """
    vertex_offset = 0
    vertex_offset_list = [0]
    vertices = []
    for mesh in mesh_list:
        mesh_vertices = extract_fbx_vertex_positions(mesh)
        vertices.extend(mesh_vertices)
        vertex_offset += len(mesh_vertices)
        vertex_offset_list.append(vertex_offset)
    return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
    """Collect every mesh node's materials plus per-mesh material index offsets.

    Returns (materials_list, material_offset_list); the offsets rebase each
    mesh's local material indices into the merged list.
    """
    material_offset = 0
    material_offset_list = [0]
    materials_list = []
    #TODO: remove duplicate mesh references
    for mesh in mesh_list:
        node = mesh.GetNode()
        material_count = node.GetMaterialCount()
        if material_count > 0:
            for l in range(mesh.GetLayerCount()):
                materials = mesh.GetLayer(l).GetMaterials()
                if materials:
                    if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                        #Materials are in an undefined external table
                        continue
                    for i in range(material_count):
                        material = node.GetMaterial(i)
                        materials_list.append( material )
                    # Offsets grow only for layers whose materials were exported
                    material_offset += material_count
                    material_offset_list.append(material_offset)
    return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
    """Walk every polygon of every mesh and emit three.js face records.

    Triangles and quads are emitted directly; polygons with more than four
    vertices are fan-triangulated around vertex 0. Per-mesh vertex/material
    offsets rebase local indices into the merged output arrays.
    """
    faces = []
    for mesh_index in range(len(mesh_list)):
        mesh = mesh_list[mesh_index]
        # A negative component in the node's local scale mirrors the mesh,
        # so its faces' winding order must be flipped on output.
        flipWindingOrder = False
        node = mesh.GetNode()
        if node:
            local_scale = node.EvaluateLocalScaling()
            if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
                flipWindingOrder = True
        poly_count = mesh.GetPolygonCount()
        control_points = mesh.GetControlPoints()
        normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
        color_values, color_indices = extract_fbx_vertex_colors(mesh)
        uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
        for poly_index in range(poly_count):
            poly_size = mesh.GetPolygonSize(poly_index)
            # Remap this polygon's attribute indices into the merged dictionaries
            face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
            face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
            face_uv_layers = []
            for l in range(len(uv_indices_layers)):
                uv_values = uv_values_layers[l]
                uv_indices = uv_indices_layers[l]
                face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
                face_uv_layers.append(face_uv_indices)
            face_vertices = []
            for vertex_index in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
                face_vertices.append(control_point_index)
            #TODO: assign a default material to any mesh without one
            if len(material_offset_list) <= mesh_index:
                material_offset = 0
            else:
                material_offset = material_offset_list[mesh_index]
            vertex_offset = vertex_offset_list[mesh_index]
            if poly_size > 4:
                # Fan-triangulate the n-gon around its first vertex
                new_face_normals = []
                new_face_colors = []
                new_face_uv_layers = []
                for i in range(poly_size - 2):
                    new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
                    if len(face_normals):
                        new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
                    if len(face_colors):
                        new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
                    if len(face_uv_layers):
                        new_face_uv_layers = []
                        for layer in face_uv_layers:
                            new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
                    face = generate_mesh_face(mesh,
                        poly_index,
                        new_face_vertices,
                        new_face_normals,
                        new_face_colors,
                        new_face_uv_layers,
                        vertex_offset,
                        material_offset,
                        flipWindingOrder)
                    faces.append(face)
            else:
                face = generate_mesh_face(mesh,
                    poly_index,
                    face_vertices,
                    face_normals,
                    face_colors,
                    face_uv_layers,
                    vertex_offset,
                    material_offset,
                    flipWindingOrder)
                faces.append(face)
    return faces
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
    """Encode one triangle/quad as a flat three.js (format 3) face record.

    The record is: a bitmask describing which attributes follow, then vertex
    indices, material index, per-vertex uv indices (one run per uv layer),
    per-vertex normal indices, and per-vertex color indices — in that fixed
    order, matching three.js JSONLoader. *flipOrder* reverses the winding for
    meshes under negatively-scaled nodes.

    Bug fix: when flipping a quad's uvs, the last entry previously repeated
    polygon_uvs[3] instead of polygon_uvs[1], corrupting texture coordinates
    on mirrored quads (the normals/colors reversal correctly uses [0, 3, 2, 1]).
    """
    isTriangle = ( len(vertex_indices) == 3 )
    nVertices = 3 if isTriangle else 4

    # A face carries a material index only when the mesh has a material layer
    hasMaterial = False
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            hasMaterial = True
            break

    # Per-face attributes are never exported; only per-face-vertex data is
    hasFaceUvs = False
    hasFaceVertexUvs = len(uv_layers) > 0
    hasFaceNormals = False
    hasFaceVertexNormals = len(normals) > 0
    hasFaceColors = False
    hasFaceVertexColors = len(colors) > 0

    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)

    faceData = []

    # order is important, must match order in JSONLoader
    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face color index
    # face vertex colors indices
    faceData.append(faceType)

    if flipOrder:
        if nVertices == 3:
            vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
            if hasFaceVertexNormals:
                normals = [normals[0], normals[2], normals[1]]
            if hasFaceVertexColors:
                colors = [colors[0], colors[2], colors[1]]
            if hasFaceVertexUvs:
                tmp = []
                for polygon_uvs in uv_layers:
                    tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
                uv_layers = tmp
        else:
            vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
            if hasFaceVertexNormals:
                normals = [normals[0], normals[3], normals[2], normals[1]]
            if hasFaceVertexColors:
                colors = [colors[0], colors[3], colors[2], colors[1]]
            if hasFaceVertexUvs:
                tmp = []
                for polygon_uvs in uv_layers:
                    # was: [..., polygon_uvs[3]] — fixed to mirror the reversal above
                    tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[1]])
                uv_layers = tmp

    for i in range(nVertices):
        # Rebase the mesh-local vertex index into the merged vertex array
        index = vertex_indices[i] + vertex_offset
        faceData.append(index)

    if hasMaterial:
        material_id = 0
        for l in range(mesh.GetLayerCount()):
            materials = mesh.GetLayer(l).GetMaterials()
            if materials:
                material_id = materials.GetIndexArray().GetAt(polygon_index)
                break
        material_id += material_offset
        faceData.append( material_id )

    if hasFaceVertexUvs:
        for polygon_uvs in uv_layers:
            for i in range(nVertices):
                faceData.append(polygon_uvs[i])

    if hasFaceVertexNormals:
        for i in range(nVertices):
            faceData.append(normals[i])

    if hasFaceVertexColors:
        for i in range(nVertices):
            faceData.append(colors[i])

    return faceData
# #####################################################
# Generate Mesh Object (for scene output format)
# #####################################################
def generate_scene_output(node):
    """Serialize a single mesh node into the three.js embedded-geometry dict."""
    mesh = node.GetNodeAttribute()
    # This is done in order to keep the scene output and non-scene output code DRY
    mesh_list = [ mesh ]
    # Extract the mesh data into arrays
    vertices, vertex_offsets = process_mesh_vertices(mesh_list)
    materials, material_offsets = process_mesh_materials(mesh_list)
    normals_to_indices = generate_unique_normals_dictionary(mesh_list)
    colors_to_indices = generate_unique_colors_dictionary(mesh_list)
    uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
    normal_values = generate_normals_from_dictionary(normals_to_indices)
    color_values = generate_colors_from_dictionary(colors_to_indices)
    uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
    # Generate mesh faces for the Three.js file format
    faces = process_mesh_polygons(mesh_list,
        normals_to_indices,
        colors_to_indices,
        uvs_to_indices_list,
        vertex_offsets,
        material_offsets)
    # Generate counts for uvs, vertices, normals, colors, and faces
    nuvs = []
    for layer_index, uvs in enumerate(uv_values):
        nuvs.append(str(len(uvs)))
    nvertices = len(vertices)
    nnormals = len(normal_values)
    ncolors = len(color_values)
    nfaces = len(faces)
    # Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
    vertices = [val for v in vertices for val in v]
    normal_values = [val for n in normal_values for val in n]
    color_values = [c for c in color_values]
    faces = [val for f in faces for val in f]
    uv_values = generate_uvs(uv_values)
    # Disable automatic json indenting when pretty printing for the arrays
    if option_pretty_print:
        nuvs = NoIndent(nuvs)
        vertices = ChunkedIndent(vertices, 15, True)
        normal_values = ChunkedIndent(normal_values, 15, True)
        color_values = ChunkedIndent(color_values, 15)
        faces = ChunkedIndent(faces, 30)
    metadata = {
        'vertices' : nvertices,
        'normals' : nnormals,
        'colors' : ncolors,
        'faces' : nfaces,
        'uvs' : nuvs
    }
    output = {
        'scale' : 1,
        'materials' : [],
        'vertices' : vertices,
        'normals' : [] if nnormals <= 0 else normal_values,
        'colors' : [] if ncolors <= 0 else color_values,
        'uvs' : uv_values,
        'faces' : faces
    }
    if option_pretty_print:
        # '0' prefix sorts metadata to the top of the JSON; presumably stripped
        # later by executeRegexHacks — confirm against that helper
        output['0metadata'] = metadata
    else:
        output['metadata'] = metadata
    return output
# #####################################################
# Generate Mesh Object (for non-scene output)
# #####################################################
def generate_non_scene_output(scene):
    """Serialize the whole scene as one merged geometry (format 3 'geometry' file)."""
    mesh_list = generate_mesh_list(scene)
    # Extract the mesh data into arrays
    vertices, vertex_offsets = process_mesh_vertices(mesh_list)
    materials, material_offsets = process_mesh_materials(mesh_list)
    normals_to_indices = generate_unique_normals_dictionary(mesh_list)
    colors_to_indices = generate_unique_colors_dictionary(mesh_list)
    uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
    normal_values = generate_normals_from_dictionary(normals_to_indices)
    color_values = generate_colors_from_dictionary(colors_to_indices)
    uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
    # Generate mesh faces for the Three.js file format
    faces = process_mesh_polygons(mesh_list,
        normals_to_indices,
        colors_to_indices,
        uvs_to_indices_list,
        vertex_offsets,
        material_offsets)
    # Generate counts for uvs, vertices, normals, colors, and faces
    nuvs = []
    for layer_index, uvs in enumerate(uv_values):
        nuvs.append(str(len(uvs)))
    nvertices = len(vertices)
    nnormals = len(normal_values)
    ncolors = len(color_values)
    nfaces = len(faces)
    # Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
    vertices = [val for v in vertices for val in v]
    normal_values = [val for n in normal_values for val in n]
    color_values = [c for c in color_values]
    faces = [val for f in faces for val in f]
    uv_values = generate_uvs(uv_values)
    # Disable json indenting when pretty printing for the arrays
    if option_pretty_print:
        nuvs = NoIndent(nuvs)
        vertices = NoIndent(vertices)
        normal_values = NoIndent(normal_values)
        color_values = NoIndent(color_values)
        faces = NoIndent(faces)
    metadata = {
        'formatVersion' : 3,
        'type' : 'geometry',
        'generatedBy' : 'convert-to-threejs.py',
        'vertices' : nvertices,
        'normals' : nnormals,
        'colors' : ncolors,
        'faces' : nfaces,
        'uvs' : nuvs
    }
    output = {
        'scale' : 1,
        'materials' : [],
        'vertices' : vertices,
        'normals' : [] if nnormals <= 0 else normal_values,
        'colors' : [] if ncolors <= 0 else color_values,
        'uvs' : uv_values,
        'faces' : faces
    }
    if option_pretty_print:
        # '0' prefix keeps metadata first in sorted JSON output
        output['0metadata'] = metadata
    else:
        output['metadata'] = metadata
    return output
def generate_mesh_list_from_hierarchy(node, mesh_list):
    """Depth-first collect exportable surface attributes, triangulating non-meshes."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        surface_types = (FbxNodeAttribute.eMesh,
                         FbxNodeAttribute.eNurbs,
                         FbxNodeAttribute.eNurbsSurface,
                         FbxNodeAttribute.ePatch)
        attribute_type = attribute.GetAttributeType()
        if attribute_type in surface_types:
            # Nurbs/patch surfaces must become triangle meshes before export
            if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
            mesh_list.append(node.GetNodeAttribute())
    for child_index in range(node.GetChildCount()):
        generate_mesh_list_from_hierarchy(node.GetChild(child_index), mesh_list)
def generate_mesh_list(scene):
    """Gather every exportable mesh attribute reachable from the scene root."""
    mesh_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_mesh_list_from_hierarchy(root.GetChild(child_index), mesh_list)
    return mesh_list
# #####################################################
# Generate Embed Objects
# #####################################################
def generate_embed_dict_from_hierarchy(node, embed_dict):
    """Depth-first serialize every exportable surface under *node* into embed_dict."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        surface_types = (FbxNodeAttribute.eMesh,
                         FbxNodeAttribute.eNurbs,
                         FbxNodeAttribute.eNurbsSurface,
                         FbxNodeAttribute.ePatch)
        attribute_type = attribute.GetAttributeType()
        if attribute_type in surface_types:
            # Non-mesh surfaces are triangulated in place before serialization
            if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
            embed_dict[getPrefixedName(node, 'Embed')] = generate_scene_output(node)
    for child_index in range(node.GetChildCount()):
        generate_embed_dict_from_hierarchy(node.GetChild(child_index), embed_dict)
def generate_embed_dict(scene):
    """Collect embedded geometry payloads for every exportable node in the scene."""
    embed_dict = {}
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_embed_dict_from_hierarchy(root.GetChild(child_index), embed_dict)
    return embed_dict
# #####################################################
# Generate Geometry Objects
# #####################################################
def generate_geometry_object(node):
    """Return a reference to the embedded geometry generated for *node*."""
    return {
        'type' : 'embedded',
        'id' : getPrefixedName( node, 'Embed' )
    }
def generate_geometry_dict_from_hierarchy(node, geometry_dict):
    """Depth-first register a geometry entry for every mesh node under *node*."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
        geometry_dict[getPrefixedName( node, 'Geometry' )] = generate_geometry_object(node)
    for child_index in range(node.GetChildCount()):
        generate_geometry_dict_from_hierarchy(node.GetChild(child_index), geometry_dict)
def generate_geometry_dict(scene):
    """Map geometry names to embedded-geometry references for the whole scene."""
    geometry_dict = {}
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_geometry_dict_from_hierarchy(root.GetChild(child_index), geometry_dict)
    return geometry_dict
# #####################################################
# Generate Light Node Objects
# #####################################################
def generate_default_light():
    """Return a canned white directional light for scenes exported with --add-light."""
    direction = (1, 1, 1)
    color = (1, 1, 1)
    intensity = 80.0
    return {
        'type': 'DirectionalLight',
        'color': getHex(color),
        'intensity': intensity/100.00,
        'direction': serializeVector3( direction ),
        'target': getObjectName( None )
    }
def generate_light_object(node):
    """Convert an FBX light node into a three.js light dict.

    Handles point, directional and spot lights; area/volume lights fall
    through and return None.
    """
    light = node.GetNodeAttribute()
    light_types = ["point", "directional", "spot", "area", "volume"]
    light_type = light_types[light.LightType.Get()]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    output = None
    if light_type == "directional":
        # Three.js directional lights emit light from a point in 3d space to a target node or the origin.
        # When there is no target, we need to take a point, one unit away from the origin, and move it
        # into the right location so that the origin acts like the target
        if node.GetTarget():
            direction = position
        else:
            translation = FbxVector4(0,0,0,0)
            scale = FbxVector4(1,1,1,1)
            rotation = transform.GetR()
            matrix = FbxMatrix(translation, rotation, scale)
            # Rotate the unit up-vector into the node's orientation
            direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
        output = {
            'type': 'DirectionalLight',
            'color': getHex(light.Color.Get()),
            'intensity': light.Intensity.Get()/100.0,
            'direction': serializeVector3( direction ),
            'target': getObjectName( node.GetTarget() )
        }
    elif light_type == "point":
        output = {
            'type': 'PointLight',
            'color': getHex(light.Color.Get()),
            'intensity': light.Intensity.Get()/100.0,
            'position': serializeVector3( position ),
            'distance': light.FarAttenuationEnd.Get()
        }
    elif light_type == "spot":
        output = {
            'type': 'SpotLight',
            'color': getHex(light.Color.Get()),
            'intensity': light.Intensity.Get()/100.0,
            'position': serializeVector3( position ),
            'distance': light.FarAttenuationEnd.Get(),
            # FBX angle is in degrees; three.js expects radians
            'angle': light.OuterAngle.Get()*math.pi/180,
            'exponent': light.DecayType.Get(),
            'target': getObjectName( node.GetTarget() )
        }
    return output
def generate_ambient_light(scene):
    """Return an AmbientLight dict for the scene's global ambient color.

    Returns None when the ambient color is pure black (i.e. no ambient light).
    """
    settings = scene.GetGlobalSettings()
    raw_color = settings.GetAmbientColor()
    ambient_color = (raw_color.mRed, raw_color.mGreen, raw_color.mBlue)
    if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
        return None
    return {
        'type': 'AmbientLight',
        'color': getHex(ambient_color)
    }
# #####################################################
# Generate Camera Node Objects
# #####################################################
def generate_default_camera():
    """Return a canned perspective camera for scenes exported with --add-camera."""
    position = (100, 100, 100)
    return {
        'type': 'PerspectiveCamera',
        'fov': 75,
        'near': 0.1,
        'far': 1000,
        'position': serializeVector3( position )
    }
def generate_camera_object(node):
    """Convert an FBX camera node into a three.js camera dict."""
    camera = node.GetNodeAttribute()
    position = camera.Position.Get()
    projection_types = [ "perspective", "orthogonal" ]
    projection = projection_types[camera.ProjectionType.Get()]
    near = camera.NearPlane.Get()
    far = camera.FarPlane.Get()
    name = getObjectName( node )
    output = {}
    if projection == "perspective":
        aspect = camera.PixelAspectRatio.Get()
        fov = camera.FieldOfView.Get()
        output = {
            'type': 'PerspectiveCamera',
            'fov': fov,
            'aspect': aspect,
            'near': near,
            'far': far,
            'position': serializeVector3( position )
        }
    elif projection == "orthogonal":
        # NOTE(review): this branch emits empty frustum bounds and labels the
        # camera 'PerspectiveCamera' — it looks like an unfinished orthographic
        # implementation. Confirm whether it should emit 'OrthographicCamera'
        # with real left/right/top/bottom values.
        left = ""
        right = ""
        top = ""
        bottom = ""
        output = {
            'type': 'PerspectiveCamera',
            'left': left,
            'right': right,
            'top': top,
            'bottom': bottom,
            'near': near,
            'far': far,
            'position': serializeVector3( position )
        }
    return output
# #####################################################
# Generate Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
    """Depth-first collect the names of all camera nodes under *node*."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eCamera:
        camera_list.append(getObjectName(node))
    for child_index in range(node.GetChildCount()):
        generate_camera_name_list_from_hierarchy(node.GetChild(child_index), camera_list)
def generate_camera_name_list(scene):
    """Return the names of every camera in the scene, in hierarchy order."""
    camera_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_camera_name_list_from_hierarchy(root.GetChild(child_index), camera_list)
    return camera_list
# #####################################################
# Generate Mesh Node Object
# #####################################################
def generate_mesh_object(node):
    """Build the scene-graph entry for a mesh node: geometry ref, material, transform."""
    mesh = node.GetNodeAttribute()
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    # rotation is computed but unused; the quaternion is what gets serialized
    rotation = getRadians(transform.GetR())
    quaternion = transform.GetQ()
    material_count = node.GetMaterialCount()
    material_name = ""
    if material_count > 0:
        material_names = []
        for l in range(mesh.GetLayerCount()):
            materials = mesh.GetLayer(l).GetMaterials()
            if materials:
                if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                    #Materials are in an undefined external table
                    continue
                for i in range(material_count):
                    material = node.GetMaterial(i)
                    material_names.append( getMaterialName(material) )
        # Guarantee at least one (possibly empty) name so material_names[0] below is safe
        if not material_count > 1 and not len(material_names) > 0:
            material_names.append('')
        #If this mesh has more than one material, use a proxy material
        material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
    output = {
        'geometry': getPrefixedName( node, 'Geometry' ),
        'material': material_name,
        'position': serializeVector3( position ),
        'quaternion': serializeVector4( quaternion ),
        'scale': serializeVector3( scale ),
        'visible': True,
    }
    return output
# #####################################################
# Generate Node Object
# #####################################################
def generate_object(node):
    """Build a generic scene-graph entry (type label + transform) for nodes that
    have no specialized serializer (mesh/light/camera are handled elsewhere).

    Cleanup: removed dead locals — `rotation` (and its getRadians call) and
    `name` (getObjectName call) were computed but never used; the quaternion
    is what gets serialized.
    """
    # Index into this table is FbxNodeAttribute.GetAttributeType()
    node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
        "CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
        "TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    quaternion = transform.GetQ()
    if node.GetNodeAttribute() == None:
        node_type = "Null"
    else:
        node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
    return {
        'fbx_type': node_type,
        'position': serializeVector3( position ),
        'quaternion': serializeVector4( quaternion ),
        'scale': serializeVector3( scale ),
        'visible': True
    }
# #####################################################
# Parse Scene Node Objects
# #####################################################
def generate_object_hierarchy(node, object_dict):
    """Recursively serialize *node* and its children into *object_dict*.

    Returns the number of objects added (this node plus all descendants).
    """
    object_count = 0
    if node.GetNodeAttribute() == None:
        object_data = generate_object(node)
    else:
        # Dispatch to the specialized serializer for the node's attribute type
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            object_data = generate_mesh_object(node)
        elif attribute_type == FbxNodeAttribute.eLight:
            object_data = generate_light_object(node)
        elif attribute_type == FbxNodeAttribute.eCamera:
            object_data = generate_camera_object(node)
        else:
            object_data = generate_object(node)
    object_count += 1
    object_name = getObjectName(node)
    object_children = {}
    for i in range(node.GetChildCount()):
        object_count += generate_object_hierarchy(node.GetChild(i), object_children)
    if node.GetChildCount() > 0:
        # Having 'children' above other attributes is hard to read.
        # We can send it to the bottom using the last letter of the alphabet 'z'.
        # This letter is removed from the final output.
        if option_pretty_print:
            object_data['zchildren'] = object_children
        else:
            object_data['children'] = object_children
    object_dict[object_name] = object_data
    return object_count
def generate_scene_objects(scene):
    """Build the top-level objects dict (ambient/default lights, default camera,
    and the full node hierarchy) plus the total object count."""
    object_count = 0
    object_dict = {}

    ambient_light = generate_ambient_light(scene)
    if ambient_light:
        object_dict['AmbientLight'] = ambient_light
        object_count += 1

    if option_default_light:
        object_dict['DefaultLight'] = generate_default_light()
        object_count += 1

    if option_default_camera:
        object_dict['DefaultCamera'] = generate_default_camera()
        object_count += 1

    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            object_count += generate_object_hierarchy(root.GetChild(child_index), object_dict)

    return object_dict, object_count
# #####################################################
# Generate Scene Output
# #####################################################
def extract_scene(scene, filename):
    """Assemble the complete three.js scene dictionary (formatVersion 3.2)."""
    global_settings = scene.GetGlobalSettings()
    objects, nobjects = generate_scene_objects(scene)
    textures = generate_texture_dict(scene)
    materials = generate_material_dict(scene)
    geometries = generate_geometry_dict(scene)
    embeds = generate_embed_dict(scene)
    ntextures = len(textures)
    nmaterials = len(materials)
    ngeometries = len(geometries)
    # The root transform is identity; individual nodes carry their own transforms
    position = serializeVector3( (0,0,0) )
    rotation = serializeVector3( (0,0,0) )
    scale = serializeVector3( (1,1,1) )
    camera_names = generate_camera_name_list(scene)
    scene_settings = scene.GetGlobalSettings()
    # This does not seem to be any help here
    # global_settings.GetDefaultCamera()
    defcamera = camera_names[0] if len(camera_names) > 0 else ""
    if option_default_camera:
        defcamera = 'default_camera'
    metadata = {
        'formatVersion': 3.2,
        'type': 'scene',
        'generatedBy': 'convert-to-threejs.py',
        'objects': nobjects,
        'geometries': ngeometries,
        'materials': nmaterials,
        'textures': ntextures
    }
    transform = {
        'position' : position,
        'rotation' : rotation,
        'scale' : scale
    }
    defaults = {
        'bgcolor' : 0,
        'camera' : defcamera,
        'fog' : ''
    }
    output = {
        'objects': objects,
        'geometries': geometries,
        'materials': materials,
        'textures': textures,
        'embeds': embeds,
        'transform': transform,
        'defaults': defaults,
    }
    if option_pretty_print:
        # '0' prefix sorts metadata first in the pretty-printed JSON
        output['0metadata'] = metadata
    else:
        output['metadata'] = metadata
    return output
# #####################################################
# Generate Non-Scene Output
# #####################################################
def extract_geometry(scene, filename):
    """Produce the geometry-only (non-scene) output structure for *scene*.

    *filename* is accepted for interface symmetry with extract_scene but unused.
    """
    return generate_non_scene_output(scene)
# #####################################################
# File Helpers
# #####################################################
def write_file(filepath, content):
    """Write *content* to *filepath* encoded as UTF-8 (unencodable chars replaced).

    Fixes: the handle was never closed on a failed write, and encoded bytes
    were written to a text-mode handle (works on Python 2, TypeError on
    Python 3). Opening in binary mode with a context manager behaves
    identically on both. The unused directory computation (which only served
    a commented-out os.makedirs) was dropped.
    """
    with open(filepath, "wb") as out:
        out.write(content.encode('utf8', 'replace'))
def read_file(filepath):
    """Return the contents of *filepath* as a list of lines (newlines kept)."""
    with open(filepath) as handle:
        return handle.readlines()
def copy_textures(textures):
    """Copy each referenced texture file into a local "maps" folder, once per
    unique source path. Missing sources and I/O failures are reported and
    skipped rather than aborting the export.

    Fix: the error report used a Python-2-only `print` statement (a syntax
    error under Python 3 and inconsistent with the print() calls used in the
    rest of the file); it is now a print() call that parses on both.
    """
    texture_dict = {}
    for key in textures:
        url = textures[key]['fullpath']
        if url in texture_dict:  # texture has already been copied
            continue
        if not os.path.exists(url):
            print("copy_texture error: we can't find this texture at " + url)
            continue
        try:
            # Extract the bare filename, handling both '/' and '\' separators
            index = url.rfind('/')
            if index == -1:
                index = url.rfind( '\\' )
            filename = url[index+1:len(url)]
            saveFolder = "maps"
            saveFilename = saveFolder + "/" + filename
            if not os.path.exists(saveFolder):
                os.makedirs(saveFolder)
            shutil.copyfile(url, saveFilename)
            texture_dict[url] = True
        except IOError as e:
            print("I/O error({0}): {1} {2}".format(e.errno, e.strerror, url))
def findFilesWithExt(directory, ext, include_path = True):
    """Recursively list files under *directory* whose extension matches *ext*
    (case-insensitive). Returns full paths unless include_path is False."""
    target_ext = ext.lower()
    matches = []
    for root, _dirs, files in os.walk(directory):
        for filename in files:
            if os.path.splitext(filename)[1].lower() == target_ext:
                matches.append(os.path.join(root, filename) if include_path else filename)
    return matches
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
    # Command-line entry point: parse options, load the FBX scene, convert it
    # to the three.js JSON format and write the result next to args[1].
    from optparse import OptionParser
    try:
        from FbxCommon import *
    except ImportError:
        import platform
        msg = 'Could not locate the python FBX SDK!\n'
        msg += 'You need to copy the FBX SDK into your python install folder such as '
        if platform.system() == 'Windows' or platform.system() == 'Microsoft':
            msg += '"Python26/Lib/site-packages"'
        elif platform.system() == 'Linux':
            msg += '"/usr/local/lib/python2.6/site-packages"'
        elif platform.system() == 'Darwin':
            msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
        msg += ' folder.'
        print(msg)
        sys.exit(1)
    usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
    parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
    parser.add_option('-n', '--no-texture-copy', action='store_true', dest='notexturecopy', help="don't copy texture files", default=False)
    parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
    parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
    parser.add_option('-y', '--force-y-up', action='store_true', dest='forceyup', help="ensure that the y axis shows up", default=False)
    parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
    parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="prefix all object names in output file", default=False)
    (options, args) = parser.parse_args()
    # Publish the parsed options as the module-level flags the generator
    # functions read (option_pretty_print, option_default_camera, ...)
    option_triangulate = options.triangulate
    option_textures = True if not options.notextures else False
    option_copy_textures = True if not options.notexturecopy else False
    option_prefix = options.prefix
    option_geometry = options.geometry
    option_forced_y_up = options.forceyup
    option_default_camera = options.defcamera
    option_default_light = options.deflight
    option_pretty_print = options.pretty
    # Prepare the FBX SDK.
    sdk_manager, scene = InitializeSdkObjects()
    converter = FbxGeometryConverter(sdk_manager)
    # The converter takes an FBX file as an argument.
    if len(args) > 1:
        print("\nLoading file: %s" % args[0])
        result = LoadScene(sdk_manager, scene, args[0])
    else:
        result = False
        print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
    if not result:
        print("\nAn error occurred while loading the file...")
    else:
        if option_triangulate:
            print("\nForcing geometry to triangles")
            triangulate_scene(scene)
        axis_system = FbxAxisSystem.MayaYUp
        if not option_forced_y_up:
            # According to asset's coordinate to convert scene
            upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector();
            if upVector[0] == 3:
                axis_system = FbxAxisSystem.MayaZUp
        axis_system.ConvertScene(scene)
        # Derive the input/output folders from the given paths (normalized to '/')
        inputFolder = args[0].replace( "\\", "/" );
        index = args[0].rfind( "/" );
        inputFolder = inputFolder[:index]
        outputFolder = args[1].replace( "\\", "/" );
        index = args[1].rfind( "/" );
        outputFolder = outputFolder[:index]
        if option_geometry:
            output_content = extract_geometry(scene, os.path.basename(args[0]))
        else:
            output_content = extract_scene(scene, os.path.basename(args[0]))
        if option_pretty_print:
            output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
            # Post-process the JSON text (strip '0'/'z' sort prefixes etc.)
            output_string = executeRegexHacks(output_string)
        else:
            output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
        output_path = os.path.join(os.getcwd(), args[1])
        write_file(output_path, output_string)
        if option_copy_textures:
            copy_textures( output_content['textures'] )
        print("\nExported Three.js file to:\n%s\n" % output_path)
    # Destroy all objects created by the FBX SDK.
    sdk_manager.Destroy()
sys.exit(0) | mit |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/commands/copy_group.py | 1 | 3140 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
from .offset_manager import OffsetManagerBase
from kafka_utils.util.client import KafkaToolClient
from kafka_utils.util.offsets import get_current_consumer_offsets
from kafka_utils.util.offsets import set_consumer_offsets
class CopyGroup(OffsetManagerBase):
    """kafka-consumer-manager command that copies the committed offsets of
    one consumer group into a new (destination) consumer group."""

    @classmethod
    def setup_subparser(cls, subparsers):
        """Register the ``copy_group`` subcommand and its arguments.

        :param subparsers: argparse subparsers object to attach the new
            ``copy_group`` parser to.
        """
        parser_copy_group = subparsers.add_parser(
            "copy_group",
            description="Copy specified consumer group details to a new group.",
        )
        parser_copy_group.add_argument(
            'source_groupid',
            help="Consumer Group to be copied.",
        )
        parser_copy_group.add_argument(
            'dest_groupid',
            help="New name for the consumer group being copied to.",
        )
        parser_copy_group.add_argument(
            "--topic",
            # Fixed help text: the original concatenation produced
            # "...destination group If no topic is specificed..." (missing
            # sentence break plus a typo).
            help="Kafka topic whose offsets will be copied into destination"
            " group. If no topic is specified, all topic offsets will be"
            " copied.",
        )
        parser_copy_group.add_argument(
            "--partitions",
            nargs='+',
            type=int,
            help="List of partitions within the topic. If no partitions are "
            "specified, offsets from all partitions of the topic shall "
            "be copied.",
        )
        parser_copy_group.set_defaults(command=cls.run)

    @classmethod
    def run(cls, args, cluster_config):
        """Entry point for the ``copy_group`` subcommand.

        Copies offsets from ``args.source_groupid`` to ``args.dest_groupid``
        on the given cluster. Exits with status 1 when the two group ids are
        identical, since the copy would be a no-op.

        :param args: parsed argparse namespace for this subcommand.
        :param cluster_config: cluster configuration providing ``broker_list``.
        """
        if args.source_groupid == args.dest_groupid:
            print(
                "Error: Source group ID and destination group ID are same.",
                file=sys.stderr,
            )
            sys.exit(1)
        # Setup the Kafka client
        client = KafkaToolClient(cluster_config.broker_list)
        client.load_metadata_for_topics()
        source_topics = cls.preprocess_args(
            args.source_groupid,
            args.topic,
            args.partitions,
            cluster_config,
            client,
            # NOTE(review): --use-admin-client is not registered by this
            # subparser; presumably it comes from a shared parent parser in
            # OffsetManagerBase -- verify.
            use_admin_client=args.use_admin_client,
        )
        cls.copy_group_kafka(
            client,
            source_topics,
            args.source_groupid,
            args.dest_groupid,
        )

    @classmethod
    def copy_group_kafka(cls, client, topics, source_group, destination_group):
        """Fetch the current offsets of ``source_group`` for ``topics`` and
        commit them unchanged under ``destination_group``."""
        copied_offsets = get_current_consumer_offsets(client, source_group, topics)
        set_consumer_offsets(client, destination_group, copied_offsets)
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/zmq/utils/sixcerpt.py | 44 | 1886 | """Excerpts of six.py"""
# Copyright (C) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2  # True when running under Python 2.x
PY3 = sys.version_info[0] == 3  # True when running under Python 3.x

if PY3:
    def reraise(tp, value, tb=None):
        """Re-raise ``value``, attaching traceback ``tb`` when given.

        ``tp`` (the exception type) is unused on Python 3; it is kept so the
        signature matches the Python 2 variant defined below.
        """
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        # Default to the caller's globals/locals when none are supplied,
        # mirroring the behavior of the Python 2 ``exec`` statement.
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame  # break the reference to the caller's frame promptly
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # The three-argument ``raise tp, value, tb`` form is a SyntaxError on
    # Python 3, so the Python 2 reraise must be compiled from a string via
    # exec_ to keep this module importable under both major versions.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
| gpl-3.0 |
tsdmgz/ansible | test/runner/lib/cover.py | 44 | 7160 | """Code coverage utilities."""
from __future__ import absolute_import, print_function
import os
import re
from lib.target import (
walk_module_targets,
walk_compile_targets,
)
from lib.util import (
display,
ApplicationError,
run_command,
common_environment,
)
from lib.config import (
CoverageConfig,
CoverageReportConfig,
)
from lib.executor import (
Delegate,
install_command_requirements,
)
COVERAGE_DIR = 'test/results/coverage'
COVERAGE_FILE = os.path.join(COVERAGE_DIR, 'coverage')
COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file.

    Reads every intermediate ``*=coverage.*`` file from COVERAGE_DIR, rewrites
    the measured file paths recorded on remote hosts back to paths in the
    local source tree, and merges the arcs into one output file per coverage
    group (see get_coverage_group).

    :type args: CoverageConfig
    :rtype: list[str]
    """
    coverage = initialize_coverage(args)

    # Map module name -> source path, used to resolve coverage recorded for
    # individually executed modules below.
    modules = dict((t.module, t.path) for t in list(walk_module_targets()))

    coverage_files = [os.path.join(COVERAGE_DIR, f) for f in os.listdir(COVERAGE_DIR) if '=coverage.' in f]

    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = os.getcwd() + '/'

    counter = 0  # progress counter for the verbose per-file message
    groups = {}

    if args.all or args.stub:
        sources = sorted(os.path.abspath(target.path) for target in walk_compile_targets())
    else:
        sources = []

    if args.stub:
        # Pre-seed a '=stub' group that lists every source file with no arcs.
        groups['=stub'] = dict((source, set()) for source in sources)

    for coverage_file in coverage_files:
        counter += 1
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)

        original = coverage.CoverageData()

        group = get_coverage_group(args, coverage_file)

        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue

        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue

        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            # A corrupt or incompatible data file should not abort the merge.
            display.error(str(ex))
            continue

        for filename in original.measured_files():
            arcs = set(original.arcs(filename) or [])

            if not arcs:
                # This is most likely due to using an unsupported version of coverage.
                display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
                continue

            if '/ansible_modlib.zip/ansible/' in filename:
                # Paths recorded inside the module library zip map back to
                # the local lib/ansible/ tree.
                new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif '/ansible_module_' in filename:
                # Coverage recorded for an individually executed module maps
                # back to that module's source path via the `modules` table.
                module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
                if module_name not in modules:
                    display.warning('Skipping coverage of unknown module: %s' % module_name)
                    continue
                new_name = os.path.abspath(modules[module_name])
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name
            elif re.search('^(/.*?)?/root/ansible/', filename):
                # Paths recorded under /root/ansible/ on a remote host map
                # back to the current working tree.
                new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
                display.info('%s -> %s' % (filename, new_name), verbosity=3)
                filename = new_name

            if group not in groups:
                groups[group] = {}

            arc_data = groups[group]

            if filename not in arc_data:
                arc_data[filename] = set()

            arc_data[filename].update(arcs)

    output_files = []

    for group in sorted(groups):
        arc_data = groups[group]

        updated = coverage.CoverageData()

        for filename in arc_data:
            if not os.path.isfile(filename):
                display.warning('Invalid coverage path: %s' % filename)
                continue

            updated.add_arcs({filename: list(arc_data[filename])})

        if args.all:
            # Add every compile target with empty arcs so files that were
            # never executed still appear in the combined data.
            updated.add_arcs(dict((source, []) for source in sources))

        if not args.explain:
            output_file = COVERAGE_FILE + group
            updated.write_file(output_file)
            output_files.append(output_file)

    return sorted(output_files)
def command_coverage_report(args):
    """Combine coverage data and print a console report for each output file.

    :type args: CoverageReportConfig
    """
    for path in command_coverage_combine(args):
        if args.group_by or args.stub:
            # The group label is everything after the first '=' in the name.
            group_label = ' '.join(os.path.basename(path).split('=')[1:])
            display.info('>>> Coverage Group: %s' % group_label)

        extra_options = ['--show-missing'] if args.show_missing else []

        env = common_environment()
        env['COVERAGE_FILE'] = path
        run_command(args, env=env, cmd=['coverage', 'report'] + extra_options)
def command_coverage_html(args):
    """Combine coverage data and generate an HTML report for each output file.

    :type args: CoverageConfig
    """
    for path in command_coverage_combine(args):
        report_dir = 'test/results/reports/%s' % os.path.basename(path)
        env = common_environment()
        env['COVERAGE_FILE'] = path
        run_command(args, env=env, cmd=['coverage', 'html', '-i', '-d', report_dir])
def command_coverage_xml(args):
    """Combine coverage data and generate an XML report for each output file.

    :type args: CoverageConfig
    """
    for path in command_coverage_combine(args):
        xml_path = 'test/results/reports/%s.xml' % os.path.basename(path)
        env = common_environment()
        env['COVERAGE_FILE'] = path
        run_command(args, env=env, cmd=['coverage', 'xml', '-i', '-o', xml_path])
def command_coverage_erase(args):
    """Delete all coverage data files from the coverage directory.

    :type args: CoverageConfig
    """
    initialize_coverage(args)

    for entry in os.listdir(COVERAGE_DIR):
        # Only remove entries that look like coverage data files.
        if entry.startswith('coverage') or '=coverage.' in entry:
            if not args.explain:
                os.remove(os.path.join(COVERAGE_DIR, entry))
def initialize_coverage(args):
    """Import and return the ``coverage`` module, handling delegation and
    requirement installation first.

    :type args: CoverageConfig
    :rtype: coverage
    """
    if args.delegate:
        raise Delegate()

    if args.requirements:
        install_command_requirements(args)

    # Import lazily so the rest of the tool still works when the coverage
    # package is not installed.
    try:
        import coverage
    except ImportError:
        coverage = None

    if coverage is None:
        raise ApplicationError('You must install the "coverage" python module to use this command.')

    return coverage
def get_coverage_group(args, coverage_file):
    """Return the group label for a coverage file, or None when the name does
    not match the expected ``command=target=environment=version=coverage.*``
    pattern.

    :type args: CoverageConfig
    :type coverage_file: str
    :rtype: str
    """
    fields = os.path.basename(coverage_file).split('=', 4)

    # A valid name has exactly five fields, the last being the data file name.
    if len(fields) != 5 or not fields[4].startswith('coverage.'):
        return None

    names = {
        'command': fields[0],
        'target': fields[1],
        'environment': fields[2],
        'version': fields[3],
    }

    # Build the label from whichever dimensions were requested via --group-by.
    return ''.join(
        '=%s' % names[part]
        for part in COVERAGE_GROUPS
        if part in args.group_by
    )
| gpl-3.0 |
matt-kwong/grpc | src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py | 16 | 10830 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private interfaces implemented by data sets used in Face-layer tests."""
import abc
import six
# face is referenced from specification in this module.
from grpc.framework.interfaces.face import face # pylint: disable=unused-import
from tests.unit.framework.interfaces.face import test_interfaces
class UnaryUnaryTestMethodImplementation(
        six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
    """A controllable implementation of a unary-unary method."""

    @abc.abstractmethod
    def service(self, request, response_callback, context, control):
        """Services an RPC that accepts one message and produces one message.

        Args:
          request: The single request message for the RPC.
          response_callback: A callback to be called to accept the response message
            of the RPC.
          context: A face.ServicerContext object.
          control: A test_control.Control to control execution of this method.

        Raises:
          abandonment.Abandoned: May or may not be raised when the RPC has been
            aborted.
        """
        raise NotImplementedError()
class UnaryUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
    """A type for unary-request-unary-response message pairings."""

    @abc.abstractmethod
    def request(self):
        """Affords a request message.

        Implementations of this method should return a different message with
        each call so that multiple test executions of the test method may be
        made with different inputs.

        Returns:
          A request message.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def verify(self, request, response, test_case):
        """Verifies that the computed response matches the given request.

        Args:
          request: A request message.
          response: A response message.
          test_case: A unittest.TestCase object affording useful assertion methods.

        Raises:
          AssertionError: If the request and response do not match, indicating that
            there was some problem executing the RPC under test.
        """
        raise NotImplementedError()
class UnaryStreamTestMethodImplementation(
        six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
    """A controllable implementation of a unary-stream method."""

    @abc.abstractmethod
    def service(self, request, response_consumer, context, control):
        """Services an RPC that takes one message and produces a stream of messages.

        Args:
          request: The single request message for the RPC.
          response_consumer: A stream.Consumer to be called to accept the response
            messages of the RPC.
          context: A face.ServicerContext object.
          control: A test_control.Control to control execution of this method.

        Raises:
          abandonment.Abandoned: May or may not be raised when the RPC has been
            aborted.
        """
        raise NotImplementedError()
class UnaryStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
    """A type for unary-request-stream-response message pairings."""

    @abc.abstractmethod
    def request(self):
        """Affords a request message.

        Implementations of this method should return a different message with
        each call so that multiple test executions of the test method may be
        made with different inputs.

        Returns:
          A request message.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def verify(self, request, responses, test_case):
        """Verifies that the computed responses match the given request.

        Args:
          request: A request message.
          responses: A sequence of response messages.
          test_case: A unittest.TestCase object affording useful assertion methods.

        Raises:
          AssertionError: If the request and responses do not match, indicating that
            there was some problem executing the RPC under test.
        """
        raise NotImplementedError()
class StreamUnaryTestMethodImplementation(
        six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
    """A controllable implementation of a stream-unary method."""

    @abc.abstractmethod
    def service(self, response_callback, context, control):
        """Services an RPC that takes a stream of messages and produces one message.

        Args:
          response_callback: A callback to be called to accept the response message
            of the RPC.
          context: A face.ServicerContext object.
          control: A test_control.Control to control execution of this method.

        Returns:
          A stream.Consumer with which to accept the request messages of the RPC.
            The consumer returned from this method may or may not be invoked to
            completion: in the case of RPC abortion, RPC Framework will simply stop
            passing messages to this object. Implementations must not assume that
            this object will be called to completion of the request stream or even
            called at all.

        Raises:
          abandonment.Abandoned: May or may not be raised when the RPC has been
            aborted.
        """
        raise NotImplementedError()
class StreamUnaryTestMessages(six.with_metaclass(abc.ABCMeta)):
    """A type for stream-request-unary-response message pairings."""

    @abc.abstractmethod
    def requests(self):
        """Affords a sequence of request messages.

        Implementations of this method should return a different sequence with
        each call so that multiple test executions of the test method may be
        made with different inputs.

        Returns:
          A sequence of request messages.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def verify(self, requests, response, test_case):
        """Verifies that the computed response matches the given requests.

        Args:
          requests: A sequence of request messages.
          response: A response message.
          test_case: A unittest.TestCase object affording useful assertion methods.

        Raises:
          AssertionError: If the requests and response do not match, indicating that
            there was some problem executing the RPC under test.
        """
        raise NotImplementedError()
class StreamStreamTestMethodImplementation(
        six.with_metaclass(abc.ABCMeta, test_interfaces.Method)):
    """A controllable implementation of a stream-stream method."""

    @abc.abstractmethod
    def service(self, response_consumer, context, control):
        """Services an RPC that accepts and produces streams of messages.

        Args:
          response_consumer: A stream.Consumer to be called to accept the response
            messages of the RPC.
          context: A face.ServicerContext object.
          control: A test_control.Control to control execution of this method.

        Returns:
          A stream.Consumer with which to accept the request messages of the RPC.
            The consumer returned from this method may or may not be invoked to
            completion: in the case of RPC abortion, RPC Framework will simply stop
            passing messages to this object. Implementations must not assume that
            this object will be called to completion of the request stream or even
            called at all.

        Raises:
          abandonment.Abandoned: May or may not be raised when the RPC has been
            aborted.
        """
        raise NotImplementedError()
class StreamStreamTestMessages(six.with_metaclass(abc.ABCMeta)):
    """A type for stream-request-stream-response message pairings."""

    @abc.abstractmethod
    def requests(self):
        """Affords a sequence of request messages.

        Implementations of this method should return a different sequence with
        each call so that multiple test executions of the test method may be
        made with different inputs.

        Returns:
          A sequence of request messages.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def verify(self, requests, responses, test_case):
        """Verifies that the computed responses match the given requests.

        Args:
          requests: A sequence of request messages.
          responses: A sequence of response messages.
          test_case: A unittest.TestCase object affording useful assertion methods.

        Raises:
          AssertionError: If the requests and responses do not match, indicating
            that there was some problem executing the RPC under test.
        """
        raise NotImplementedError()
class TestService(six.with_metaclass(abc.ABCMeta)):
    """A specification of implemented methods to use in tests."""

    @abc.abstractmethod
    def unary_unary_scenarios(self):
        """Affords unary-request-unary-response test methods and their messages.

        Returns:
          A dict from method group-name pair to implementation/messages pair. The
            first element of the pair is a UnaryUnaryTestMethodImplementation object
            and the second element is a sequence of UnaryUnaryTestMessages
            objects.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def unary_stream_scenarios(self):
        """Affords unary-request-stream-response test methods and their messages.

        Returns:
          A dict from method group-name pair to implementation/messages pair. The
            first element of the pair is a UnaryStreamTestMethodImplementation
            object and the second element is a sequence of
            UnaryStreamTestMessages objects.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stream_unary_scenarios(self):
        """Affords stream-request-unary-response test methods and their messages.

        Returns:
          A dict from method group-name pair to implementation/messages pair. The
            first element of the pair is a StreamUnaryTestMethodImplementation
            object and the second element is a sequence of
            StreamUnaryTestMessages objects.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stream_stream_scenarios(self):
        """Affords stream-request-stream-response test methods and their messages.

        Returns:
          A dict from method group-name pair to implementation/messages pair. The
            first element of the pair is a StreamStreamTestMethodImplementation
            object and the second element is a sequence of
            StreamStreamTestMessages objects.
        """
        raise NotImplementedError()
| apache-2.0 |
pogaku9/aws-datalake-quickstart-looker-isv-integration | scripts/lambdas/writetoES/requests/packages/chardet/euckrfreq.py | 10 | 46574 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ration = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
# Everything below is of no interest for detection purposes
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| apache-2.0 |
nuuuboo/odoo | addons/account_bank_statement_extensions/__openerp__.py | 378 | 2357 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bank Statement Extensions to Support e-banking',
'version': '0.3',
'license': 'AGPL-3',
'author': 'Noviat',
'category': 'Generic Modules/Accounting',
'description': '''
Module that extends the standard account_bank_statement_line object for improved e-banking support.
===================================================================================================
This module adds:
-----------------
- valuta date
- batch payments
- traceability of changes to bank statement lines
- bank statement line views
- bank statements balances report
- performance improvements for digital import of bank statement (via
'ebanking_import' context flag)
- name_search on res.partner.bank enhanced to allow search on bank
and iban account numbers
''',
'depends': ['account'],
'demo': [],
'data' : [
'security/ir.model.access.csv',
'account_bank_statement_view.xml',
'account_bank_statement_report.xml',
'wizard/confirm_statement_line_wizard.xml',
'wizard/cancel_statement_line_wizard.xml',
'data/account_bank_statement_extensions_data.xml',
'views/report_bankstatementbalance.xml',
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
frogbywyplay/genbox_xintegtools | tests/test_xbump_ebuild_updater.py | 1 | 5302 | #!/usr/bin/python
#
# Copyright (C) 2006-2014 Wyplay, All Rights Reserved.
# This file is part of xintegtools.
#
# xintegtools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# xintegtools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see file COPYING.
# If not, see <http://www.gnu.org/licenses/>.
#
#
from unittest import TestCase, main
from mock import patch, mock_open
from xintegtools.xbump.ebuild_updater import TargetEbuildUpdater
class TargetEbuildUpdaterTester(TestCase):
    """Unit tests for TargetEbuildUpdater, driven entirely through mocks.

    Every test patches ``__builtin__.open`` (this is Python 2 code) so that
    constructing ``TargetEbuildUpdater('mock')`` never touches the filesystem.
    NOTE(review): several tests set attributes on the patched *class* mock
    (e.g. ``mock_ebuild.inherited``) rather than on ``.return_value`` -- confirm
    that matches how the production code accesses these collaborators.
    """
    @patch('xintegtools.xbump.ebuild_updater.Ebuild')
    @patch('__builtin__.open', new_callable=mock_open)
    def test_is_target_ebuild(self, mock_open_, mock_ebuild):
        """Ebuilds whose inherit list lacks the target eclass are rejected."""
        updater = TargetEbuildUpdater('mock')
        mock_ebuild.inherited.return_value = 'git-r3 eutils toolchain-funcs multilib'
        self.assertFalse(updater.is_target_ebuild())
        mock_ebuild.inherited.return_value = 'git-r3 eutils target-r1 overlays'
        self.assertFalse(updater.is_target_ebuild())
    @patch('xintegtools.xbump.ebuild_updater.GitRemote')
    @patch('xintegtools.xbump.ebuild_updater.TargetEbuildContent')
    @patch('__builtin__.open', new_callable=mock_open)
    def test_update_branch(self, mock_open_, mock_targetebuildcontent, mock_git):
        """update_branch fails on empty/unknown branches, succeeds otherwise."""
        updater = TargetEbuildUpdater('mock')
        # empty branch name is rejected outright
        self.assertFalse(updater.update_branch(str()))
        # branch missing on the remote
        mock_git.return_value.branch_exists.return_value = False
        self.assertFalse(updater.update_branch('master'))
        # branch exists -> update succeeds
        mock_git.return_value.branch_exists.return_value = True
        mock_targetebuildcontent.branch.return_value = 'master'
        self.assertTrue(updater.update_branch('master'))
        # a ValueError from the content object is tolerated
        mock_targetebuildcontent.branch.side_effect = ValueError('mock')
        self.assertTrue(updater.update_branch('1.5/rb'))
    @patch('xintegtools.xbump.ebuild_updater.GitRemote')
    @patch('xintegtools.xbump.ebuild_updater.TargetEbuildContent')
    @patch('__builtin__.open', new_callable=mock_open)
    def test_update_revision(self, mock_open_, mock_targetebuildcontent, mock_git):
        """update_revision needs a resolvable branch and a tag for the sha1."""
        updater = TargetEbuildUpdater('mock')
        # branch cannot be resolved to a sha1
        mock_git.return_value.resolve_branch.return_value = str()
        self.assertFalse(updater.update_revision('HEAD'))
        # sha1 resolves but has no associated tag
        mock_git.return_value.resolve_branch.return_value = 'bad0' * 10
        mock_git.return_value.get_tag_from_sha1.return_value = str()
        self.assertFalse(updater.update_revision('HEAD'))
        # tag found -> revision update succeeds whether or not it changed
        mock_git.return_value.get_tag_from_sha1.return_value = '2.5.1'
        mock_targetebuildcontent.commit.return_value = 'bad0' * 10
        self.assertTrue(updater.update_revision('HEAD'))
        mock_targetebuildcontent.commit.return_value = 'bad1' * 10
        self.assertTrue(updater.update_revision('HEAD'))
        #mock_targetebuildcontent.return_value.commit.side_effect = ['bad1' * 10, ValueError('mock')]
        #self.assertFalse(updater.update_revision('HEAD'))
    @patch('xintegtools.xbump.ebuild_updater.GitRemote')
    @patch('xintegtools.xbump.ebuild_updater.TargetEbuildContent')
    @patch('__builtin__.open', new_callable=mock_open)
    def test_update_overlays(self, mock_open_, mock_targetebuildcontent, mock_git):
        """update_overlays accepts an empty spec and a name:sha1 CSV spec."""
        updater = TargetEbuildUpdater('mock')
        self.assertTrue(updater.update_overlays(str()))
        overlays = 'foo:899e51d3d2b94b694dfc9976ee37e57d63d7829e,bar:7f8ac3a7e773e09fb591e3b34f057c16d15c80e9'
        mock_targetebuildcontent.return_value.overlays.keys.return_value = ['foo', 'bar', 'base', 'board']
        mock_git.return_value.resolve_branch.return_value = 'a' * 40
        self.assertTrue(updater.update_overlays(overlays))
    @patch('xintegtools.xbump.ebuild_updater.GitRemote')
    @patch('xintegtools.xbump.ebuild_updater.TargetEbuildContent')
    @patch('xintegtools.xbump.ebuild_updater.Ebuild')
    @patch('__builtin__.open', new_callable=mock_open)
    def test_compute_revision(self, mock_open_, mock_ebuild, mock_targetebuildcontent, mock_git):
        # TODO: not implemented yet; placeholder keeps the intended coverage visible.
        pass
    @patch('xintegtools.xbump.ebuild_updater.TargetEbuildContent')
    @patch('xintegtools.xbump.ebuild_updater.Ebuild')
    @patch('__builtin__.open', new_callable=mock_open)
    def test_release_ebuild(self, mock_open_, mock_ebuild, mock_targetebuildcontent):
        """release_ebuild returns an empty string on bad version or failed write."""
        mock_targetebuildcontent.return_value.write_into.return_value = True
        mock_ebuild.return_value.overlay.return_value = '/var/lib/layman/targets'
        mock_ebuild.return_value.category.return_value = 'product-targets'
        mock_ebuild.return_value.name.return_value = 'frog'
        updater = TargetEbuildUpdater('mock')
        # empty version string -> no release
        self.assertEqual(updater.release_ebuild(str()), str())
        version = '3.6.9'
        # write failure -> no release path returned
        mock_targetebuildcontent.return_value.write_into.return_value = False
        self.assertEqual(updater.release_ebuild(version), str())
# Allow running this test module directly with `python test_xbump_ebuild_updater.py`.
if __name__ == '__main__':
    main()
| gpl-2.0 |
dkillick/iris | lib/iris/tests/unit/analysis/__init__.py | 16 | 2045 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.analysis` package."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.analysis import Linear
from iris.tests import mock
class Test_Linear(tests.IrisTest):
    """Unit tests for the iris.analysis.Linear interpolation scheme."""
    def setUp(self):
        # Arbitrary sentinel value; only identity/equality matters here.
        self.extrap = 'some extrapolation'
    def test___init__(self):
        """The constructor must store the extrapolation mode it was given."""
        linear = Linear(extrapolation_mode=self.extrap)
        self.assertEqual(getattr(linear, 'extrapolation_mode', None),
                         self.extrap)
    @mock.patch('iris.analysis.LinearInterpolator', name='LinearInterpolator')
    def test_interpolator(self, linear_interp_patch):
        """interpolator() must construct a LinearInterpolator with the cube,
        coords and stored extrapolation mode, and return that instance."""
        mock_interpolator = mock.Mock(name='mocked linear interpolator')
        linear_interp_patch.return_value = mock_interpolator
        linear = Linear(self.extrap)
        cube = mock.Mock(name='cube')
        coords = mock.Mock(name='coords')
        interpolator = linear.interpolator(cube, coords)
        self.assertIs(interpolator, mock_interpolator)
        linear_interp_patch.assert_called_once_with(
            cube, coords, extrapolation_mode=self.extrap)
# Allow running this test module directly.
if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
mrquim/mrquimrepo | script.module.youtube.dl/lib/youtube_dl/extractor/inc.py | 40 | 1539 | from __future__ import unicode_literals
from .common import InfoExtractor
from .kaltura import KalturaIE
class IncIE(InfoExtractor):
    """Extractor for inc.com article pages that embed a Kaltura player."""
    _VALID_URL = r'https?://(?:www\.)?inc\.com/(?:[^/]+/)+(?P<id>[^.]+).html'
    _TESTS = [{
        'url': 'http://www.inc.com/tip-sheet/bill-gates-says-these-5-books-will-make-you-smarter.html',
        'md5': '7416739c9c16438c09fa35619d6ba5cb',
        'info_dict': {
            'id': '1_wqig47aq',
            'ext': 'mov',
            'title': 'Bill Gates Says These 5 Books Will Make You Smarter',
            'description': 'md5:bea7ff6cce100886fc1995acb743237e',
            'timestamp': 1474414430,
            'upload_date': '20160920',
            'uploader_id': 'video@inc.com',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.inc.com/video/david-whitford/founders-forum-tripadvisor-steve-kaufer-most-enjoyable-moment-for-entrepreneur.html',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Resolve an inc.com page to the Kaltura entry it embeds."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds a Kaltura player; pull the partner (account) id and
        # the video entry id out of inline javascript.
        partner_id = self._search_regex(
            r'var\s+_?bizo_data_partner_id\s*=\s*["\'](\d+)', webpage, 'partner id')
        videos_json = self._search_regex(
            r'pageInfo\.videos\s*=\s*\[(.+)\];', webpage, 'kaltura id')
        page_info = self._parse_json(videos_json, display_id)
        kaltura_id = page_info['vid_kaltura_id']
        kaltura_url = 'kaltura:%s:%s' % (partner_id, kaltura_id)
        # Hand off to the Kaltura extractor to do the actual extraction.
        return self.url_result(kaltura_url, KalturaIE.ie_key())
| gpl-2.0 |
mbauskar/alec_frappe5_erpnext | erpnext/home/doctype/feed/feed.py | 37 | 1545 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
class Feed(Document):
	"""Feed DocType controller; no custom behaviour beyond the base Document."""
	pass
def on_doctype_update():
	"""Ensure the composite (doc_type, doc_name) index exists on `tabFeed`."""
	existing_index = frappe.db.sql("""show index from `tabFeed`
		where Key_name="feed_doctype_docname_index" """)
	if existing_index:
		return
	# commit any open transaction before issuing DDL
	frappe.db.commit()
	frappe.db.sql("""alter table `tabFeed`
			add index feed_doctype_docname_index(doc_type, doc_name)""")
def get_permission_query_conditions(user):
	"""Return a SQL condition restricting Feed rows to what *user* may read.

	An empty string means "no restriction" (either user permissions do not
	apply, or the user can read nothing unrestricted).
	"""
	user = user or frappe.session.user
	if not frappe.permissions.apply_user_permissions("Feed", "read", user):
		return ""
	user_permissions = frappe.defaults.get_user_permissions(user)
	can_read = frappe.get_user(user).get_can_read()
	# doctypes readable without any document-level restriction
	unrestricted = set(can_read) - set(user_permissions.keys())
	can_read_doctypes = ['"{}"'.format(doctype) for doctype in list(unrestricted)]
	if not can_read_doctypes:
		return ""
	conditions = ["tabFeed.doc_type in ({})".format(", ".join(can_read_doctypes))]
	if user_permissions:
		# restricted doctypes: allow only the specifically permitted documents
		can_read_docs = ['"{}|{}"'.format(doctype, n)
			for doctype, names in user_permissions.items()
			for n in names]
		if can_read_docs:
			conditions.append("concat_ws('|', tabFeed.doc_type, tabFeed.doc_name) in ({})".format(
				", ".join(can_read_docs)))
	return "(" + " or ".join(conditions) + ")"
def has_permission(doc, user):
	"""A Feed row is readable iff *user* can read the document it references."""
	return frappe.has_permission(doc.doc_type, "read", doc.doc_name, user=user)
| agpl-3.0 |
philz/avro | src/test/py/testioreflect.py | 2 | 1888 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import avro.schema as schema
import avro.reflectio as reflectio
import testio
_PKGNAME = "org.apache.avro.test."
# NOTE: the second parameter shadows the builtin `object`; kept as-is because
# renaming a positional parameter could break keyword callers.
def dyvalidator(schm, object):
  """Validate *object* against schema *schm* using the reflect package prefix."""
  return reflectio.validate(schm, _PKGNAME, object)
class DyRandomData(testio.RandomData):
  """Random data generator that materializes RECORD schemas as reflected objects."""
  def nextdata(self, schm, d=0):
    """Generate random data for *schm*; records become reflected class instances."""
    if schm.gettype() != schema.RECORD:
      # every non-record type is handled by the generic generator
      return testio.RandomData.nextdata(self, schm, d)
    record_class = reflectio.gettype(schm, _PKGNAME)
    instance = record_class()
    # recursively fill each record field with random data
    for field in schm.getfields().values():
      setattr(instance, field.getname(), self.nextdata(field.getschema(), d))
    return instance
class ReflectDReader(reflectio.ReflectDatumReader):
  """Datum reader bound to the test package prefix."""
  def __init__(self, schm=None):
    reflectio.ReflectDatumReader.__init__(self, _PKGNAME, schm)
class ReflectDWriter(reflectio.ReflectDatumWriter):
  """Datum writer bound to the test package prefix."""
  def __init__(self, schm=None):
    reflectio.ReflectDatumWriter.__init__(self, _PKGNAME, schm)
class TestSchema(testio.TestSchema):
  """Re-run the generic schema I/O tests through the reflect reader/writer."""
  def __init__(self, methodName):
    # final False flag: see testio.TestSchema for its meaning
    testio.TestSchema.__init__(self, methodName, dyvalidator, ReflectDWriter,
                               ReflectDReader, DyRandomData, False)
| apache-2.0 |
hkoof/urwid | urwid/lcd_display.py | 5 | 16247 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid LCD display module
# Copyright (C) 2010 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from display_common import BaseScreen
import time
class LCDScreen(BaseScreen):
    """Base class for character LCD screens: stubs out terminal-only features.

    NOTE: this is Python 2 code -- draw_screen uses tuple parameter unpacking,
    which is invalid syntax in Python 3.
    """
    def set_terminal_properties(self, colors=None, bright_is_bold=None,
        has_underline=None):
        # LCDs have no terminal properties to configure
        pass
    def set_mouse_tracking(self, enable=True):
        # no mouse on an LCD device
        pass
    def set_input_timeouts(self, *args):
        pass
    def reset_default_terminal_palette(self, *args):
        pass
    def draw_screen(self, (cols, rows), r ):
        # subclasses implement actual drawing
        pass
    def clear(self):
        pass
    def get_cols_rows(self):
        # DISPLAY_SIZE is provided by the concrete subclass
        return self.DISPLAY_SIZE
class CFLCDScreen(LCDScreen):
    """
    Common methods for Crystal Fontz LCD displays

    Implements the low-level serial packet protocol:
    [command byte][length byte][data...][crc low][crc high].
    All buffers are Python 2 byte strings (chr/ord arithmetic).
    """
    # Button event names indexed by the key-activity code sent by the device.
    KEYS = [None, # no key with code 0
        'up_press', 'down_press', 'left_press',
        'right_press', 'enter_press', 'exit_press',
        'up_release', 'down_release', 'left_release',
        'right_release', 'enter_release', 'exit_release',
        'ul_press', 'ur_press', 'll_press', 'lr_press',
        'ul_release', 'ur_release', 'll_release', 'lr_release']
    # Command codes sent to the device.
    CMD_PING = 0
    CMD_VERSION = 1
    CMD_CLEAR = 6
    CMD_CGRAM = 9
    CMD_CURSOR_POSITION = 11 # data = [col, row]
    CMD_CURSOR_STYLE = 12 # data = [style (0-4)]
    CMD_LCD_CONTRAST = 13 # data = [contrast (0-255)]
    CMD_BACKLIGHT = 14 # data = [power (0-100)]
    CMD_LCD_DATA = 31 # data = [col, row] + text
    CMD_GPO = 34 # data = [pin(0-12), value(0-100)]
    # sent from device
    CMD_KEY_ACTIVITY = 0x80
    CMD_ACK = 0x40 # in high two bits ie. & 0xc0
    # Cursor style values for CMD_CURSOR_STYLE.
    CURSOR_NONE = 0
    CURSOR_BLINKING_BLOCK = 1
    CURSOR_UNDERSCORE = 2
    CURSOR_BLINKING_BLOCK_UNDERSCORE = 3
    CURSOR_INVERTING_BLINKING_BLOCK = 4
    MAX_PACKET_DATA_LENGTH = 22
    # BaseScreen capability flags: monochrome, no underline.
    colors = 1
    has_underline = False
    def __init__(self, device_path, baud):
        """
        device_path -- eg. '/dev/ttyUSB0'
        baud -- baud rate
        """
        super(CFLCDScreen, self).__init__()
        self.device_path = device_path
        from serial import Serial
        # timeout=0 -> non-blocking reads
        self._device = Serial(device_path, baud, timeout=0)
        # bytes received but not yet parsed into a complete packet
        self._unprocessed = ""
    @classmethod
    def get_crc(cls, buf):
        """Compute the 16-bit packet CRC over *buf* (a byte string)."""
        # This seed makes the output of this shift based algorithm match
        # the table based algorithm. The center 16 bits of the 32-bit
        # "newCRC" are used for the CRC. The MSB of the lower byte is used
        # to see what bit was shifted out of the center 16 bit CRC
        # accumulator ("carry flag analog");
        newCRC = 0x00F32100
        for byte in buf:
            # Push this byte’s bits through a software
            # implementation of a hardware shift & xor.
            for bit_count in range(8):
                # Shift the CRC accumulator
                newCRC >>= 1
                # The new MSB of the CRC accumulator comes
                # from the LSB of the current data byte.
                if ord(byte) & (0x01 << bit_count):
                    newCRC |= 0x00800000
                # If the low bit of the current CRC accumulator was set
                # before the shift, then we need to XOR the accumulator
                # with the polynomial (center 16 bits of 0x00840800)
                if newCRC & 0x00000080:
                    newCRC ^= 0x00840800
        # All the data has been done. Do 16 more bits of 0 data.
        for bit_count in range(16):
            # Shift the CRC accumulator
            newCRC >>= 1
            # If the low bit of the current CRC accumulator was set
            # before the shift we need to XOR the accumulator with
            # 0x00840800.
            if newCRC & 0x00000080:
                newCRC ^= 0x00840800
        # Return the center 16 bits, making this CRC match the one’s
        # complement that is sent in the packet.
        return ((~newCRC)>>8) & 0xffff
    def _send_packet(self, command, data):
        """
        low-level packet sending.
        Following the protocol requires waiting for ack packet between
        sending each packet to the device.
        """
        # packet layout: command, length, data, then little-endian CRC
        buf = chr(command) + chr(len(data)) + data
        crc = self.get_crc(buf)
        buf = buf + chr(crc & 0xff) + chr(crc >> 8)
        self._device.write(buf)
    def _read_packet(self):
        """
        low-level packet reading.
        returns (command/report code, data) or None

        This method stored data read and tries to resync when bad data
        is received.
        """
        # pull in any new data available
        self._unprocessed = self._unprocessed + self._device.read()
        while True:
            try:
                command, data, unprocessed = self._parse_data(self._unprocessed)
                self._unprocessed = unprocessed
                return command, data
            except self.MoreDataRequired:
                # incomplete packet: wait for the next read
                return
            except self.InvalidPacket:
                # throw out a byte and try to parse again
                self._unprocessed = self._unprocessed[1:]
    class InvalidPacket(Exception):
        # raised when length or CRC checks fail
        pass
    class MoreDataRequired(Exception):
        # raised when the buffer does not yet hold a full packet
        pass
    @classmethod
    def _parse_data(cls, data):
        """
        Try to read a packet from the start of data, returning
        (command/report code, packet_data, remaining_data)
        or raising InvalidPacket or MoreDataRequired
        """
        if len(data) < 2:
            raise cls.MoreDataRequired
        command = ord(data[0])
        plen = ord(data[1])
        if plen > cls.MAX_PACKET_DATA_LENGTH:
            raise cls.InvalidPacket("length value too large")
        if len(data) < plen + 4:
            raise cls.MoreDataRequired
        # CRC covers the command, length and data bytes
        crc = cls.get_crc(data[:2 + plen])
        pcrc = ord(data[2 + plen]) + (ord(data[3 + plen]) << 8 )
        if crc != pcrc:
            raise cls.InvalidPacket("CRC doesn't match")
        return (command, data[2:2 + plen], data[4 + plen:])
class KeyRepeatSimulator(object):
    """
    Provide simulated key-repeat events from raw press and release events.

    While two or more keys are held at the same time repeating is disabled,
    and it stays disabled until every key has been released.
    """
    def __init__(self, repeat_delay, repeat_next):
        """
        repeat_delay -- seconds to wait before starting to repeat keys
        repeat_next -- time between each repeated key
        """
        self.repeat_delay = repeat_delay
        self.repeat_next = repeat_next
        self.pressed = {}           # key -> time.time() of its press
        self.multiple_pressed = False
    def press(self, key):
        """Record that *key* was pressed now."""
        # a second (or later) simultaneous press disables repeating
        self.multiple_pressed = self.multiple_pressed or bool(self.pressed)
        self.pressed[key] = time.time()
    def release(self, key):
        """Record that *key* was released; unknown keys are ignored."""
        if key not in self.pressed:
            return  # ignore extra release events
        del self.pressed[key]
        if not self.pressed:
            # all keys up again: repeating may resume on the next press
            self.multiple_pressed = False
    def next_event(self):
        """
        Return (remaining, key) where remaining is the number of seconds
        (float) until the key repeat event should be sent, or None if no
        events are pending.
        """
        if self.multiple_pressed or len(self.pressed) != 1:
            return None
        (key, pressed_at), = self.pressed.items()
        delay_left = pressed_at + self.repeat_delay - time.time()
        return max(0, delay_left), key
    def sent_event(self):
        """
        Call this method when you have sent a key repeat event so the
        timer will be reset for the next event
        """
        if len(self.pressed) != 1:
            return  # ignore event that shouldn't have been sent
        (key,) = self.pressed
        # back-date the press so the next event fires after repeat_next
        self.pressed[key] = time.time() - self.repeat_delay + self.repeat_next
class CF635Screen(CFLCDScreen):
    u"""
    Crystal Fontz 635 display

    20x4 character display + cursor
    no foreground/background colors or settings supported

    see CGROM for list of close unicode matches to characters available

    6 button input
    up, down, left, right, enter (check mark), exit (cross)
    """
    DISPLAY_SIZE = (20, 4)
    # ① through ⑧ are programmable CGRAM (chars 0-7, repeated at 8-15)
    # double arrows (⇑⇓) appear as double arrowheads (chars 18, 19)
    # ⑴ resembles a bell
    # ⑵ resembles a filled-in "Y"
    # ⑶ is the letters "Pt" together
    # partial blocks (▇▆▄▃▁) are actually shorter versions of (▉▋▌▍▏)
    # both groups are intended to draw horizontal bars with pixel
    # precision, use ▇*[▆▄▃▁]? for a thin bar or ▉*[▋▌▍▏]? for a thick bar
    CGROM = (
        u"①②③④⑤⑥⑦⑧①②③④⑤⑥⑦⑧"
        u"►◄⇑⇓«»↖↗↙↘▲▼↲^ˇ█"
        u" !\"#¤%&'()*+,-./"
        u"0123456789:;<=>?"
        u"¡ABCDEFGHIJKLMNO"
        u"PQRSTUVWXYZÄÖÑܧ"
        u"¿abcdefghijklmno"
        u"pqrstuvwxyzäöñüà"
        u"⁰¹²³⁴⁵⁶⁷⁸⁹½¼±≥≤μ"
        u"♪♫⑴♥♦⑵⌜⌟“”()αɛδ∞"
        u"@£$¥èéùìòÇᴾØøʳÅå"
        u"⌂¢ΦτλΩπΨΣθΞ♈ÆæßÉ"
        u"ΓΛΠϒ_ÈÊêçğŞşİι~◊"
        u"▇▆▄▃▁ƒ▉▋▌▍▏⑶◽▪↑→"
        u"↓←ÁÍÓÚÝáíóúýÔôŮů"
        u"ČĔŘŠŽčĕřšž[\]{|}")
    # default cursor appearance; change via set_cursor_style()
    cursor_style = CFLCDScreen.CURSOR_INVERTING_BLINKING_BLOCK
    def __init__(self, device_path, baud=115200,
            repeat_delay=0.5, repeat_next=0.125,
            key_map=['up', 'down', 'left', 'right', 'enter', 'esc']):
        """
        device_path -- eg. '/dev/ttyUSB0'
        baud -- baud rate
        repeat_delay -- seconds to wait before starting to repeat keys
        repeat_next -- time between each repeated key
        key_map -- the keys to send for this device's buttons
        """
        super(CF635Screen, self).__init__(device_path, baud)
        self.repeat_delay = repeat_delay
        self.repeat_next = repeat_next
        self.key_repeat = KeyRepeatSimulator(repeat_delay, repeat_next)
        self.key_map = key_map
        # protocol state: command awaiting its ACK, plus queued commands
        self._last_command = None
        self._last_command_time = 0
        self._command_queue = []
        # cache of the last text written, used to avoid redundant updates
        self._screen_buf = None
        self._previous_canvas = None
        self._update_cursor = False
    def get_input_descriptors(self):
        """
        return the fd from our serial device so we get called
        on input and responses
        """
        return [self._device.fd]
    def get_input_nonblocking(self):
        """
        Return a (next_input_timeout, keys_pressed, raw_keycodes)
        tuple.

        The protocol for our device requires waiting for acks between
        each command, so this method responds to those as well as key
        press and release events.

        Key repeat events are simulated here as the device doesn't send
        any for us.

        raw_keycodes are the bytes of messages we received, which might
        not seem to have any correspondence to keys_pressed.
        """
        input = []
        raw_input = []
        timeout = None
        while True:
            packet = self._read_packet()
            if not packet:
                break
            command, data = packet
            if command == self.CMD_KEY_ACTIVITY and data:
                d0 = ord(data[0])
                if 1 <= d0 <= 12:
                    # codes 1-6 are presses, 7-12 the matching releases
                    release = d0 > 6
                    keycode = d0 - (release * 6) - 1
                    key = self.key_map[keycode]
                    if release:
                        self.key_repeat.release(key)
                    else:
                        input.append(key)
                        self.key_repeat.press(key)
                    raw_input.append(d0)
            elif command & 0xc0 == 0x40: # "ACK"
                # device acknowledged our last command: send the next one
                if command & 0x3f == self._last_command:
                    self._send_next_command()
        # simulate key repeats for a single held button
        next_repeat = self.key_repeat.next_event()
        if next_repeat:
            timeout, key = next_repeat
            if not timeout:
                input.append(key)
                self.key_repeat.sent_event()
                timeout = None
        return timeout, input, []
    def _send_next_command(self):
        """
        send out the next command in the queue
        """
        if not self._command_queue:
            self._last_command = None
            return
        command, data = self._command_queue.pop(0)
        self._send_packet(command, data)
        self._last_command = command # record command for ACK
        self._last_command_time = time.time()
    def queue_command(self, command, data):
        """Queue a packet; it is sent immediately if nothing awaits an ACK."""
        self._command_queue.append((command, data))
        # not waiting? send away!
        if self._last_command is None:
            self._send_next_command()
    def draw_screen(self, size, canvas):
        """Send only the rows of *canvas* that changed since the last draw."""
        assert size == self.DISPLAY_SIZE
        if self._screen_buf:
            osb = self._screen_buf
        else:
            osb = []
        sb = []
        y = 0
        for row in canvas.content():
            text = []
            for a, cs, run in row:
                text.append(run)
            # only transmit a row if it differs from what is on the device
            if not osb or osb[y] != text:
                self.queue_command(self.CMD_LCD_DATA, chr(0) + chr(y) +
                    "".join(text))
            sb.append(text)
            y += 1
        # update the hardware cursor only when its position/style changed
        if (self._previous_canvas and
                self._previous_canvas.cursor == canvas.cursor and
                (not self._update_cursor or not canvas.cursor)):
            pass
        elif canvas.cursor is None:
            self.queue_command(self.CMD_CURSOR_STYLE, chr(self.CURSOR_NONE))
        else:
            x, y = canvas.cursor
            self.queue_command(self.CMD_CURSOR_POSITION, chr(x) + chr(y))
            self.queue_command(self.CMD_CURSOR_STYLE, chr(self.cursor_style))
        self._update_cursor = False
        self._screen_buf = sb
        self._previous_canvas = canvas
    def program_cgram(self, index, data):
        """
        Program character data.  Characters available as chr(0) through
        chr(7), and repeated as chr(8) through chr(15).

        index -- 0 to 7 index of character to program
        data -- list of 8, 6-bit integer values top to bottom with MSB
            on the left side of the character.
        """
        assert 0 <= index <= 7
        assert len(data) == 8
        self.queue_command(self.CMD_CGRAM, chr(index) +
            "".join([chr(x) for x in data]))
    def set_cursor_style(self, style):
        """
        style -- CURSOR_BLINKING_BLOCK, CURSOR_UNDERSCORE,
            CURSOR_BLINKING_BLOCK_UNDERSCORE or
            CURSOR_INVERTING_BLINKING_BLOCK
        """
        assert 1 <= style <= 4
        self.cursor_style = style
        # force the style to be re-sent on the next draw_screen()
        self._update_cursor = True
    def set_backlight(self, value):
        """
        Set backlight brightness

        value -- 0 to 100
        """
        assert 0 <= value <= 100
        self.queue_command(self.CMD_BACKLIGHT, chr(value))
    def set_lcd_contrast(self, value):
        """
        value -- 0 to 255
        """
        assert 0 <= value <= 255
        self.queue_command(self.CMD_LCD_CONTRAST, chr(value))
    def set_led_pin(self, led, rg, value):
        """
        led -- 0 to 3
        rg -- 0 for red, 1 for green
        value -- 0 to 100
        """
        assert 0 <= led <= 3
        assert rg in (0, 1)
        assert 0 <= value <= 100
        # GPO pins 12 down to 5 map to the red/green halves of the 4 LEDs
        self.queue_command(self.CMD_GPO, chr(12 - 2 * led - rg) +
            chr(value))
| lgpl-2.1 |
briancurtin/pyrax | samples/images/list_image_members.py | 13 | 1389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pyrax
# Authenticate against Rackspace using the stored credentials file.
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
imgs = pyrax.images

print("This will loop through all your private images and list the members for "
        "each.")

images = imgs.list(visibility="private")
if not images:
    print("No images exist.")
    exit()

# Report the members (shared projects) of each private image.
for image in images:
    members = imgs.list_image_members(image)
    if members:
        print("Image %s:" % image.id)
        for member in members:
            print(" %s (%s)" % (member.id, member.status))
    else:
        print("Image %s: no members" % image.id)
| apache-2.0 |
gordon-elliott/glod | src/glod/model/address.py | 1 | 1353 | __copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
from a_tuin.metadata import StringField, ObjectFieldGroupBase, Collection
# Map ISO country codes to the wording used on postal labels; codes not
# listed here are simply omitted from the label (see Address.post_label).
COUNTRY_ISO_LOOKUP = {
    'GB': 'UK',
}
class Address(ObjectFieldGroupBase):
    """Postal address record.

    Data usage:
    1. delivering messages by postal system
    2. arranging house visits
    """
    public_interface = (
        StringField('address1', required=True),
        StringField('address2'),
        StringField('address3'),
        StringField('county'),
        StringField('countryISO', required=True),
        StringField('eircode'),
        StringField('telephone'),
    )
    def post_label(self, addressees=None):
        """Format this address (optionally with *addressees*) as label lines."""
        line_1 = self.address1
        line_2 = self.address2
        # A very short first line is most likely a house number: fold it
        # into the second line so the label reads naturally.
        if line_1 and line_2 and len(line_1) <= 3:
            line_2 = f"{line_1}, {line_2}"
            line_1 = None
        candidates = (
            addressees,
            line_1,
            line_2,
            self.address3,
            self.county,
            self.eircode,
            COUNTRY_ISO_LOOKUP.get(self.countryISO),
        )
        # drop None and empty strings, one label line per remaining field
        return ",\n".join(part for part in candidates if part)
class AddressCollection(Collection):
    """Collection of Address instances; behaviour inherited from Collection."""
    pass
| mit |
hyperized/ansible | lib/ansible/module_utils/network/iosxr/facts/facts.py | 17 | 2727 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The facts class for iosxr
this file validates each subset of facts and selectively
calls the appropriate facts gathering function
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.facts.facts import FactsBase
from ansible.module_utils.network.iosxr.facts.legacy.base import Default, Hardware, Interfaces, Config
from ansible.module_utils.network.iosxr.facts.lacp.lacp import LacpFacts
from ansible.module_utils.network.iosxr.facts.lacp_interfaces.lacp_interfaces import Lacp_interfacesFacts
from ansible.module_utils.network.iosxr.facts.lldp_global.lldp_global import Lldp_globalFacts
from ansible.module_utils.network.iosxr.facts.lldp_interfaces.lldp_interfaces import Lldp_interfacesFacts
from ansible.module_utils.network.iosxr.facts.interfaces.interfaces import InterfacesFacts
from ansible.module_utils.network.iosxr.facts.lag_interfaces.lag_interfaces import Lag_interfacesFacts
from ansible.module_utils.network.iosxr.facts.l2_interfaces.l2_interfaces import L2_InterfacesFacts
from ansible.module_utils.network.iosxr.facts.l3_interfaces.l3_interfaces import L3_InterfacesFacts
# Legacy "gather_subset" names mapped to their collector classes.
FACT_LEGACY_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)
# Resource-module fact names mapped to their collector classes.
FACT_RESOURCE_SUBSETS = dict(
    lacp=LacpFacts,
    lacp_interfaces=Lacp_interfacesFacts,
    lldp_global=Lldp_globalFacts,
    lldp_interfaces=Lldp_interfacesFacts,
    interfaces=InterfacesFacts,
    l2_interfaces=L2_InterfacesFacts,
    lag_interfaces=Lag_interfacesFacts,
    l3_interfaces=L3_InterfacesFacts
)
class Facts(FactsBase):
    """ The fact class for iosxr
    """
    # valid subset names, derived from the module-level registries above
    VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())
    VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys())
    def __init__(self, module):
        super(Facts, self).__init__(module)
    def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
        """ Collect the facts for iosxr
        :param legacy_facts_type: List of legacy facts types
        :param resource_facts_type: List of resource fact types
        :param data: previously collected conf
        :rtype: dict
        :return: the facts gathered
        """
        # resource facts first, then the legacy subsets; both helpers
        # accumulate into self.ansible_facts / self._warnings
        if self.VALID_RESOURCE_SUBSETS:
            self.get_network_resources_facts(FACT_RESOURCE_SUBSETS, resource_facts_type, data)
        if self.VALID_LEGACY_GATHER_SUBSETS:
            self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type)
        return self.ansible_facts, self._warnings
| gpl-3.0 |
saisaizhang/Food | flask/lib/python2.7/site-packages/migrate/tests/fixture/database.py | 15 | 6889 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import sys
import six
from decorator import decorator
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import create_session
from sqlalchemy.pool import StaticPool
from migrate.changeset.schema import ColumnDelta
from migrate.versioning.util import Memoize
from migrate.tests.fixture.base import Base
from migrate.tests.fixture.pathed import Pathed
log = logging.getLogger(__name__)
@Memoize
def readurls():
    """Read the database URLs for testing from the config file.

    Returns a list of URL strings with '__tmp__' replaced by a fresh
    temporary path.  Raises IOError with setup instructions when the
    config file is missing.  Memoized: the file is read only once.
    """
    # TODO: remove tmpfile since sqlite can store db in memory
    # separate config files because py2 and py3 runs must not share DBs
    filename = 'test_db.cfg' if six.PY2 else "test_db_py3.cfg"
    ret = list()
    tmpfile = Pathed.tmp()
    fullpath = os.path.join(os.curdir, filename)
    try:
        fd = open(fullpath)
    except IOError:
        raise IOError("""You must specify the databases to use for testing!
Copy %(filename)s.tmpl to %(filename)s and edit your database URLs.""" % locals())
    for line in fd:
        # '#' lines are comments
        if line.startswith('#'):
            continue
        line = line.replace('__tmp__', tmpfile).strip()
        ret.append(line)
    fd.close()
    return ret
def is_supported(url, supported, not_supported):
    """Decide whether the backend named by *url* should run the test.

    *supported* / *not_supported* may each be None, a single backend name,
    or a collection of names.  With both None every backend is accepted.
    """
    db = url.split(':', 1)[0]
    if supported is not None:
        if isinstance(supported, six.string_types):
            return supported == db
        return db in supported
    if not_supported is not None:
        if isinstance(not_supported, six.string_types):
            return db != not_supported
        return db not in not_supported
    # no restriction either way
    return True
def usedb(supported=None, not_supported=None):
    """Decorates tests to be run with a database connection
    These tests are run once for each available database

    @param supported: run tests for ONLY these databases
    @param not_supported: run tests for all databases EXCEPT these

    If both supported and not_supported are empty, all dbs are assumed
    to be supported
    """
    if supported is not None and not_supported is not None:
        raise AssertionError("Can't specify both supported and not_supported in fixture.db()")
    urls = readurls()
    my_urls = [url for url in urls if is_supported(url, supported, not_supported)]
    @decorator
    def dec(f, self, *a, **kw):
        # Run the wrapped test once per configured backend; remember which
        # backends failed and re-raise the last failure at the end.
        failed_for = []
        fail = False
        for url in my_urls:
            try:
                log.debug("Running test with engine %s", url)
                try:
                    self._setup(url)
                except sa_exc.OperationalError:
                    # backend not reachable: skip it rather than fail
                    log.info('Backend %s is not available, skip it', url)
                    continue
                except Exception as e:
                    # NOTE(review): a non-operational setup error is recorded
                    # but the test body still runs below -- confirm intended.
                    setup_exception = e
                else:
                    setup_exception = None
                try:
                    f(self, *a, **kw)
                finally:
                    # always attempt teardown; surface setup/teardown errors
                    # together as one RuntimeError
                    try:
                        self._teardown()
                    except Exception as e:
                        teardown_exception=e
                    else:
                        teardown_exception=None
                    if setup_exception or teardown_exception:
                        raise RuntimeError((
                            'Exception during _setup/_teardown:\n'
                            'setup: %r\n'
                            'teardown: %r\n'
                            )%(setup_exception,teardown_exception))
            except Exception:
                failed_for.append(url)
                # keep full exc_info so the traceback survives to the re-raise
                fail = sys.exc_info()
        for url in failed_for:
            log.error('Failed for %s', url)
        if fail:
            # cause the failure :-)
            six.reraise(*fail)
    return dec
class DB(Base):
    """Base test-case class providing a per-test database connection."""
    # Constants: connection level
    NONE = 0 # No connection; just set self.url
    CONNECT = 1 # Connect; no transaction
    TXN = 2 # Everything in a transaction
    # default connection level for subclasses
    level = TXN
    def _engineInfo(self, url=None):
        # describe the engine under test; defaults to the current url
        if url is None:
            url = self.url
        return url
    def _setup(self, url):
        """Connect to *url* and drop any tables left over from earlier runs."""
        self._connect(url)
        # make sure there are no tables lying around
        meta = MetaData(self.engine)
        meta.reflect()
        meta.drop_all()
    def _teardown(self):
        self._disconnect()
    def _connect(self, url):
        """Create the engine and metadata; session/txn setup is disabled."""
        self.url = url
        # TODO: seems like 0.5.x branch does not work with engine.dispose and staticpool
        #self.engine = create_engine(url, echo=True, poolclass=StaticPool)
        self.engine = create_engine(url, echo=True)
        # silence the logger added by SA, nose adds its own!
        logging.getLogger('sqlalchemy').handlers=[]
        self.meta = MetaData(bind=self.engine)
        if self.level < self.CONNECT:
            return
        #self.session = create_session(bind=self.engine)
        if self.level < self.TXN:
            return
        #self.txn = self.session.begin()
    def _disconnect(self):
        # roll back / close whatever _connect managed to create
        if hasattr(self, 'txn'):
            self.txn.rollback()
        if hasattr(self, 'session'):
            self.session.close()
        #if hasattr(self,'conn'):
        #    self.conn.close()
        self.engine.dispose()
    def _supported(self, url):
        """Check the current test method's supported/not_supported markers."""
        db = url.split(':',1)[0]
        # name-mangled access to unittest's private test-method name
        func = getattr(self, self._TestCase__testMethodName)
        if hasattr(func, 'supported'):
            return db in func.supported
        if hasattr(func, 'not_supported'):
            return not (db in func.not_supported)
        # Neither list assigned; assume all are supported
        return True
    def _not_supported(self, url):
        return not self._supported(url)
    def _select_row(self):
        """Select rows, used in multiple tests"""
        return self.table.select().execution_options(
            autocommit=True).execute().fetchone()
    def refresh_table(self, name=None):
        """Reload the table from the database
        Assumes we're working with only a single table, self.table, and
        metadata self.meta

        Working w/ multiple tables is not possible, as tables can only be
        reloaded with meta.clear()
        """
        if name is None:
            name = self.table.name
        self.meta.clear()
        self.table = Table(name, self.meta, autoload=True)
    def compare_columns_equal(self, columns1, columns2, ignore=None):
        """Loop through all columns and compare them"""
        # NOTE(review): the loop variable below shadows this sort-key
        # function; harmless today (sorted() has already run) but fragile.
        def key(column):
            return column.name
        for c1, c2 in zip(sorted(columns1, key=key), sorted(columns2, key=key)):
            diffs = ColumnDelta(c1, c2).diffs
            if ignore:
                for key in ignore:
                    diffs.pop(key, None)
            if diffs:
                self.fail("Comparing %s to %s failed: %s" % (columns1, columns2, diffs))
# TODO: document engine.dispose and write tests
| bsd-3-clause |
mnahm5/django-estore | Lib/site-packages/cryptography/hazmat/primitives/serialization.py | 21 | 5288 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import base64
import struct
from enum import Enum
import six
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
def load_pem_private_key(data, password, backend):
    """Delegate PEM private-key deserialization to *backend*."""
    return backend.load_pem_private_key(data, password)
def load_pem_public_key(data, backend):
    """Delegate PEM public-key deserialization to *backend*."""
    return backend.load_pem_public_key(data)
def load_der_private_key(data, password, backend):
    """Delegate DER private-key deserialization to *backend*."""
    return backend.load_der_private_key(data, password)
def load_der_public_key(data, backend):
    """Delegate DER public-key deserialization to *backend*."""
    return backend.load_der_public_key(data)
def load_ssh_public_key(data, backend):
    """Parse an OpenSSH one-line public key (``<type> <base64-body> ...``)
    and hand the decoded body to the type-specific loader.
    """
    key_parts = data.split(b' ', 2)
    if len(key_parts) < 2:
        raise ValueError(
            'Key is not in the proper format or contains extra data.')
    key_type, key_body = key_parts[0], key_parts[1]
    # Dispatch table replaces the original if/elif chain.
    loaders = {
        b'ssh-rsa': _load_ssh_rsa_public_key,
        b'ssh-dss': _load_ssh_dss_public_key,
        b'ecdsa-sha2-nistp256': _load_ssh_ecdsa_public_key,
        b'ecdsa-sha2-nistp384': _load_ssh_ecdsa_public_key,
        b'ecdsa-sha2-nistp521': _load_ssh_ecdsa_public_key,
    }
    try:
        loader = loaders[key_type]
    except KeyError:
        raise UnsupportedAlgorithm('Key type is not supported.')
    # NOTE(review): TypeError is what Python 2's b64decode raises on bad
    # input; kept as-is to preserve behavior.
    try:
        decoded_data = base64.b64decode(key_body)
    except TypeError:
        raise ValueError('Key is not in the proper format.')
    inner_key_type, rest = _ssh_read_next_string(decoded_data)
    if inner_key_type != key_type:
        raise ValueError(
            'Key header and key body contain different key type values.'
        )
    return loader(key_type, rest, backend)
def _load_ssh_rsa_public_key(key_type, decoded_data, backend):
    """Parse an ssh-rsa key body (mpints: e then n) into an RSA public key."""
    e, rest = _ssh_read_next_mpint(decoded_data)
    n, rest = _ssh_read_next_mpint(rest)
    if rest:
        raise ValueError('Key body contains extra bytes.')
    return rsa.RSAPublicNumbers(e, n).public_key(backend)
def _load_ssh_dss_public_key(key_type, decoded_data, backend):
    """Parse an ssh-dss key body (mpints: p, q, g, y) into a DSA public key."""
    p, rest = _ssh_read_next_mpint(decoded_data)
    q, rest = _ssh_read_next_mpint(rest)
    g, rest = _ssh_read_next_mpint(rest)
    y, rest = _ssh_read_next_mpint(rest)
    if rest:
        raise ValueError('Key body contains extra bytes.')
    parameter_numbers = dsa.DSAParameterNumbers(p, q, g)
    public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers)
    return public_numbers.public_key(backend)
def _load_ssh_ecdsa_public_key(expected_key_type, decoded_data, backend):
    """Parse an ecdsa-sha2-* key body (curve name + point) into an EC key."""
    curve_name, rest = _ssh_read_next_string(decoded_data)
    data, rest = _ssh_read_next_string(rest)
    # The curve embedded in the body must match the declared key type.
    if expected_key_type != b"ecdsa-sha2-" + curve_name:
        raise ValueError(
            'Key header and key body contain different key type values.'
        )
    if rest:
        raise ValueError('Key body contains extra bytes.')
    curve = {
        b"nistp256": ec.SECP256R1,
        b"nistp384": ec.SECP384R1,
        b"nistp521": ec.SECP521R1,
    }[curve_name]()
    # 0x04 is the tag for an uncompressed EC point; anything else is a
    # compressed encoding, which this parser does not handle.
    if six.indexbytes(data, 0) != 4:
        raise NotImplementedError(
            "Compressed elliptic curve points are not supported"
        )
    numbers = ec.EllipticCurvePublicNumbers.from_encoded_point(curve, data)
    return numbers.public_key(backend)
def _ssh_read_next_string(data):
"""
Retrieves the next RFC 4251 string value from the data.
While the RFC calls these strings, in Python they are bytes objects.
"""
if len(data) < 4:
raise ValueError("Key is not in the proper format")
str_len, = struct.unpack('>I', data[:4])
if len(data) < str_len + 4:
raise ValueError("Key is not in the proper format")
return data[4:4 + str_len], data[4 + str_len:]
def _ssh_read_next_mpint(data):
    """
    Reads the next mpint from the data.
    Currently, all mpints are interpreted as unsigned.

    Returns an ``(int, remainder)`` pair.
    """
    mpint_data, rest = _ssh_read_next_string(data)
    return (
        utils.int_from_bytes(mpint_data, byteorder='big', signed=False), rest
    )
def _ssh_write_string(data):
return struct.pack(">I", len(data)) + data
def _ssh_write_mpint(value):
    """Serialize a non-negative integer as an RFC 4251 mpint."""
    data = utils.int_to_bytes(value)
    # mpints are two's complement: a set high bit would read back as
    # negative, so prepend a zero byte to keep the value positive.
    if six.indexbytes(data, 0) & 0x80:
        data = b"\x00" + data
    return _ssh_write_string(data)
class Encoding(Enum):
    """Output encodings for serialized keys."""
    PEM = "PEM"
    DER = "DER"
    OpenSSH = "OpenSSH"
class PrivateFormat(Enum):
    """Structural formats for serialized private keys."""
    PKCS8 = "PKCS8"
    TraditionalOpenSSL = "TraditionalOpenSSL"
class PublicFormat(Enum):
    """Structural formats for serialized public keys."""
    SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1"
    PKCS1 = "Raw PKCS#1"
    OpenSSH = "OpenSSH"
@six.add_metaclass(abc.ABCMeta)
class KeySerializationEncryption(object):
    """Abstract marker base for private-key serialization encryption options."""
    pass
@utils.register_interface(KeySerializationEncryption)
class BestAvailableEncryption(object):
    """Encrypt the serialized key with *password*.

    The concrete scheme is chosen elsewhere (presumably by the backend doing
    the serialization — not visible in this module).
    """
    def __init__(self, password):
        if not isinstance(password, bytes) or len(password) == 0:
            raise ValueError("Password must be 1 or more bytes.")
        self.password = password
@utils.register_interface(KeySerializationEncryption)
class NoEncryption(object):
    """Option object requesting an unencrypted serialized key."""
    pass
| mit |
redhat-openstack/trove | trove/tests/fakes/taskmanager.py | 4 | 1986 | # Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from collections import defaultdict
import eventlet
from oslo_log import log as logging
from trove import rpc
from trove.taskmanager.api import API
from trove.taskmanager.manager import Manager
import trove.tests.util.usage as usage
LOG = logging.getLogger(__name__)
MESSAGE_QUEUE = defaultdict(list)
class FakeRpcClient(object):
    """In-process stand-in for the taskmanager RPC client.

    Calls are dispatched directly to a ``Manager`` instance instead of going
    over a message bus.
    """
    def call(self, context, method_name, *args, **kwargs):
        # Synchronous RPC: invoke the manager method and return its result.
        manager, method = self._get_tm_method(method_name)
        return method(manager, context, *args, **kwargs)
    def cast(self, context, method_name, *args, **kwargs):
        # Asynchronous RPC: schedule the call shortly in the future via
        # eventlet so the caller returns immediately.
        manager, method = self._get_tm_method(method_name)
        def func():
            method(manager, context, *args, **kwargs)
        eventlet.spawn_after(0.1, func)
    def _get_tm_method(self, method_name):
        # Fresh Manager per dispatch; the unbound method is fetched from the
        # class and called with the manager as its first argument.
        manager = Manager()
        method = getattr(Manager, method_name)
        return manager, method
    def prepare(self, *args, **kwargs):
        # Mirrors the real client API; all options are ignored here.
        return self
class FakeNotifier:
    """Notifier stand-in that forwards events to the tests' usage tracker."""
    def info(self, ctxt, event_type, payload):
        usage.notify(event_type, payload)
def monkey_patch():
    """Replace taskmanager RPC/notification plumbing with in-process fakes."""
    def fake_get_client(self, *args, **kwargs):
        return FakeRpcClient()
    def fake_get_notifier(service=None, host=None, publisher_id=None):
        return FakeNotifier()
    # Patch at class/module level so every subsequent lookup gets the fakes.
    API.get_client = fake_get_client
    rpc.get_notifier = fake_get_notifier
| apache-2.0 |
danielreed/python-hpOneView | examples/scripts/get-libver.py | 4 | 1800 | #!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import hpOneView as hpov
# Enforce minimum interpreter versions before doing anything else.
PYTHON_VERSION = sys.version_info[:3]  # (major, minor, micro)
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
    # NOTE(review): rationale for 2.7.9 is not stated here -- presumably the
    # ssl improvements shipped in that release; confirm against project docs.
    if PYTHON_VERSION < (2, 7, 9):
        raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
    raise Exception('Must use Python 3.4 or later')
print('hpOneView version: ', hpov.__version__)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| mit |
40223249-1/-w16b_test | static/Brython3.1.3-20150514-095342/Lib/unittest/test/testmock/testmagicmethods.py | 737 | 12145 | import unittest
import inspect
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
    """Exercises how Mock and MagicMock expose, configure, isolate and reset
    magic (dunder) methods."""
    def test_deleting_magic_methods(self):
        mock = Mock()
        self.assertFalse(hasattr(mock, '__getitem__'))
        mock.__getitem__ = Mock()
        self.assertTrue(hasattr(mock, '__getitem__'))
        del mock.__getitem__
        self.assertFalse(hasattr(mock, '__getitem__'))
    def test_magicmock_del(self):
        mock = MagicMock()
        # before using getitem
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])
        mock = MagicMock()
        # this time use it first
        mock['foo']
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])
    def test_magic_method_wrapping(self):
        mock = Mock()
        def f(self, name):
            return self, 'fish'
        mock.__getitem__ = f
        # Assigned functions are wrapped, not stored as-is.
        self.assertFalse(mock.__getitem__ is f)
        self.assertEqual(mock['foo'], (mock, 'fish'))
        self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
        mock.__getitem__ = mock
        self.assertTrue(mock.__getitem__ is mock)
    def test_magic_methods_isolated_between_mocks(self):
        mock1 = Mock()
        mock2 = Mock()
        mock1.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock1), [])
        self.assertRaises(TypeError, lambda: list(mock2))
    def test_repr(self):
        mock = Mock()
        self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
        mock.__repr__ = lambda s: 'foo'
        self.assertEqual(repr(mock), 'foo')
    def test_str(self):
        mock = Mock()
        self.assertEqual(str(mock), object.__str__(mock))
        mock.__str__ = lambda s: 'foo'
        self.assertEqual(str(mock), 'foo')
    def test_dict_methods(self):
        mock = Mock()
        self.assertRaises(TypeError, lambda: mock['foo'])
        def _del():
            del mock['foo']
        def _set():
            mock['foo'] = 3
        self.assertRaises(TypeError, _del)
        self.assertRaises(TypeError, _set)
        # Wire the mapping protocol through to a real dict.
        _dict = {}
        def getitem(s, name):
            return _dict[name]
        def setitem(s, name, value):
            _dict[name] = value
        def delitem(s, name):
            del _dict[name]
        mock.__setitem__ = setitem
        mock.__getitem__ = getitem
        mock.__delitem__ = delitem
        self.assertRaises(KeyError, lambda: mock['foo'])
        mock['foo'] = 'bar'
        self.assertEqual(_dict, {'foo': 'bar'})
        self.assertEqual(mock['foo'], 'bar')
        del mock['foo']
        self.assertEqual(_dict, {})
    def test_numeric(self):
        original = mock = Mock()
        mock.value = 0
        self.assertRaises(TypeError, lambda: mock + 3)
        def add(self, other):
            mock.value += other
            return self
        mock.__add__ = add
        self.assertEqual(mock + 3, mock)
        self.assertEqual(mock.value, 3)
        del mock.__add__
        def iadd(mock):
            mock += 3
        self.assertRaises(TypeError, iadd, mock)
        mock.__iadd__ = add
        mock += 6
        self.assertEqual(mock, original)
        self.assertEqual(mock.value, 9)
        self.assertRaises(TypeError, lambda: 3 + mock)
        mock.__radd__ = add
        self.assertEqual(7 + mock, mock)
        self.assertEqual(mock.value, 16)
    def test_hash(self):
        mock = Mock()
        # test delegation
        self.assertEqual(hash(mock), Mock.__hash__(mock))
        def _hash(s):
            return 3
        mock.__hash__ = _hash
        self.assertEqual(hash(mock), 3)
    def test_nonzero(self):
        m = Mock()
        self.assertTrue(bool(m))
        m.__bool__ = lambda s: False
        self.assertFalse(bool(m))
    def test_comparison(self):
        mock = Mock()
        def comp(s, o):
            return True
        mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
        self. assertTrue(mock < 3)
        self. assertTrue(mock > 3)
        self. assertTrue(mock <= 3)
        self. assertTrue(mock >= 3)
        # Unconfigured MagicMocks are unorderable by default.
        self.assertRaises(TypeError, lambda: MagicMock() < object())
        self.assertRaises(TypeError, lambda: object() < MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() > object())
        self.assertRaises(TypeError, lambda: object() > MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() <= object())
        self.assertRaises(TypeError, lambda: object() <= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() >= object())
        self.assertRaises(TypeError, lambda: object() >= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
    def test_equality(self):
        for mock in Mock(), MagicMock():
            self.assertEqual(mock == mock, True)
            self.assertIsInstance(mock == mock, bool)
            self.assertEqual(mock != mock, False)
            self.assertIsInstance(mock != mock, bool)
            self.assertEqual(mock == object(), False)
            self.assertEqual(mock != object(), True)
            def eq(self, other):
                return other == 3
            mock.__eq__ = eq
            self.assertTrue(mock == 3)
            self.assertFalse(mock == 4)
            def ne(self, other):
                return other == 3
            mock.__ne__ = ne
            self.assertTrue(mock != 3)
            self.assertFalse(mock != 4)
        mock = MagicMock()
        mock.__eq__.return_value = True
        self.assertIsInstance(mock == 3, bool)
        self.assertEqual(mock == 3, True)
        mock.__ne__.return_value = False
        self.assertIsInstance(mock != 3, bool)
        self.assertEqual(mock != 3, False)
    def test_len_contains_iter(self):
        mock = Mock()
        self.assertRaises(TypeError, len, mock)
        self.assertRaises(TypeError, iter, mock)
        self.assertRaises(TypeError, lambda: 'foo' in mock)
        mock.__len__ = lambda s: 6
        self.assertEqual(len(mock), 6)
        mock.__contains__ = lambda s, o: o == 3
        self.assertTrue(3 in mock)
        self.assertFalse(6 in mock)
        mock.__iter__ = lambda s: iter('foobarbaz')
        self.assertEqual(list(mock), list('foobarbaz'))
    def test_magicmock(self):
        mock = MagicMock()
        mock.__iter__.return_value = iter([1, 2, 3])
        self.assertEqual(list(mock), [1, 2, 3])
        getattr(mock, '__bool__').return_value = False
        self.assertFalse(hasattr(mock, '__nonzero__'))
        self.assertFalse(bool(mock))
        for entry in _magics:
            self.assertTrue(hasattr(mock, entry))
        self.assertFalse(hasattr(mock, '__imaginery__'))
    def test_magic_mock_equality(self):
        mock = MagicMock()
        self.assertIsInstance(mock == object(), bool)
        self.assertIsInstance(mock != object(), bool)
        self.assertEqual(mock == object(), False)
        self.assertEqual(mock != object(), True)
        self.assertEqual(mock == mock, True)
        self.assertEqual(mock != mock, False)
    def test_magicmock_defaults(self):
        mock = MagicMock()
        self.assertEqual(int(mock), 1)
        self.assertEqual(complex(mock), 1j)
        self.assertEqual(float(mock), 1.0)
        self.assertNotIn(object(), mock)
        self.assertEqual(len(mock), 0)
        self.assertEqual(list(mock), [])
        self.assertEqual(hash(mock), object.__hash__(mock))
        self.assertEqual(str(mock), object.__str__(mock))
        self.assertTrue(bool(mock))
        # in Python 3 oct and hex use __index__
        # so these tests are for __index__ in py3k
        self.assertEqual(oct(mock), '0o1')
        self.assertEqual(hex(mock), '0x1')
        # how to test __sizeof__ ?
    def test_magic_methods_and_spec(self):
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)
    def test_magic_methods_and_spec_set(self):
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec_set=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec_set=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec_set=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)
    def test_setting_unsupported_magic_method(self):
        mock = MagicMock()
        def set_setattr():
            mock.__setattr__ = lambda self, name: None
        self.assertRaisesRegex(AttributeError,
            "Attempting to set unsupported magic method '__setattr__'.",
            set_setattr
        )
    def test_attributes_and_return_value(self):
        mock = MagicMock()
        attr = mock.foo
        def _get_type(obj):
            # the type of every mock (or magicmock) is a custom subclass
            # so the real type is the second in the mro
            return type(obj).__mro__[1]
        self.assertEqual(_get_type(attr), MagicMock)
        returned = mock()
        self.assertEqual(_get_type(returned), MagicMock)
    def test_magic_methods_are_magic_mocks(self):
        mock = MagicMock()
        self.assertIsInstance(mock.__getitem__, MagicMock)
        mock[1][2].__getitem__.return_value = 3
        self.assertEqual(mock[1][2][3], 3)
    def test_magic_method_reset_mock(self):
        mock = MagicMock()
        str(mock)
        self.assertTrue(mock.__str__.called)
        mock.reset_mock()
        self.assertFalse(mock.__str__.called)
    def test_dir(self):
        # overriding the default implementation
        for mock in Mock(), MagicMock():
            def _dir(self):
                return ['foo']
            mock.__dir__ = _dir
            self.assertEqual(dir(mock), ['foo'])
    @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
    def test_bound_methods(self):
        m = Mock()
        # XXXX should this be an expected failure instead?
        # this seems like it should work, but is hard to do without introducing
        # other api inconsistencies. Failure message could be better though.
        m.__iter__ = [3].__iter__
        self.assertRaises(TypeError, iter, m)
    def test_magic_method_type(self):
        class Foo(MagicMock):
            pass
        foo = Foo()
        self.assertIsInstance(foo.__int__, Foo)
    def test_descriptor_from_class(self):
        m = MagicMock()
        type(m).__str__.return_value = 'foo'
        self.assertEqual(str(m), 'foo')
    def test_iterable_as_iter_return_value(self):
        m = MagicMock()
        m.__iter__.return_value = [1, 2, 3]
        self.assertEqual(list(m), [1, 2, 3])
        self.assertEqual(list(m), [1, 2, 3])
        # A one-shot iterator, by contrast, is exhausted after the first pass.
        m.__iter__.return_value = iter([4, 5, 6])
        self.assertEqual(list(m), [4, 5, 6])
        self.assertEqual(list(m), [])
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
thanhacun/odoo | addons/lunch/tests/__init__.py | 260 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_lunch
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
allotria/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools.py | 178 | 1550 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (ie: import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
    """Rewrite itertools.i* calls (imap/ifilter/izip/...) to their Python 3
    names, stripping the leading 'i' and, where applicable, the 'itertools.'
    prefix."""
    BM_compatible = True
    it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
    PATTERN = """
              power< it='itertools'
                  trailer<
                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
              |
              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
              """ %(locals())
    # Needs to be run after fix_(map|zip|filter)
    run_order = 6
    def transform(self, node, results):
        """Strip 'itertools.' (when present and appropriate) and rename the
        function by dropping its leading 'i'."""
        prefix = None
        func = results['func'][0]
        if ('it' in results and
            func.value not in (u'ifilterfalse', u'izip_longest')):
            # ifilterfalse/izip_longest stay inside itertools (as
            # filterfalse/zip_longest), so the module prefix is kept for them.
            dot, it = (results['dot'], results['it'])
            # Remove the 'itertools'
            prefix = it.prefix
            it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
            dot.remove()
            func.parent.replace(func)
        prefix = prefix or func.prefix
        func.replace(Name(func.value[1:], prefix=prefix))
| apache-2.0 |
dibaunaumh/tikal-corp-website | cms/templatetags/cms_tags.py | 1 | 19309 | from itertools import chain
from cms.exceptions import NoHomeFound
from cms.utils import get_language_from_request, get_template_from_request
from cms.utils.moderator import get_cmsplugin_queryset, get_page_queryset
from cms.plugin_rendering import render_plugins, render_placeholder, render_placeholder_toolbar
from cms.plugins.utils import get_plugins
from cms.models import Page, Placeholder
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.template.defaultfilters import title
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import Media
import operator
register = template.Library()
def get_site_id(site):
    """Normalize *site* (a Site instance, an int id, or anything falsy)
    to a site id, defaulting to ``settings.SITE_ID``."""
    # The truthiness guard is deliberate: a falsy value of any type falls
    # through to the configured default.
    if site:
        if isinstance(site, Site):
            return site.id
        if isinstance(site, int):
            return site
    return settings.SITE_ID
def has_permission(page, request):
    """Template filter: may the requesting user change *page*?"""
    return page.has_change_permission(request)
register.filter(has_permission)
def _get_cache_key(name, page_lookup, lang, site_id):
    """Build the cache key for a page lookup, keyed by tag name, lookup,
    site and language."""
    # A Page instance is identified by its pk; any other lookup value is
    # stringified as-is.
    if isinstance(page_lookup, Page):
        page_key = str(page_lookup.pk)
    else:
        page_key = str(page_lookup)
    return '%s__page_lookup:%s_site:%s_lang:%s' % (name, page_key, site_id, lang)
def _get_page_by_untyped_arg(page_lookup, request, site_id):
    """
    The `page_lookup` argument can be of any of the following types:
    - Integer: interpreted as `pk` of the desired page
    - String: interpreted as `reverse_id` of the desired page
    - `dict`: a dictionary containing keyword arguments to find the desired page
    (for instance: `{'pk': 1}`)
    - `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
    - `None`: the current page will be used
    """
    if page_lookup is None:
        return request.current_page
    if isinstance(page_lookup, Page):
        return page_lookup
    # Normalize scalar lookups into queryset filter kwargs.
    if isinstance(page_lookup, basestring):
        page_lookup = {'reverse_id': page_lookup}
    elif isinstance(page_lookup, (int, long)):
        page_lookup = {'pk': page_lookup}
    elif not isinstance(page_lookup, dict):
        raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
    page_lookup.update({'site': site_id})
    try:
        return get_page_queryset(request).get(**page_lookup)
    except Page.DoesNotExist:
        # A missing page is a content error: raise loudly in DEBUG, otherwise
        # mail the site managers and render nothing.
        site = Site.objects.get_current()
        subject = _('Page not found on %(domain)s') % {'domain':site.domain}
        body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
            "The URL of the request was: http://%(host)s%(path)s") \
            % {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
        if settings.DEBUG:
            raise Page.DoesNotExist(body)
        else:
            mail_managers(subject, body, fail_silently=True)
            return None
def page_url(context, page_lookup, lang=None, site=None):
    """
    Show the url of a page with a reverse id in the right language
    This is mostly used if you want to have a static link in a template to a page
    """
    site_id = get_site_id(site)
    request = context.get('request', False)
    if not request:
        return {'content': ''}
    if request.current_page == "dummy":
        # Sentinel current_page: no real page to resolve against.
        return {'content': ''}
    if lang is None:
        lang = get_language_from_request(request)
    # Resolved URLs are cached per (lookup, language, site).
    cache_key = _get_cache_key('page_url', page_lookup, lang, site_id)+'_type:absolute_url'
    url = cache.get(cache_key)
    if not url:
        page = _get_page_by_untyped_arg(page_lookup, request, site_id)
        if page:
            url = page.get_absolute_url(language=lang)
            cache.set(cache_key, url, settings.CMS_CONTENT_CACHE_DURATION)
    if url:
        return {'content': url}
    return {'content': ''}
page_url = register.inclusion_tag('cms/content.html', takes_context=True)(page_url)
def page_id_url(context, reverse_id, lang=None, site=None):
    """Backwards-compatible alias for :func:`page_url`."""
    return page_url(context, reverse_id, lang, site)
page_id_url = register.inclusion_tag('cms/content.html', takes_context=True)(page_id_url)
def do_placeholder(parser, token):
    """Compile a {% placeholder %} tag into a PlaceholderNode.

    Accepted forms (see PlaceholderNode for semantics):
        {% placeholder "name" %}
        {% placeholder "name" width %}       (width is deprecated)
        {% placeholder "name" inherit %}
        {% placeholder "name" or %} ... {% endplaceholder %}
    """
    error_string = '%r tag requires at least 1 and accepts at most 2 arguments'
    nodelist_or = None
    inherit = False
    try:
        # split_contents() knows not to split quoted strings.
        bits = token.split_contents()
        # if the `placeholderor` tag was used, look for closing tag, and pass the enclosed nodes
        # to PlaceholderNode below
        if bits[-1].lower() == 'or':
            bits.pop()
            nodelist_or = parser.parse(('endplaceholder',))
            parser.delete_first_token()
        elif bits[-1].lower() == 'inherit':
            bits.pop()
            inherit = True
        else:
            # Last argument may be a (possibly quoted) numeric width.
            bit = bits[-1]
            if bit[0] == bit[-1] and bit[0] in ('"', "'"):
                bit = bit[1:-1]
            if bit.isdigit():
                import warnings
                warnings.warn("The width parameter for the placeholder tag is deprecated.", DeprecationWarning)
    except ValueError:
        raise template.TemplateSyntaxError(error_string % bits[0])
    if len(bits) == 2:
        #tag_name, name
        return PlaceholderNode(bits[1], nodelist_or=nodelist_or, inherit=inherit)
    elif len(bits) == 3:
        #tag_name, name, width
        return PlaceholderNode(bits[1], bits[2], nodelist_or=nodelist_or, inherit=inherit)
    else:
        raise template.TemplateSyntaxError(error_string % bits[0])
class PlaceholderNode(template.Node):
    """This template node is used to output page content and
    is also used in the admin to dynamically generate input fields.
    eg: {% placeholder "placeholder_name" %}
    {% placeholder "sidebar" inherit %}
    {% placeholder "footer" inherit or %}
    <a href="/about/">About us</a>
    {% endplaceholder %}
    Keyword arguments:
    name -- the name of the placeholder
    width -- additional width attribute (integer) which gets added to the plugin context
    (deprecated, use `{% with 320 as width %}{% placeholder "foo"}{% endwith %}`)
    inherit -- optional argument which if given will result in inheriting
    the content of the placeholder with the same name on parent pages
    or -- optional argument which if given will make the template tag a block
    tag whose content is shown if the placeholder is empty
    """
    def __init__(self, name, width=None, nodelist_or=None, inherit=False):
        # Strip quote characters the parser may have left around the name.
        self.name = "".join(name.lower().split('"'))
        if width:
            self.width_var = template.Variable(width)
        self.nodelist_or = nodelist_or
        self.inherit = inherit
    def __repr__(self):
        return "<Placeholder Node: %s>" % self.name
    def render(self, context):
        """Render the placeholder's plugins, creating missing Placeholder
        rows for the page's template on first access."""
        if not 'request' in context:
            return ''
        request = context['request']
        width_var = getattr(self, 'width_var', None)
        if width_var:
            try:
                width = int(width_var.resolve(context))
                context.update({'width': width})
            except (template.VariableDoesNotExist, ValueError):
                # Unresolvable/non-numeric width: render without one.
                pass
        page = request.current_page
        if not page or page == 'dummy':
            return ''
        try:
            placeholder = page.placeholders.get(slot=self.name)
        except Placeholder.DoesNotExist:
            # Lazily create placeholder rows for every slot declared in the
            # page's template, remembering the one this node renders.
            from cms.utils.plugins import get_placeholders
            placeholders = get_placeholders(page.get_template())
            found = None
            for slot in placeholders:
                new, created = page.placeholders.get_or_create(slot=slot)
                if slot == self.name:
                    found = new
            placeholder = found
            if not found:
                if settings.DEBUG:
                    raise Placeholder.DoesNotExist("No placeholder '%s' found for page '%s'" % (self.name, page.pk))
                else:
                    return "<!-- ERROR:cms.utils.plugins.get_placeholders:%s -->" % self.name
        content = self.get_content(request, page, context)
        if not content:
            if self.nodelist_or:
                # Fall back to the {% placeholder ... or %} block body.
                content = self.nodelist_or.render(context)
            if self.edit_mode(placeholder, context):
                return render_placeholder_toolbar(placeholder, context, content)
            return content
        return content
    def edit_mode(self, placeholder, context):
        """Return True when the frontend editing toolbar should be shown for
        this placeholder (staff user, toolbar middleware, edit flag set)."""
        from cms.utils.placeholder import get_page_from_placeholder_if_exists
        request = context['request']
        page = get_page_from_placeholder_if_exists(placeholder)
        if ("edit" in request.GET or request.session.get("cms_edit", False)) and \
            'cms.middleware.toolbar.ToolbarMiddleware' in settings.MIDDLEWARE_CLASSES and \
            request.user.is_staff and request.user.is_authenticated() and \
            (not page or page.has_change_permission(request)):
            return True
        return False
    def get_content(self, request, page, context):
        """Render the first non-empty placeholder content, walking up the
        page's ancestors when ``inherit`` was requested."""
        from cms.utils.plugins import get_placeholders
        pages = [page]
        if self.inherit:
            pages = chain([page], page.get_cached_ancestors(ascending=True))
        for page in pages:
            template = get_template_from_request(request, page)
            placeholder = page.placeholders.filter(slot__in=get_placeholders(template)).get(slot=self.name)
            if not get_plugins(request, placeholder):
                continue
            # Accumulate this placeholder's media on the request, if tracked.
            if hasattr(request, 'placeholder_media'):
                request.placeholder_media = reduce(operator.add, [request.placeholder_media, placeholder.get_media(request, context)])
                #request.placeholder_media += placeholder.get_media(request, context)
            content = render_placeholder(placeholder, context)
            if content:
                return content
        return ''
register.tag('placeholder', do_placeholder)
def do_page_attribute(parser, token):
    """Compile a {% page_attribute %} tag into a PageAttributeNode."""
    error_string = '%r tag requires one argument' % token.contents[0]
    try:
        # split_contents() knows not to split quoted strings.
        bits = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(error_string)
    if len(bits) >= 2:
        # tag_name, name
        # tag_name, name, page_lookup
        page_lookup = len(bits) == 3 and bits[2] or None
        return PageAttributeNode(bits[1], page_lookup)
    else:
        raise template.TemplateSyntaxError(error_string)
class PageAttributeNode(template.Node):
    """This template node is used to output attribute from a page such
    as its title or slug.

    Synopsis
        {% page_attribute "field-name" %}
        {% page_attribute "field-name" page_lookup %}

    Example
        {# Output current page's page_title attribute: #}
        {% page_attribute "page_title" %}
        {# Output page_title attribute of the page with reverse_id "the_page": #}
        {% page_attribute "page_title" "the_page" %}
        {# Output slug attribute of the page with pk 10: #}
        {% page_attribute "slug" 10 %}

    Keyword arguments:
    field-name -- the name of the field to output. Use one of:
    - title
    - menu_title
    - page_title
    - slug
    - meta_description
    - meta_keywords

    page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
    See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
    for the page_lookup argument.
    """
    def __init__(self, name, page_lookup=None):
        # name may be a quoted literal or a context variable holding the
        # attribute name; resolved lazily in render().
        self.name_var = template.Variable(name)
        # NOTE(review): this attribute is never read again -- render() uses
        # page_lookup_var via getattr(); looks like dead code, confirm.
        self.page_lookup = None
        self.valid_attributes = ["title", "slug", "meta_description", "meta_keywords", "page_title", "menu_title"]
        if page_lookup:
            self.page_lookup_var = template.Variable(page_lookup)
    def render(self, context):
        # Without a request there is no page/language context to resolve.
        if not 'request' in context:
            return ''
        var_name = self.name_var.var.lower()
        if var_name in self.valid_attributes:
            # Variable name without quotes works, but is deprecated
            self.name = var_name
        else:
            self.name = self.name_var.resolve(context)
        lang = get_language_from_request(context['request'])
        page_lookup_var = getattr(self, 'page_lookup_var', None)
        if page_lookup_var:
            page_lookup = page_lookup_var.resolve(context)
        else:
            page_lookup = None
        page = _get_page_by_untyped_arg(page_lookup, context['request'], get_site_id(None))
        # "dummy" is the sentinel returned while serving the edit preview.
        if page == "dummy":
            return ''
        if page and self.name in self.valid_attributes:
            # Delegate to the page's get_<attr>() accessor with language
            # fallback enabled.
            f = getattr(page, "get_"+self.name)
            return f(language=lang, fallback=True)
        return ''
    def __repr__(self):
        # NOTE(review): self.name only exists after render() has run.
        return "<PageAttribute Node: %s>" % self.name
register.tag('page_attribute', do_page_attribute)
def clean_admin_list_filter(cl, spec):
    """
    used in admin to display only these users that have actually edited a page and not everybody
    """
    # Sort by query string, then keep only the first of each run of
    # duplicates (sorting guarantees duplicates are adjacent).
    last_seen = None
    deduped = []
    for choice in sorted(spec.choices(cl), key=lambda c: c['query_string']):
        qs = choice['query_string']
        if qs != last_seen:
            deduped.append(choice)
            last_seen = qs
    return {'title': spec.title(), 'choices': deduped}
clean_admin_list_filter = register.inclusion_tag('admin/filter.html')(clean_admin_list_filter)
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
        site=None, cache_result=True):
    """
    Shows the content of a page with a placeholder name and given lookup arguments in the given language.
    This is useful if you want to have some more or less static content that is shared among many pages,
    such as a footer.

    See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
    for the page_lookup argument.

    Returns a dict {'content': ...} for the cms/content.html inclusion tag.
    """
    request = context.get('request', False)
    site_id = get_site_id(site)
    if not request:
        return {'content': ''}
    if lang is None:
        lang = get_language_from_request(request)
    content = None
    if cache_result:
        cache_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)+'_placeholder:'+placeholder_name
        content = cache.get(cache_key)
    if not content:
        # NOTE(review): an empty rendered result ('') is falsy, so empty
        # placeholders are re-rendered on every request even with
        # cache_result=True -- confirm whether that is intentional.
        page = _get_page_by_untyped_arg(page_lookup, request, site_id)
        if not page:
            return {'content': ''}
        placeholder = page.placeholders.get(slot=placeholder_name)
        plugins = get_cmsplugin_queryset(request).filter(placeholder=placeholder, language=lang, placeholder__slot__iexact=placeholder_name, parent__isnull=True).order_by('position').select_related()
        c = render_plugins(plugins, context, placeholder)
        content = "".join(c)
        if cache_result:
            cache.set(cache_key, content, settings.CMS_CONTENT_CACHE_DURATION)
    if content:
        return {'content': mark_safe(content)}
    return {'content': ''}
def show_placeholder_by_id(context, placeholder_name, reverse_id, lang=None, site=None):
    """
    Show the content of a specific placeholder, from a page found by reverse id, in the given language.

    This templatetag is deprecated, replace with `show_placeholder`.
    """
    return _show_placeholder_for_page(
        context, placeholder_name, reverse_id,
        lang=lang, site=site, cache_result=True)
show_placeholder_by_id = register.inclusion_tag('cms/content.html', takes_context=True)(show_placeholder_by_id)
def show_uncached_placeholder_by_id(context, placeholder_name, reverse_id, lang=None, site=None):
    """
    Show the uncached content of a specific placeholder, from a page found by reverse id, in the given language.

    This templatetag is deprecated, replace with `show_uncached_placeholder`.
    """
    return _show_placeholder_for_page(
        context, placeholder_name, reverse_id,
        lang=lang, site=site, cache_result=False)
show_uncached_placeholder_by_id = register.inclusion_tag('cms/content.html', takes_context=True)(show_uncached_placeholder_by_id)
def show_placeholder(context, placeholder_name, page_lookup, lang=None, site=None):
    """
    Show the content of a specific placeholder, from a page found by pk|reverse_id|dict
    or passed to the function, in the given language.
    """
    return _show_placeholder_for_page(
        context, placeholder_name, page_lookup,
        lang=lang, site=site, cache_result=True)
show_placeholder_for_page = register.inclusion_tag('cms/content.html', takes_context=True)(show_placeholder)
def show_uncached_placeholder(context, placeholder_name, page_lookup, lang=None, site=None):
    """
    Show the uncached content of a specific placeholder, from a page found by pk|reverse_id|dict
    or passed to the function, in the given language.
    """
    return _show_placeholder_for_page(
        context, placeholder_name, page_lookup,
        lang=lang, site=site, cache_result=False)
show_uncached_placeholder_for_page = register.inclusion_tag('cms/content.html', takes_context=True)(show_uncached_placeholder)
def do_plugins_media(parser, token):
    """Compile {% plugins_media [page_lookup] %} into a PluginsMediaNode.

    Raises TemplateSyntaxError when more than one argument is supplied.
    """
    args = token.split_contents()
    if len(args) > 2:
        # Bug fix: this error path referenced an undefined name ``tag``,
        # which raised NameError instead of the intended syntax error.
        # args[0] is the tag name as written in the template.
        raise template.TemplateSyntaxError("Invalid syntax. Expected "
            "'{%% %s [page_lookup] %%}'" % args[0])
    elif len(args) == 2:
        page_lookup = args[1]
    else:
        page_lookup = None
    return PluginsMediaNode(page_lookup)
class PluginsMediaNode(template.Node):
    """
    This template node is used to output media for plugins.

    eg: {% plugins_media %}

    You can also pass the object a page_lookup arg if you want to output media tags for a specific
    page other than the current page.

    eg: {% plugins_media "gallery" %}
    """
    def __init__(self, page_lookup=None):
        # Only set the variable when a lookup was given; render() probes
        # for it with getattr().
        if page_lookup:
            self.page_lookup_var = template.Variable(page_lookup)
    def render(self, context):
        # Fix: get_plugins_media was imported twice in this method; the
        # duplicate import has been removed.
        from cms.plugins.utils import get_plugins_media
        if not 'request' in context:
            return ''
        request = context['request']
        plugins_media = None
        page_lookup_var = getattr(self, 'page_lookup_var', None)
        if page_lookup_var:
            # Explicit page requested via the template argument.
            page_lookup = page_lookup_var.resolve(context)
            page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
            plugins_media = get_plugins_media(request, context, page)
        else:
            page = request.current_page
            # "dummy" is the sentinel used while serving the edit preview.
            if page == "dummy":
                return ''
            # make sure the plugin cache is filled
            plugins_media = get_plugins_media(request, context, request._current_page_cache)
        if plugins_media:
            return plugins_media.render()
        else:
            return u''
    def __repr__(self):
        return "<PluginsMediaNode Node: %s>" % getattr(self, 'name', '')
register.tag('plugins_media', do_plugins_media)
| bsd-3-clause |
yoer/hue | desktop/core/ext-py/Django-1.6.10/django/core/management/commands/diffsettings.py | 120 | 1648 | from optparse import make_option
from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    """Convert a module namespace into a dict mapping each kept attribute
    name to the repr() of its value.  Names for which *omittable* returns
    true (by default, underscore-prefixed ones) are skipped."""
    return {name: repr(value)
            for name, value in module.__dict__.items()
            if not omittable(name)}
class Command(NoArgsCommand):
    """Management command: diff the active settings against Django's
    global defaults (like ``diffsettings``)."""
    help = """Displays differences between the current settings.py and Django's
    default settings. Settings that don't appear in the defaults are
    followed by "###"."""
    option_list = NoArgsCommand.option_list + (
        make_option('--all', action='store_true', dest='all', default=False,
            help='Display all settings, regardless of their value. '
            'Default values are prefixed by "###".'),
    )
    # No model validation needed; this command never touches the ORM.
    requires_model_validation = False
    def handle_noargs(self, **options):
        """Return one line per setting that differs from (or, with --all,
        matches) the global defaults, sorted by setting name."""
        # Inspired by Postfix's "postconf -n".
        from django.conf import settings, global_settings
        # Because settings are imported lazily, we need to explicitly load them.
        settings._setup()
        user_settings = module_to_dict(settings._wrapped)
        default_settings = module_to_dict(global_settings)
        output = []
        for key in sorted(user_settings):
            if key not in default_settings:
                # Setting unknown to Django's defaults.
                output.append("%s = %s ###" % (key, user_settings[key]))
            elif user_settings[key] != default_settings[key]:
                # Overridden setting.
                output.append("%s = %s" % (key, user_settings[key]))
            elif options['all']:
                # Unchanged default, shown only with --all.
                output.append("### %s = %s" % (key, user_settings[key]))
        return '\n'.join(output)
| apache-2.0 |
luca76/QGIS | python/ext-libs/pygments/styles/murphy.py | 364 | 2751 | # -*- coding: utf-8 -*-
"""
pygments.styles.murphy
~~~~~~~~~~~~~~~~~~~~~~
Murphy's style from CodeRay.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class MurphyStyle(Style):
    """
    Murphy's style from CodeRay.
    """
    default_style = ""
    # Maps Pygments token types to style strings: hex colors plus optional
    # modifiers ("bold", "italic", "bg:#rrggbb").  Subtokens inherit from
    # their parent token unless overridden (e.g. Comment.Preproc resets
    # the italic set on Comment via "noitalic").
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "#666 italic",
        Comment.Preproc:           "#579 noitalic",
        Comment.Special:           "#c00 bold",
        Keyword:                   "bold #289",
        Keyword.Pseudo:            "#08f",
        Keyword.Type:              "#66f",
        Operator:                  "#333",
        Operator.Word:             "bold #000",
        Name.Builtin:              "#072",
        Name.Function:             "bold #5ed",
        Name.Class:                "bold #e9e",
        Name.Namespace:            "bold #0e84b5",
        Name.Exception:            "bold #F00",
        Name.Variable:             "#036",
        Name.Variable.Instance:    "#aaf",
        Name.Variable.Class:       "#ccf",
        Name.Variable.Global:      "#f84",
        Name.Constant:             "bold #5ed",
        Name.Label:                "bold #970",
        Name.Entity:               "#800",
        Name.Attribute:            "#007",
        Name.Tag:                  "#070",
        Name.Decorator:            "bold #555",
        String:                    "bg:#e0e0ff",
        String.Char:               "#88F bg:",
        String.Doc:                "#D42 bg:",
        String.Interpol:           "bg:#eee",
        String.Escape:             "bold #666",
        String.Regex:              "bg:#e0e0ff #000",
        String.Symbol:             "#fc8 bg:",
        String.Other:              "#f88",
        Number:                    "bold #60E",
        Number.Integer:            "bold #66f",
        Number.Float:              "bold #60E",
        Number.Hex:                "bold #058",
        Number.Oct:                "bold #40E",
        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #c65d09",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",
        Error:                     "#F00 bg:#FAA"
    }
| gpl-2.0 |
guorendong/iridium-browser-ubuntu | tools/telemetry/telemetry/results/buildbot_output_formatter.py | 8 | 4594 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import perf_tests_helper
from telemetry.results import output_formatter
from telemetry import value as value_module
from telemetry.value import summary as summary_module
class BuildbotOutputFormatter(output_formatter.OutputFormatter):
    """Formats telemetry results in the line-oriented format consumed by
    buildbot perf-dashboard parsers."""
    def __init__(self, output_stream, trace_tag=''):
        # trace_tag, when set, suppresses per-page output and is appended
        # to summary trace names (see the _Print* helpers below).
        super(BuildbotOutputFormatter, self).__init__(output_stream)
        self._trace_tag = trace_tag
    def _PrintPerfResult(self, measurement, trace, v, units,
                         result_type='default'):
        """Write one buildbot perf line to the output stream and flush."""
        output = perf_tests_helper.PrintPerfResult(
            measurement, trace, v, units, result_type, print_to_stdout=False)
        self.output_stream.write(output + '\n')
        self.output_stream.flush()
    def Format(self, page_test_results):
        """Print summary data in a format expected by buildbot for perf dashboards.

        If any failed pages exist, only output individual page results, and do
        not output any average data.
        """
        had_failures = len(page_test_results.failures) > 0
        # Print out the list of unique pages.
        perf_tests_helper.PrintPages(
            [page.display_name for page in page_test_results.pages_that_succeeded])
        summary = summary_module.Summary(page_test_results.all_page_specific_values)
        for value in summary.interleaved_computed_per_page_values_and_summaries:
            # Values with a page are per-page results; values without one
            # are computed summaries across pages.
            if value.page:
                self._PrintComputedPerPageValue(value)
            else:
                self._PrintComputedSummaryValue(value, had_failures)
        self._PrintOverallResults(page_test_results)
    def _PrintComputedPerPageValue(self, value):
        """Emit a single per-page value, unless a trace tag is active or the
        value has no buildbot representation."""
        # We dont print per-page-values when there is a trace tag.
        if self._trace_tag:
            return
        # Actually print the result.
        buildbot_value = value.GetBuildbotValue()
        buildbot_data_type = value.GetBuildbotDataType(
            output_context=value_module.PER_PAGE_RESULT_OUTPUT_CONTEXT)
        if buildbot_value is None or buildbot_data_type is None:
            return
        buildbot_measurement_name, buildbot_trace_name = (
            value.GetChartAndTraceNameForPerPageResult())
        self._PrintPerfResult(buildbot_measurement_name,
                              buildbot_trace_name,
                              buildbot_value, value.units, buildbot_data_type)
    def _PrintComputedSummaryValue(self, value, had_failures):
        """Emit a computed (cross-page) summary value; suppressed entirely
        when any page failed."""
        # If there were any page errors, we typically will print nothing.
        #
        # Note: this branch is structured less-densely to improve legibility.
        if had_failures:
            return
        buildbot_value = value.GetBuildbotValue()
        buildbot_data_type = value.GetBuildbotDataType(
            output_context=value_module.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT)
        if buildbot_value is None or buildbot_data_type is None:
            return
        buildbot_measurement_name, buildbot_trace_name = (
            value.GetChartAndTraceNameForComputedSummaryResult(
                self._trace_tag))
        self._PrintPerfResult(buildbot_measurement_name,
                              buildbot_trace_name,
                              buildbot_value, value.units, buildbot_data_type)
    def _PrintOverallResults(self, page_test_results):
        """Emit values not tied to any page, plus failure/error counters."""
        # If there were no failed pages, output the overall results (results not
        # associated with a page).
        had_failures = len(page_test_results.failures) > 0
        if not had_failures:
            for value in page_test_results.all_summary_values:
                buildbot_value = value.GetBuildbotValue()
                buildbot_data_type = value.GetBuildbotDataType(
                    output_context=value_module.SUMMARY_RESULT_OUTPUT_CONTEXT)
                buildbot_measurement_name, buildbot_trace_name = (
                    value.GetChartAndTraceNameForComputedSummaryResult(
                        self._trace_tag))
                self._PrintPerfResult(
                    buildbot_measurement_name,
                    buildbot_trace_name,
                    buildbot_value,
                    value.units,
                    buildbot_data_type)
        # Print the number of failed and errored pages.
        self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
                              [len(page_test_results.failures)], 'count',
                              'unimportant')
        # TODO(chrishenry): Remove this in a separate patch to reduce the risk
        # of rolling back due to buildbot breakage.
        # Also fix src/tools/bisect-perf-regression_test.py when this is
        # removed.
        self._PrintPerfResult('telemetry_page_measurement_results', 'num_errored',
                              [0], 'count', 'unimportant')
| bsd-3-clause |
demarle/VTK | ThirdParty/Twisted/twisted/test/test_postfix.py | 41 | 3558 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.postfix module.
"""
from twisted.trial import unittest
from twisted.protocols import postfix
from twisted.test.proto_helpers import StringTransport
class PostfixTCPMapQuoteTestCase(unittest.TestCase):
    """Round-trip tests for postfix.quote / postfix.unquote."""
    data = [
        # (raw, quoted, [aliasQuotedForms]),
        ('foo', 'foo'),
        ('foo bar', 'foo%20bar'),
        ('foo\tbar', 'foo%09bar'),
        ('foo\nbar', 'foo%0Abar', 'foo%0abar'),
        ('foo\r\nbar', 'foo%0D%0Abar', 'foo%0D%0abar', 'foo%0d%0Abar', 'foo%0d%0abar'),
        ('foo ', 'foo%20'),
        (' foo', '%20foo'),
    ]

    def testData(self):
        for entry in self.data:
            raw = entry[0]
            quoted_forms = entry[1:]
            # quote() must produce the canonical (first) quoted form ...
            self.assertEqual(postfix.quote(raw), quoted_forms[0])
            # ... and every accepted variant must unquote back to raw.
            for form in quoted_forms:
                self.assertEqual(postfix.unquote(form), raw)
class PostfixTCPMapServerTestCase:
    """Mixin driving a PostfixTCPMapServer through self.chat and checking
    each response; subclasses supply ``data`` and ``chat``."""
    data = {
        # 'key': 'value',
    }
    chat = [
        # (input, expected_output),
    ]

    def _assertChat(self, factory):
        """Connect a PostfixTCPMapServer to *factory* over a string
        transport, feed every line in self.chat, and assert the expected
        response for each.  (Extracted from the two previously duplicated
        test bodies.)"""
        transport = StringTransport()
        protocol = postfix.PostfixTCPMapServer()
        protocol.service = factory
        protocol.factory = factory
        protocol.makeConnection(transport)
        for input, expected_output in self.chat:
            protocol.lineReceived(input)
            self.assertEqual(
                transport.value(), expected_output,
                'For %r, expected %r but got %r' % (
                    input, expected_output, transport.value()))
            transport.clear()
        # Cancel the idle timeout so trial does not report a dirty reactor.
        protocol.setTimeout(None)

    def test_chat(self):
        """
        Test that I{get} and I{put} commands are responded to correctly by
        L{postfix.PostfixTCPMapServer} when its factory is an instance of
        L{postifx.PostfixTCPMapDictServerFactory}.
        """
        self._assertChat(postfix.PostfixTCPMapDictServerFactory(self.data))

    def test_deferredChat(self):
        """
        Test that I{get} and I{put} commands are responded to correctly by
        L{postfix.PostfixTCPMapServer} when its factory is an instance of
        L{postifx.PostfixTCPMapDeferringDictServerFactory}.
        """
        self._assertChat(postfix.PostfixTCPMapDeferringDictServerFactory(self.data))
class Valid(PostfixTCPMapServerTestCase, unittest.TestCase):
    """Concrete chat fixture: exercises argument-count errors, the
    unimplemented ``put`` command, successful and missing ``get`` lookups
    (with %XX-quoted values), and unknown commands."""
    data = {
        'foo': 'ThisIs Foo',
        'bar': ' bar really is found\r\n',
    }
    chat = [
        ('get', "400 Command 'get' takes 1 parameters.\n"),
        ('get foo bar', "500 \n"),
        ('put', "400 Command 'put' takes 2 parameters.\n"),
        ('put foo', "400 Command 'put' takes 2 parameters.\n"),
        ('put foo bar baz', "500 put is not implemented yet.\n"),
        ('put foo bar', '500 put is not implemented yet.\n'),
        ('get foo', '200 ThisIs%20Foo\n'),
        ('get bar', '200 %20bar%20really%20is%20found%0D%0A\n'),
        ('get baz', '500 \n'),
        ('foo', '400 unknown command\n'),
    ]
| bsd-3-clause |
dulems/hue | desktop/core/ext-py/Django-1.6.10/tests/settings_tests/tests.py | 49 | 11443 | import os
import warnings
from django.conf import settings, global_settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import SimpleTestCase, TransactionTestCase, TestCase, signals
from django.test.utils import override_settings
from django.utils import unittest, six
@override_settings(TEST='override', TEST_OUTER='outer')
class FullyDecoratedTranTestCase(TransactionTestCase):
    """Checks that class-level override_settings applies to a
    TransactionTestCase, can be further overridden per-method, and does not
    mangle the decorated class's name/module."""
    available_apps = []
    def test_override(self):
        self.assertEqual(settings.TEST, 'override')
        self.assertEqual(settings.TEST_OUTER, 'outer')
    @override_settings(TEST='override2')
    def test_method_override(self):
        # Method decorator wins over the class decorator for TEST only.
        self.assertEqual(settings.TEST, 'override2')
        self.assertEqual(settings.TEST_OUTER, 'outer')
    def test_decorated_testcase_name(self):
        self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
    def test_decorated_testcase_module(self):
        self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@override_settings(TEST='override')
class FullyDecoratedTestCase(TestCase):
    """Same class/method override_settings layering as above, but for a
    plain TestCase."""
    def test_override(self):
        self.assertEqual(settings.TEST, 'override')
    @override_settings(TEST='override2')
    def test_method_override(self):
        self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
    """
    Dummy class for testing max recursion error in child class call to
    super(). Refs #17011.
    """
    def test_max_recursion_error(self):
        pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
    """Decorated subclass: verifies the override applies and that calling
    an overridden method via super() does not recurse infinitely."""
    def test_override(self):
        self.assertEqual(settings.TEST, 'override')
    @override_settings(TEST='override2')
    def test_method_override(self):
        self.assertEqual(settings.TEST, 'override2')
    def test_max_recursion_error(self):
        """
        Overriding a method on a super class and then calling that method on
        the super class should not trigger infinite recursion. See #17011.
        """
        try:
            super(ClassDecoratedTestCase, self).test_max_recursion_error()
        except RuntimeError:
            # RuntimeError here would mean the decorator re-wrapped the
            # inherited method and recursed.
            self.fail()
class SettingsTests(TestCase):
    """Behavioral tests for runtime settings mutation, the settings()/
    override_settings context managers and decorators, the setting_changed
    signal, and deletion semantics."""
    def setUp(self):
        # Track the last value seen for TEST via the setting_changed signal.
        self.testvalue = None
        signals.setting_changed.connect(self.signal_callback)
    def tearDown(self):
        signals.setting_changed.disconnect(self.signal_callback)
    def signal_callback(self, sender, setting, value, **kwargs):
        if setting == 'TEST':
            self.testvalue = value
    def test_override(self):
        settings.TEST = 'test'
        self.assertEqual('test', settings.TEST)
        with self.settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
        self.assertEqual('test', settings.TEST)
        del settings.TEST
    def test_override_change(self):
        # A change made inside the override block is discarded on exit.
        settings.TEST = 'test'
        self.assertEqual('test', settings.TEST)
        with self.settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
            settings.TEST = 'test2'
        self.assertEqual('test', settings.TEST)
        del settings.TEST
    def test_override_doesnt_leak(self):
        # A setting created only inside the block must not survive it.
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
        with self.settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
            settings.TEST = 'test'
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
    @override_settings(TEST='override')
    def test_decorator(self):
        self.assertEqual('override', settings.TEST)
    def test_context_manager(self):
        # override_settings used manually via enable()/disable().
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
        override = override_settings(TEST='override')
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
        override.enable()
        self.assertEqual('override', settings.TEST)
        override.disable()
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
    def test_class_decorator(self):
        # SimpleTestCase can be decorated by override_settings, but not ut.TestCase
        class SimpleTestCaseSubclass(SimpleTestCase):
            pass
        class UnittestTestCaseSubclass(unittest.TestCase):
            pass
        decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
        self.assertIsInstance(decorated, type)
        self.assertTrue(issubclass(decorated, SimpleTestCase))
        with six.assertRaisesRegex(self, Exception,
                "Only subclasses of Django SimpleTestCase*"):
            decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
    def test_signal_callback_context_manager(self):
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
        with self.settings(TEST='override'):
            self.assertEqual(self.testvalue, 'override')
        # Exiting the block signals the setting's removal (value None).
        self.assertEqual(self.testvalue, None)
    @override_settings(TEST='override')
    def test_signal_callback_decorator(self):
        self.assertEqual(self.testvalue, 'override')
    #
    # Regression tests for #10130: deleting settings.
    #
    def test_settings_delete(self):
        settings.TEST = 'test'
        self.assertEqual('test', settings.TEST)
        del settings.TEST
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
    def test_settings_delete_wrapped(self):
        # The lazy wrapper's internal attribute must be protected.
        self.assertRaises(TypeError, delattr, settings, '_wrapped')
    def test_override_settings_delete(self):
        """
        Allow deletion of a setting in an overriden settings set (#18824)
        """
        previous_i18n = settings.USE_I18N
        with self.settings(USE_I18N=False):
            del settings.USE_I18N
            self.assertRaises(AttributeError, getattr, settings, 'USE_I18N')
        # The deletion is rolled back along with the override.
        self.assertEqual(settings.USE_I18N, previous_i18n)
    def test_override_settings_nested(self):
        """
        Test that override_settings uses the actual _wrapped attribute at
        runtime, not when it was instantiated.
        """
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
        self.assertRaises(AttributeError, getattr, settings, 'TEST2')
        inner = override_settings(TEST2='override')
        with override_settings(TEST='override'):
            self.assertEqual('override', settings.TEST)
            with inner:
                self.assertEqual('override', settings.TEST)
                self.assertEqual('override', settings.TEST2)
            # inner's __exit__ should have restored the settings of the outer
            # context manager, not those when the class was instantiated
            self.assertEqual('override', settings.TEST)
            self.assertRaises(AttributeError, getattr, settings, 'TEST2')
        self.assertRaises(AttributeError, getattr, settings, 'TEST')
        self.assertRaises(AttributeError, getattr, settings, 'TEST2')
    def test_allowed_include_roots_string(self):
        """
        ALLOWED_INCLUDE_ROOTS is not allowed to be incorrectly set to a string
        rather than a tuple.
        """
        self.assertRaises(ValueError, setattr, settings,
            'ALLOWED_INCLUDE_ROOTS', '/var/www/ssi/')
class TrailingSlashURLTests(TestCase):
    """
    Tests for the MEDIA_URL and STATIC_URL settings.

    They must end with a slash to ensure there's a deterministic way to build
    paths in templates.
    """
    settings_module = settings
    def setUp(self):
        # Save the originals so each test can mutate freely.
        self._original_media_url = self.settings_module.MEDIA_URL
        self._original_static_url = self.settings_module.STATIC_URL
    def tearDown(self):
        self.settings_module.MEDIA_URL = self._original_media_url
        self.settings_module.STATIC_URL = self._original_static_url
    def test_blank(self):
        """
        The empty string is accepted, even though it doesn't end in a slash.
        """
        self.settings_module.MEDIA_URL = ''
        self.assertEqual('', self.settings_module.MEDIA_URL)
        self.settings_module.STATIC_URL = ''
        self.assertEqual('', self.settings_module.STATIC_URL)
    def test_end_slash(self):
        """
        It works if the value ends in a slash.
        """
        self.settings_module.MEDIA_URL = '/foo/'
        self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
        self.settings_module.MEDIA_URL = 'http://media.foo.com/'
        self.assertEqual('http://media.foo.com/',
            self.settings_module.MEDIA_URL)
        self.settings_module.STATIC_URL = '/foo/'
        self.assertEqual('/foo/', self.settings_module.STATIC_URL)
        self.settings_module.STATIC_URL = 'http://static.foo.com/'
        self.assertEqual('http://static.foo.com/',
            self.settings_module.STATIC_URL)
    def test_no_end_slash(self):
        """
        An ImproperlyConfigured exception is raised if the value doesn't end
        in a slash.
        """
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.MEDIA_URL = '/foo'
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.MEDIA_URL = 'http://media.foo.com'
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.STATIC_URL = '/foo'
        with self.assertRaises(ImproperlyConfigured):
            self.settings_module.STATIC_URL = 'http://static.foo.com'
    def test_double_slash(self):
        """
        If the value ends in more than one slash, presume they know what
        they're doing.
        """
        self.settings_module.MEDIA_URL = '/stupid//'
        self.assertEqual('/stupid//', self.settings_module.MEDIA_URL)
        self.settings_module.MEDIA_URL = 'http://media.foo.com/stupid//'
        self.assertEqual('http://media.foo.com/stupid//',
            self.settings_module.MEDIA_URL)
        self.settings_module.STATIC_URL = '/stupid//'
        self.assertEqual('/stupid//', self.settings_module.STATIC_URL)
        self.settings_module.STATIC_URL = 'http://static.foo.com/stupid//'
        self.assertEqual('http://static.foo.com/stupid//',
            self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(TestCase):
    """Tests for the SECURE_PROXY_SSL_HEADER setting: request.is_secure()
    must report True only when the configured (header, value) pair matches
    the request's META exactly."""
    settings_module = settings
    def setUp(self):
        self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
    def tearDown(self):
        self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
    def test_none(self):
        # Feature disabled: never secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = None
        req = HttpRequest()
        self.assertEqual(req.is_secure(), False)
    def test_set_without_xheader(self):
        # Configured but the proxy header is absent from the request.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        self.assertEqual(req.is_secure(), False)
    def test_set_with_xheader_wrong(self):
        # Header present but with a non-matching value.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
        self.assertEqual(req.is_secure(), False)
    def test_set_with_xheader_right(self):
        # Header present with the expected value: request is secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
        req = HttpRequest()
        req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
        self.assertEqual(req.is_secure(), True)
| apache-2.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/test/test_modulefinder.py | 79 | 8949 | import os
import errno
import importlib.machinery
import py_compile
import shutil
import unittest
import tempfile
from test import support
import modulefinder
TEST_DIR = tempfile.mkdtemp()
TEST_PATH = [TEST_DIR, os.path.dirname(tempfile.__file__)]
# Each test description is a list of 5 items:
#
# 1. a module name that will be imported by modulefinder
# 2. a list of module names that modulefinder is required to find
# 3. a list of module names that modulefinder should complain
# about because they are not found
# 4. a list of module names that modulefinder should complain
# about because they MAY be not found
# 5. a string specifying packages to create; the format is obvious imo.
#
# Each package will be created in TEST_DIR, and TEST_DIR will be
# removed after the tests again.
# Modulefinder searches in a path that contains TEST_DIR, plus
# the standard Lib directory.
maybe_test = [
"a.module",
["a", "a.module", "sys",
"b"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from sys import *
"""]
maybe_test_new = [
"a.module",
["a", "a.module", "sys",
"b", "__future__"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from __future__ import absolute_import
from sys import *
"""]
package_test = [
"a.module",
["a", "a.b", "a.c", "a.module", "mymodule", "sys"],
["blahblah", "c"], [],
"""\
mymodule.py
a/__init__.py
import blahblah
from a import b
import c
a/module.py
import sys
from a import b as x
from a.c import sillyname
a/b.py
a/c.py
from a.module import x
import mymodule as sillyname
from sys import version_info
"""]
absolute_import_test = [
"a.module",
["a", "a.module",
"b", "b.x", "b.y", "b.z",
"__future__", "sys", "gc"],
["blahblah", "z"], [],
"""\
mymodule.py
a/__init__.py
a/module.py
from __future__ import absolute_import
import sys # sys
import blahblah # fails
import gc # gc
import b.x # b.x
from b import y # b.y
from b.z import * # b.z.*
a/gc.py
a/sys.py
import mymodule
a/b/__init__.py
a/b/x.py
a/b/y.py
a/b/z.py
b/__init__.py
import z
b/unused.py
b/x.py
b/y.py
b/z.py
"""]
relative_import_test = [
"a.module",
["__future__",
"a", "a.module",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.moduleC",
"a.b.c.d", "a.b.c.e",
"a.b.x",
"gc"],
[], [],
"""\
mymodule.py
a/__init__.py
from .b import y, z # a.b.y, a.b.z
a/module.py
from __future__ import absolute_import # __future__
import gc # gc
a/gc.py
a/sys.py
a/b/__init__.py
from ..b import x # a.b.x
#from a.b.c import moduleC
from .c import moduleC # a.b.moduleC
a/b/x.py
a/b/y.py
a/b/z.py
a/b/g.py
a/b/c/__init__.py
from ..c import e # a.b.c.e
a/b/c/moduleC.py
from ..c import d # a.b.c.d
a/b/c/d.py
a/b/c/e.py
a/b/c/x.py
"""]
relative_import_test_2 = [
"a.module",
["a", "a.module",
"a.sys",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.d",
"a.b.c.e",
"a.b.c.moduleC",
"a.b.c.f",
"a.b.x",
"a.another"],
[], [],
"""\
mymodule.py
a/__init__.py
from . import sys # a.sys
a/another.py
a/module.py
from .b import y, z # a.b.y, a.b.z
a/gc.py
a/sys.py
a/b/__init__.py
from .c import moduleC # a.b.c.moduleC
from .c import d # a.b.c.d
a/b/x.py
a/b/y.py
a/b/z.py
a/b/c/__init__.py
from . import e # a.b.c.e
a/b/c/moduleC.py
#
from . import f # a.b.c.f
from .. import x # a.b.x
from ... import another # a.another
a/b/c/d.py
a/b/c/e.py
a/b/c/f.py
"""]
relative_import_test_3 = [
"a.module",
["a", "a.module"],
["a.bar"],
[],
"""\
a/__init__.py
def foo(): pass
a/module.py
from . import foo
from . import bar
"""]
relative_import_test_4 = [
"a.module",
["a", "a.module"],
[],
[],
"""\
a/__init__.py
def foo(): pass
a/module.py
from . import *
"""]
bytecode_test = [
"a",
["a"],
[],
[],
""
]
def open_file(path):
    """Open *path* for writing, creating any missing parent directories.

    Replaces the manual errno.EEXIST check with makedirs(exist_ok=True);
    this file already requires Python 3 (it imports importlib.machinery),
    where exist_ok is the idiomatic form and avoids the TOCTOU window.
    """
    dirname = os.path.dirname(path)
    os.makedirs(dirname, exist_ok=True)
    return open(path, "w")
def create_package(source):
    """Materialize a package layout under TEST_DIR from *source*.

    Non-indented lines name files to create (relative to TEST_DIR);
    indented lines are written, stripped, into the most recently named
    file.  The last open file is always closed, even on error.
    """
    current = None
    try:
        for raw_line in source.splitlines():
            if raw_line.startswith((" ", "\t")):
                # Body line for the file opened by the preceding name line.
                current.write(raw_line.strip() + "\n")
                continue
            if current:
                current.close()
            current = open_file(os.path.join(TEST_DIR, raw_line.strip()))
    finally:
        if current:
            current.close()
class ModuleFinderTest(unittest.TestCase):
    """Run ModuleFinder over generated package trees and verify its report."""

    def _do_test(self, info, report=False, debug=0, replace_paths=[]):
        # info is a 5-item fixture: module to import, expected found modules,
        # required-missing modules, maybe-missing modules, and the package
        # source tree to generate on disk (see the fixture lists above).
        import_this, modules, missing, maybe_missing, source = info
        create_package(source)
        try:
            mf = modulefinder.ModuleFinder(path=TEST_PATH, debug=debug,
                                           replace_paths=replace_paths)
            mf.import_hook(import_this)
            if report:
                mf.report()
##                # This wouldn't work in general when executed several times:
##                opath = sys.path[:]
##                sys.path = TEST_PATH
##                try:
##                    __import__(import_this)
##                except:
##                    import traceback; traceback.print_exc()
##                sys.path = opath
##                return
            modules = sorted(set(modules))
            found = sorted(mf.modules)
            # check if we found what we expected, not more, not less
            self.assertEqual(found, modules)

            # check for missing and maybe missing modules
            bad, maybe = mf.any_missing_maybe()
            self.assertEqual(bad, missing)
            self.assertEqual(maybe, maybe_missing)
        finally:
            # Always remove the generated tree so the tests stay independent.
            shutil.rmtree(TEST_DIR)

    def test_package(self):
        self._do_test(package_test)

    def test_maybe(self):
        self._do_test(maybe_test)

    def test_maybe_new(self):
        self._do_test(maybe_test_new)

    def test_absolute_imports(self):
        self._do_test(absolute_import_test)

    def test_relative_imports(self):
        self._do_test(relative_import_test)

    def test_relative_imports_2(self):
        self._do_test(relative_import_test_2)

    def test_relative_imports_3(self):
        self._do_test(relative_import_test_3)

    def test_relative_imports_4(self):
        self._do_test(relative_import_test_4)

    def test_bytecode(self):
        # Compile a source module to bytecode, delete the source, and check
        # that ModuleFinder still finds the module from its .pyc alone.
        base_path = os.path.join(TEST_DIR, 'a')
        source_path = base_path + importlib.machinery.SOURCE_SUFFIXES[0]
        bytecode_path = base_path + importlib.machinery.BYTECODE_SUFFIXES[0]
        with open_file(source_path) as file:
            file.write('testing_modulefinder = True\n')
        py_compile.compile(source_path, cfile=bytecode_path)
        os.remove(source_path)
        self._do_test(bytecode_test)

    def test_replace_paths(self):
        # replace_paths should rewrite co_filename in reported bytecode.
        old_path = os.path.join(TEST_DIR, 'a', 'module.py')
        new_path = os.path.join(TEST_DIR, 'a', 'spam.py')
        with support.captured_stdout() as output:
            self._do_test(maybe_test, debug=2,
                          replace_paths=[(old_path, new_path)])
        output = output.getvalue()
        expected = "co_filename %r changed to %r" % (old_path, new_path)
        self.assertIn(expected, output)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| lgpl-3.0 |
invisiblek/python-for-android | python3-alpha/python3-src/Lib/_strptime.py | 47 | 21168 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE, ASCII
from re import escape as re_escape
from datetime import (date as datetime_date,
timedelta as datetime_timedelta,
timezone as datetime_timezone)
try:
    from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
    # A bare "except:" here would also swallow KeyboardInterrupt and
    # SystemExit; only a failed import should trigger the fallback.
    from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
    """Return the (language, encoding) pair of the current LC_TIME locale."""
    return locale.getlocale(category=locale.LC_TIME)
class LocaleTime(object):
    """Stores and handles locale-specific information related to time.

    ATTRIBUTES:
        f_weekday -- full weekday names (7-item list)
        a_weekday -- abbreviated weekday names (7-item list)
        f_month -- full month names (13-item list; dummy value in [0], which
                    is added by code)
        a_month -- abbreviated month names (13-item list, dummy value in
                    [0], which is added by code)
        am_pm -- AM/PM representation (2-item list)
        LC_date_time -- format string for date/time representation (string)
        LC_date -- format string for date representation (string)
        LC_time -- format string for time representation (string)
        timezone -- daylight- and non-daylight-savings timezone representation
                    (2-item list of sets)
        lang -- Language used by instance (2-item tuple)
    """

    def __init__(self):
        """Set all attributes.

        Order of methods called matters for dependency reasons.

        The locale language is set at the offset and then checked again before
        exiting.  This is to make sure that the attributes were not set with a
        mix of information from more than one locale.  This would most likely
        happen when using threads where one thread calls a locale-dependent
        function while another thread changes the locale while the function in
        the other thread is still running.  Proper coding would call for
        locks to prevent changing the locale while locale-dependent code is
        running.  The check here is done in case someone does not think about
        doing this.

        Only other possible issue is if someone changed the timezone and did
        not call tz.tzset .  That is an issue for the programmer, though,
        since changing the timezone is worthless without that call.
        """
        self.lang = _getlang()
        self.__calc_weekday()
        self.__calc_month()
        self.__calc_am_pm()
        self.__calc_timezone()
        self.__calc_date_time()
        # Detect a locale switch that happened mid-initialization.
        if _getlang() != self.lang:
            raise ValueError("locale changed during initialization")

    def __pad(self, seq, front):
        # Add '' to seq to either the front (is True), else the back.
        seq = list(seq)
        if front:
            seq.insert(0, '')
        else:
            seq.append('')
        return seq

    def __calc_weekday(self):
        # Set self.a_weekday and self.f_weekday using the calendar
        # module.
        a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
        f_weekday = [calendar.day_name[i].lower() for i in range(7)]
        self.a_weekday = a_weekday
        self.f_weekday = f_weekday

    def __calc_month(self):
        # Set self.f_month and self.a_month using the calendar module.
        # Index 0 is the empty string, so month numbers map directly.
        a_month = [calendar.month_abbr[i].lower() for i in range(13)]
        f_month = [calendar.month_name[i].lower() for i in range(13)]
        self.a_month = a_month
        self.f_month = f_month

    def __calc_am_pm(self):
        # Set self.am_pm by using time.strftime().

        # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
        # magical; just happened to have used it everywhere else where a
        # static date was needed.
        am_pm = []
        for hour in (1, 22):
            time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
            am_pm.append(time.strftime("%p", time_tuple).lower())
        self.am_pm = am_pm

    def __calc_date_time(self):
        # Set self.date_time, self.date, & self.time by using
        # time.strftime().

        # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
        # overloaded numbers is minimized.  The order in which searches for
        # values within the format string is very important; it eliminates
        # possible ambiguity for what something represents.
        time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
        date_time = [None, None, None]
        date_time[0] = time.strftime("%c", time_tuple).lower()
        date_time[1] = time.strftime("%x", time_tuple).lower()
        date_time[2] = time.strftime("%X", time_tuple).lower()
        # Reverse-engineer the locale's format strings by replacing the known
        # rendered values with their directives, longest/most specific first.
        replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
                    (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
                    (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
                    ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
                    ('44', '%M'), ('55', '%S'), ('76', '%j'),
                    ('17', '%d'), ('03', '%m'), ('3', '%m'),
                    # '3' needed for when no leading zero.
                    ('2', '%w'), ('10', '%I')]
        replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
                                                for tz in tz_values])
        for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
            current_format = date_time[offset]
            for old, new in replacement_pairs:
                # Must deal with possible lack of locale info
                # manifesting itself as the empty string (e.g., Swedish's
                # lack of AM/PM info) or a platform returning a tuple of empty
                # strings (e.g., MacOS 9 having timezone as ('','')).
                if old:
                    current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
            # 2005-01-03 occurs before the first Monday of the year.  Otherwise
            # %U is used.
            time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
            if '00' in time.strftime(directive, time_tuple):
                U_W = '%W'
            else:
                U_W = '%U'
            date_time[offset] = current_format.replace('11', U_W)
        self.LC_date_time = date_time[0]
        self.LC_date = date_time[1]
        self.LC_time = date_time[2]

    def __calc_timezone(self):
        # Set self.timezone by using time.tzname.
        # Do not worry about possibility of time.tzname[0] == timetzname[1]
        # and time.daylight; handle that in strptime .
        try:
            # tzset() only exists on Unix; refresh tz info if we can.
            time.tzset()
        except AttributeError:
            pass
        no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
        if time.daylight:
            has_saving = frozenset([time.tzname[1].lower()])
        else:
            has_saving = frozenset()
        self.timezone = (no_saving, has_saving)
class TimeRE(dict):
    """Handle conversion from format directives to regexes."""

    def __init__(self, locale_time=None):
        """Create keys/values.

        Order of execution is important for dependency reasons.
        """
        if locale_time:
            self.locale_time = locale_time
        else:
            self.locale_time = LocaleTime()
        base = super()
        base.__init__({
            # The " \d" part of the regex is to make %c from ANSI C work
            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
            'f': r"(?P<f>[0-9]{1,6})",
            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
            'M': r"(?P<M>[0-5]\d|\d)",
            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
            'w': r"(?P<w>[0-6])",
            # W is set below by using 'U'
            'y': r"(?P<y>\d\d)",
            #XXX: Does 'Y' need to worry about having less or more than
            #     4 digits?
            'Y': r"(?P<Y>\d\d\d\d)",
            'z': r"(?P<z>[+-]\d\d[0-5]\d)",
            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
                                        for tz in tz_names),
                                'Z'),
            '%': '%'})
        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
        base.__setitem__('X', self.pattern(self.locale_time.LC_time))

    def __seqToRE(self, to_convert, directive):
        """Convert a list to a regex string for matching a directive.

        Want possible matching values to be from longest to shortest.  This
        prevents the possibility of a match occurring for a value that also
        a substring of a larger value that should have matched (e.g., 'abc'
        matching when 'abcdef' should have been the match).
        """
        to_convert = sorted(to_convert, key=len, reverse=True)
        # If every candidate is empty (missing locale info), emit nothing.
        for value in to_convert:
            if value != '':
                break
        else:
            return ''
        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
        regex = '(?P<%s>%s' % (directive, regex)
        return '%s)' % regex

    def pattern(self, format):
        """Return regex pattern for the format string.

        Need to make sure that any characters that might be interpreted as
        regex syntax are escaped.
        """
        processed_format = ''
        # The sub() call escapes all characters that might be misconstrued
        # as regex syntax.  Cannot use re.escape since we have to deal with
        # format directives (%m, etc.).
        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
        format = regex_chars.sub(r"\\\1", format)
        # Raw strings are required here: '\s' is an invalid escape in a
        # plain string literal, and since Python 3.12 an unknown escape in a
        # re.sub() replacement template is an error, so the replacement must
        # spell the backslash explicitly (r"\\s+" emits the literal \s+).
        whitespace_replacement = re_compile(r"\s+")
        format = whitespace_replacement.sub(r"\\s+", format)
        while '%' in format:
            directive_index = format.index('%')+1
            processed_format = "%s%s%s" % (processed_format,
                                           format[:directive_index-1],
                                           self[format[directive_index]])
            format = format[directive_index+1:]
        return "%s%s" % (processed_format, format)

    def compile(self, format):
        """Return a compiled re object for the format string."""
        return re_compile(self.pattern(format), IGNORECASE)
# Module-level caches shared by every strptime() call in this process.
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
    """Return the Julian day (1-based day of the year) for *year* given a
    week number and a weekday (Monday == 0).

    ``week_starts_Mon`` selects between %W semantics (weeks start on
    Monday) and %U semantics (weeks start on Sunday).
    """
    jan1_weekday = datetime_date(year, 1, 1).weekday()
    if not week_starts_Mon:
        # %U: shift the frame so that Sunday becomes day 0 of the week.
        jan1_weekday = (jan1_weekday + 1) % 7
        day_of_week = (day_of_week + 1) % 7
    if week_of_year == 0:
        # Week 0 holds the days before the first full week of the year;
        # the result can be useful even when it lands before Jan 1.
        return 1 + day_of_week - jan1_weekday
    # Days consumed by the (possibly empty) week 0, then whole weeks.
    week_0_length = (7 - jan1_weekday) % 7
    return 1 + week_0_length + 7 * (week_of_year - 1) + day_of_week
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a 2-tuple consisting of a time struct and an int containing
    the number of microseconds based on the input string and the
    format string."""
    # Validate argument types up front so the error names the offender.
    for index, arg in enumerate([data_string, format]):
        if not isinstance(arg, str):
            msg = "strptime() argument {} must be str, not {}"
            raise TypeError(msg.format(index, type(arg)))

    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        # Rebuild the cached TimeRE whenever the locale changed under us.
        if _getlang() != _TimeRE_cache.locale_time.lang:
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        locale_time = _TimeRE_cache.locale_time
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            # KeyError raised when a bad format is found; can be specified as
            # \\, in which case it was a stray % but with a space after it
            except KeyError as err:
                bad_directive = err.args[0]
                if bad_directive == "\\":
                    bad_directive = "%"
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                                    (bad_directive, format))
            # IndexError only occurs when the format string is "%"
            except IndexError:
                raise ValueError("stray %% in format '%s'" % format)
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError("time data %r does not match format %r" %
                         (data_string, format))
    # The whole input must be consumed, not merely a prefix.
    if len(data_string) != found.end():
        raise ValueError("unconverted data remains: %s" %
                          data_string[found.end():])

    year = 1900
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    tzoffset = None
    # Default to -1 to signify that values not known; not critical to have,
    # though
    week_of_year = -1
    week_of_year_start = -1
    # weekday and julian defaulted to -1 so as to signal need to calculate
    # values
    weekday = julian = -1
    found_dict = found.groupdict()
    for group_key in found_dict.keys():
        # Directives not explicitly handled below:
        #   c, x, X
        #      handled by making out of other directives
        #   U, W
        #      worthless without day of the week
        if group_key == 'y':
            year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            #value in the range of [00, 68] is in the century 2000, while
            #[69,99] is in the century 1900
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            # Month names are lowercased in LocaleTime; index == month number.
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # If there was no AM/PM indicator, we'll treat this like AM
            if ampm in ('', locale_time.am_pm[0]):
                # We're in AM so the hour is correct unless we're
                # looking at 12 midnight.
                # 12 midnight == 12 AM == hour 0
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                # We're in PM so we need to add 12 to the hour unless
                # we're looking at 12 noon.
                # 12 noon == 12 PM == hour 12
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Pad to always return microseconds.
            s += "0" * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            # %w uses Sunday == 0; convert to struct_time's Monday == 0.
            weekday = int(found_dict['w'])
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            if group_key == 'U':
                # U starts week on Sunday.
                week_of_year_start = 6
            else:
                # W starts week on Monday.
                week_of_year_start = 0
        elif group_key == 'z':
            # Numeric offset like +0530 -> minutes east of UTC.
            z = found_dict['z']
            tzoffset = int(z[1:3]) * 60 + int(z[3:5])
            if z.startswith("-"):
                tzoffset = -tzoffset
        elif group_key == 'Z':
            # Since -1 is default value only need to worry about setting tz if
            # it can be something other than -1.
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Deal with bad locale setup where timezone names are the
                    # same and yet time.daylight is true; too ambiguous to
                    # be able to tell what timezone has daylight savings
                    if (time.tzname[0] == time.tzname[1] and
                       time.daylight and found_zone not in ("utc", "gmt")):
                        break
                    else:
                        tz = value
                        break
    # If we know the week of the year and what day of that week, we can figure
    # out the Julian day of the year.
    if julian == -1 and week_of_year != -1 and weekday != -1:
        week_starts_Mon = True if week_of_year_start == 0 else False
        julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                            week_starts_Mon)
    # Cannot pre-calculate datetime_date() since can change in Julian
    # calculation and thus could have different value for the day of the week
    # calculation.
    if julian == -1:
        # Need to add 1 to result since first day of the year is 1, not 0.
        julian = datetime_date(year, month, day).toordinal() - \
                  datetime_date(year, 1, 1).toordinal() + 1
    else:  # Assume that if they bothered to include Julian day it will
           # be accurate.
        datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday == -1:
        weekday = datetime_date(year, month, day).weekday()
    # Add timezone info
    tzname = found_dict.get("Z")
    if tzoffset is not None:
        gmtoff = tzoffset * 60
    else:
        gmtoff = None

    return (year, month, day,
            hour, minute, second,
            weekday, julian, tz, gmtoff, tzname), fraction
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a time.struct_time parsed from *data_string* per *format*."""
    parsed, _fraction = _strptime(data_string, format)
    # struct_time carries no microseconds, so the fraction is discarded.
    return time.struct_time(parsed[:9])
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return an instance of *cls* (a datetime.datetime subclass) parsed
    from *data_string* according to *format*."""
    fields, fraction = _strptime(data_string, format)
    gmtoff, tz_name = fields[-2:]
    ctor_args = fields[:6] + (fraction,)
    if gmtoff is not None:
        # A numeric %z offset was parsed; attach a fixed-offset tzinfo,
        # named if %Z supplied a zone name as well.
        offset = datetime_timedelta(seconds=gmtoff)
        if tz_name:
            tzinfo = datetime_timezone(offset, tz_name)
        else:
            tzinfo = datetime_timezone(offset)
        ctor_args += (tzinfo,)
    return cls(*ctor_args)
| apache-2.0 |
tropp/acq4 | acq4/pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate_pyqt5.py | 38 | 10666 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate.ui'
#
# Created: Wed Mar 26 15:09:28 2014
# by: PyQt5 UI code generator 5.0.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated (pyuic5) builder for the plot-options form.

    This class was produced from plotConfigTemplate.ui; regenerate it with
    pyuic5 rather than editing widget code by hand.
    """

    def setupUi(self, Form):
        """Create, configure and lay out every widget on *Form*."""
        Form.setObjectName("Form")
        Form.resize(481, 840)
        # "Average" group: list of parameters to average traces over.
        self.averageGroup = QtWidgets.QGroupBox(Form)
        self.averageGroup.setGeometry(QtCore.QRect(0, 640, 242, 182))
        self.averageGroup.setCheckable(True)
        self.averageGroup.setChecked(False)
        self.averageGroup.setObjectName("averageGroup")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.averageGroup)
        self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_5.setSpacing(0)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.avgParamList = QtWidgets.QListWidget(self.averageGroup)
        self.avgParamList.setObjectName("avgParamList")
        self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1)
        # Downsampling / decimation controls.
        self.decimateGroup = QtWidgets.QFrame(Form)
        self.decimateGroup.setGeometry(QtCore.QRect(10, 140, 191, 171))
        self.decimateGroup.setObjectName("decimateGroup")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.decimateGroup)
        self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_4.setSpacing(0)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.clipToViewCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.clipToViewCheck.setObjectName("clipToViewCheck")
        self.gridLayout_4.addWidget(self.clipToViewCheck, 7, 0, 1, 3)
        self.maxTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.maxTracesCheck.setObjectName("maxTracesCheck")
        self.gridLayout_4.addWidget(self.maxTracesCheck, 8, 0, 1, 2)
        self.downsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.downsampleCheck.setObjectName("downsampleCheck")
        self.gridLayout_4.addWidget(self.downsampleCheck, 0, 0, 1, 3)
        self.peakRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.peakRadio.setChecked(True)
        self.peakRadio.setObjectName("peakRadio")
        self.gridLayout_4.addWidget(self.peakRadio, 6, 1, 1, 2)
        self.maxTracesSpin = QtWidgets.QSpinBox(self.decimateGroup)
        self.maxTracesSpin.setObjectName("maxTracesSpin")
        self.gridLayout_4.addWidget(self.maxTracesSpin, 8, 2, 1, 1)
        self.forgetTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.forgetTracesCheck.setObjectName("forgetTracesCheck")
        self.gridLayout_4.addWidget(self.forgetTracesCheck, 9, 0, 1, 3)
        self.meanRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.meanRadio.setObjectName("meanRadio")
        self.gridLayout_4.addWidget(self.meanRadio, 3, 1, 1, 2)
        self.subsampleRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.subsampleRadio.setObjectName("subsampleRadio")
        self.gridLayout_4.addWidget(self.subsampleRadio, 2, 1, 1, 2)
        self.autoDownsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.autoDownsampleCheck.setChecked(True)
        self.autoDownsampleCheck.setObjectName("autoDownsampleCheck")
        self.gridLayout_4.addWidget(self.autoDownsampleCheck, 1, 2, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_4.addItem(spacerItem, 2, 0, 1, 1)
        self.downsampleSpin = QtWidgets.QSpinBox(self.decimateGroup)
        self.downsampleSpin.setMinimum(1)
        self.downsampleSpin.setMaximum(100000)
        self.downsampleSpin.setProperty("value", 1)
        self.downsampleSpin.setObjectName("downsampleSpin")
        self.gridLayout_4.addWidget(self.downsampleSpin, 1, 1, 1, 1)
        # Axis-transform toggles (FFT, log scales).
        self.transformGroup = QtWidgets.QFrame(Form)
        self.transformGroup.setGeometry(QtCore.QRect(0, 0, 154, 79))
        self.transformGroup.setObjectName("transformGroup")
        self.gridLayout = QtWidgets.QGridLayout(self.transformGroup)
        self.gridLayout.setObjectName("gridLayout")
        self.fftCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.fftCheck.setObjectName("fftCheck")
        self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
        self.logXCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logXCheck.setObjectName("logXCheck")
        self.gridLayout.addWidget(self.logXCheck, 1, 0, 1, 1)
        self.logYCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logYCheck.setObjectName("logYCheck")
        self.gridLayout.addWidget(self.logYCheck, 2, 0, 1, 1)
        # Point-display options.
        self.pointsGroup = QtWidgets.QGroupBox(Form)
        self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
        self.pointsGroup.setCheckable(True)
        self.pointsGroup.setObjectName("pointsGroup")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.pointsGroup)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.autoPointsCheck = QtWidgets.QCheckBox(self.pointsGroup)
        self.autoPointsCheck.setChecked(True)
        self.autoPointsCheck.setObjectName("autoPointsCheck")
        self.verticalLayout_5.addWidget(self.autoPointsCheck)
        # Grid visibility and opacity.
        self.gridGroup = QtWidgets.QFrame(Form)
        self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
        self.gridGroup.setObjectName("gridGroup")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.gridGroup)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.xGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.xGridCheck.setObjectName("xGridCheck")
        self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
        self.yGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.yGridCheck.setObjectName("yGridCheck")
        self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
        self.gridAlphaSlider = QtWidgets.QSlider(self.gridGroup)
        self.gridAlphaSlider.setMaximum(255)
        self.gridAlphaSlider.setProperty("value", 128)
        self.gridAlphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.gridAlphaSlider.setObjectName("gridAlphaSlider")
        self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.gridGroup)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
        # Curve alpha (transparency) controls.
        self.alphaGroup = QtWidgets.QGroupBox(Form)
        self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
        self.alphaGroup.setCheckable(True)
        self.alphaGroup.setObjectName("alphaGroup")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.alphaGroup)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.autoAlphaCheck = QtWidgets.QCheckBox(self.alphaGroup)
        self.autoAlphaCheck.setChecked(False)
        self.autoAlphaCheck.setObjectName("autoAlphaCheck")
        self.horizontalLayout.addWidget(self.autoAlphaCheck)
        self.alphaSlider = QtWidgets.QSlider(self.alphaGroup)
        self.alphaSlider.setMaximum(1000)
        self.alphaSlider.setProperty("value", 1000)
        self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.alphaSlider.setObjectName("alphaSlider")
        self.horizontalLayout.addWidget(self.alphaSlider)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign translated display text and tooltips to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.averageGroup.setToolTip(_translate("Form", "Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available)."))
        self.averageGroup.setTitle(_translate("Form", "Average"))
        self.clipToViewCheck.setToolTip(_translate("Form", "Plot only the portion of each curve that is visible. This assumes X values are uniformly spaced."))
        self.clipToViewCheck.setText(_translate("Form", "Clip to View"))
        self.maxTracesCheck.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed."))
        self.maxTracesCheck.setText(_translate("Form", "Max Traces:"))
        self.downsampleCheck.setText(_translate("Form", "Downsample"))
        self.peakRadio.setToolTip(_translate("Form", "Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower."))
        self.peakRadio.setText(_translate("Form", "Peak"))
        self.maxTracesSpin.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check \"Max Traces\" and set this value to limit the number of traces that are displayed."))
        self.forgetTracesCheck.setToolTip(_translate("Form", "If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden)."))
        self.forgetTracesCheck.setText(_translate("Form", "Forget hidden traces"))
        self.meanRadio.setToolTip(_translate("Form", "Downsample by taking the mean of N samples."))
        self.meanRadio.setText(_translate("Form", "Mean"))
        self.subsampleRadio.setToolTip(_translate("Form", "Downsample by taking the first of N samples. This method is fastest and least accurate."))
        self.subsampleRadio.setText(_translate("Form", "Subsample"))
        self.autoDownsampleCheck.setToolTip(_translate("Form", "Automatically downsample data based on the visible range. This assumes X values are uniformly spaced."))
        self.autoDownsampleCheck.setText(_translate("Form", "Auto"))
        self.downsampleSpin.setToolTip(_translate("Form", "Downsample data before plotting. (plot every Nth sample)"))
        self.downsampleSpin.setSuffix(_translate("Form", "x"))
        self.fftCheck.setText(_translate("Form", "Power Spectrum (FFT)"))
        self.logXCheck.setText(_translate("Form", "Log X"))
        self.logYCheck.setText(_translate("Form", "Log Y"))
        self.pointsGroup.setTitle(_translate("Form", "Points"))
        self.autoPointsCheck.setText(_translate("Form", "Auto"))
        self.xGridCheck.setText(_translate("Form", "Show X Grid"))
        self.yGridCheck.setText(_translate("Form", "Show Y Grid"))
        self.label.setText(_translate("Form", "Opacity"))
        self.alphaGroup.setTitle(_translate("Form", "Alpha"))
        self.autoAlphaCheck.setText(_translate("Form", "Auto"))
| mit |
GRASP-ML/ServiceRobots | navigation/robust_navigation/src/torture_test_controller.py | 2 | 2423 | #!/usr/bin/python
#==========================================================
# imports
import rospy
from map_labelling.srv import GoToLocation, GoToLocationResponse, MotionStatus,MotionStatusResponse
import random
# Named map locations the robot cycles through at random; each name must be
# resolvable by the map_labelling 'goto_location' service.
locations = ["grasp",
             "charitys_office",
             "vending_machines",
             "levine_far_corner",
             "towne_311",
             "towne_321",
             "to_skirkanich",
             "empty_spot",
             "bio_lab"]
#====================================================
# node execution
def runNode():
    """Run the torture test: repeatedly navigate to random locations."""
    # Start the node; initial localization is hard coded into the launch file.
    rospy.init_node('torture_test')
    rospy.loginfo("torture_test node initialized")
    rospy.wait_for_service('goto_location')
    rospy.wait_for_service('motion_status')
    rospy.loginfo("Navigation services are available")
    targets = [Location(name) for name in locations]
    rospy.sleep(10)
    rospy.loginfo("Beginning tests")
    # Keep driving to random destinations until the node is shut down.
    while not rospy.is_shutdown():
        destination = random.choice(targets)
        destination.goto()
        destination.print_status()
#======================================
# Location class
class Location:
    """A named navigation target plus the ROS service plumbing to reach it."""

    def __init__(self, name):
        self.name = name
        self.goToService = rospy.ServiceProxy('goto_location', GoToLocation)
        self.motionStatus = rospy.ServiceProxy('motion_status', MotionStatus)
        # True when the most recent goto() reached the goal successfully.
        self.status = False

    def goto(self):
        """Navigate to this location, blocking until the motion finishes."""
        rospy.loginfo("Navigating to " + self.name)
        try:
            self.goToService(self.name)
        except rospy.service.ServiceException:
            rospy.logerr("Location " + self.name + " does not exist.")
            return
        # Poll until the goal leaves the running states.
        # 0=PENDING and 1=ACTIVE mean the goal is still in progress;
        # anything else means it terminated.
        while True:
            rospy.sleep(1)
            state = self.motionStatus().status
            if state not in (0, 1):
                # 3 is SUCCEEDED; every other terminal state is a failure.
                self.status = (state == 3)
                break

    def print_status(self):
        """Log the outcome of the most recent goto()."""
        outcome = " succeeded" if self.status else " failed"
        rospy.loginfo("Navigating to " + self.name + outcome)
#=================================
# Run main
if __name__ == '__main__':
    try:
        runNode()
    except rospy.ROSInterruptException:
        # Normal shutdown path: ROS interrupts sleeps/waits on exit.
        pass
| bsd-3-clause |
derekjchow/models | official/utils/flags/_device.py | 2 | 3007 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flags for managing compute devices. Currently only contains TPU flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from official.utils.flags._conventions import help_wrap
def require_cloud_storage(flag_names):
    """Register a validator to check directory flags.

    Args:
      flag_names: An iterable of strings containing the names of flags to be
        checked.
    """
    msg = "TPU requires GCS path for " + ", ".join(flag_names)

    @flags.multi_flags_validator(["tpu"] + flag_names, message=msg)
    def _path_check(flag_values):  # pylint: disable=missing-docstring
        if flag_values["tpu"] is None:
            # No TPU requested, so local paths are fine.
            return True

        all_valid = True
        for name in flag_names:
            if not flag_values[name].startswith("gs://"):
                tf.compat.v1.logging.error("{} must be a GCS path.".format(name))
                all_valid = False

        return all_valid
def define_device(tpu=True):
  """Register device specific flags.

  Args:
    tpu: Create flags to specify TPU operation.

  Returns:
    A list of flags for core.py to mark as key flags.
  """
  key_flags = []
  if tpu:
    flags.DEFINE_string(
        name="tpu", default=None,
        help=help_wrap(
            "The Cloud TPU to use for training. This should be either the name "
            "used when creating the Cloud TPU, or a "
            # Fixed: the original string ended "...will use the" with no
            # trailing space, rendering as "use theCPU" in --help output.
            "grpc://ip.address.of.tpu:8470 url. Passing `local` will use the "
            "CPU of the local instance instead. (Good for debugging.)"))
    key_flags.append("tpu")

    flags.DEFINE_string(
        name="tpu_zone", default=None,
        help=help_wrap(
            "[Optional] GCE zone where the Cloud TPU is located in. If not "
            "specified, we will attempt to automatically detect the GCE "
            # Fixed: previously said "project" (copy-paste from the
            # tpu_gcp_project flag below); this flag describes the zone.
            "zone from metadata."))

    flags.DEFINE_string(
        name="tpu_gcp_project", default=None,
        help=help_wrap(
            "[Optional] Project name for the Cloud TPU-enabled project. If not "
            "specified, we will attempt to automatically detect the GCE "
            "project from metadata."))

    flags.DEFINE_integer(name="num_tpu_shards", default=8,
                         help=help_wrap("Number of shards (TPU chips)."))

  return key_flags
| apache-2.0 |
JacobStevenR/scrapy | scrapy/linkextractors/lxmlhtml.py | 78 | 4344 | """
Link extractor based on lxml.html
"""
import six
from six.moves.urllib.parse import urlparse, urljoin
import lxml.etree as etree
from scrapy.link import Link
from scrapy.utils.misc import arg_to_iter, rel_has_nofollow
from scrapy.utils.python import unique as unique_list, to_native_str
from scrapy.linkextractors import FilteringLinkExtractor
from scrapy.utils.response import get_base_url
# from lxml/src/lxml/html/__init__.py
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
_collect_string_content = etree.XPath("string()")
def _nons(tag):
    """Return *tag* with its XHTML namespace wrapper stripped, if present.

    lxml yields namespaced tag names as '{http://...}name'; anything that is
    not a string (e.g. comment/PI sentinels) is returned unchanged.
    """
    if not isinstance(tag, six.string_types):
        return tag
    prefix_len = len(XHTML_NAMESPACE)
    if tag[0] == '{' and tag[1:prefix_len + 1] == XHTML_NAMESPACE:
        return tag.split('}')[-1]
    return tag
class LxmlParserLinkExtractor(object):
    """Low-level extractor that walks an lxml tree collecting Link objects.

    tag/attr/process may each be a plain value or a callable predicate;
    plain values are wrapped in equality/identity lambdas in __init__.
    """
    def __init__(self, tag="a", attr="href", process=None, unique=False):
        # scan_tag/scan_attr decide which elements/attributes to visit;
        # process_attr post-processes each attribute value (may return None
        # to drop a link).
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique
    def _iter_links(self, document):
        # Yield (element, attribute_name, attribute_value) triples for every
        # accepted attribute of every accepted element, in document order.
        for el in document.iter(etree.Element):
            if not self.scan_tag(_nons(el.tag)):
                continue
            attribs = el.attrib
            for attrib in attribs:
                if not self.scan_attr(attrib):
                    continue
                yield (el, attrib, attribs[attrib])
    def _extract_links(self, selector, response_url, response_encoding, base_url):
        links = []
        # hacky way to get the underlying lxml parsed document
        for el, attr, attr_val in self._iter_links(selector.root):
            # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
            try:
                attr_val = urljoin(base_url, attr_val)
            except ValueError:
                continue  # skipping bogus links
            else:
                url = self.process_attr(attr_val)
                if url is None:
                    continue
            url = to_native_str(url, encoding=response_encoding)
            # to fix relative links after process_value
            url = urljoin(response_url, url)
            # Anchor text comes from the element's string content; empty
            # text falls back to u''.
            link = Link(url, _collect_string_content(el) or u'',
                        nofollow=rel_has_nofollow(el.get('rel')))
            links.append(link)
        return self._deduplicate_if_needed(links)
    def extract_links(self, response):
        base_url = get_base_url(response)
        return self._extract_links(response.selector, response.url, response.encoding, base_url)
    def _process_links(self, links):
        """ Normalize and filter extracted links
        The subclass should override it if necessary
        """
        return self._deduplicate_if_needed(links)
    def _deduplicate_if_needed(self, links):
        # Deduplication keys on the final (absolute, processed) URL only.
        if self.unique:
            return unique_list(links, key=lambda link: link.url)
        return links
class LxmlLinkExtractor(FilteringLinkExtractor):
    """Public link extractor: LxmlParserLinkExtractor plus the
    allow/deny/domain/xpath/css filtering of FilteringLinkExtractor.
    """
    def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
                 tags=('a', 'area'), attrs=('href',), canonicalize=True,
                 unique=True, process_value=None, deny_extensions=None, restrict_css=()):
        # Normalize tags/attrs (possibly scalars) into sets and wrap them in
        # membership predicates for the low-level parser.
        tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
        tag_func = lambda x: x in tags
        attr_func = lambda x: x in attrs
        lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,
            unique=unique, process=process_value)
        super(LxmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,
            allow_domains=allow_domains, deny_domains=deny_domains,
            restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
            canonicalize=canonicalize, deny_extensions=deny_extensions)
    def extract_links(self, response):
        base_url = get_base_url(response)
        # When restrict_xpaths is set, only the matching subtrees are
        # scanned; otherwise the whole document selector is used.
        if self.restrict_xpaths:
            docs = [subdoc
                    for x in self.restrict_xpaths
                    for subdoc in response.xpath(x)]
        else:
            docs = [response.selector]
        all_links = []
        for doc in docs:
            links = self._extract_links(doc, response.url, response.encoding, base_url)
            all_links.extend(self._process_links(links))
        # Final cross-subtree dedup (the same link may appear in several
        # restricted regions).
        return unique_list(all_links)
| bsd-3-clause |
jiangzhuo/kbengine | kbe/res/scripts/common/Lib/test/test_winsound.py | 84 | 9070 | # Ridiculously simple test of the winsound module for Windows.
import unittest
from test import support
support.requires('audio')
import time
import os
import subprocess
winsound = support.import_module('winsound')
ctypes = support.import_module('ctypes')
import winreg
def has_sound(sound):
    """Find out if a particular event is configured with a default sound."""
    try:
        # Ask the mixer API for the number of devices it knows about.
        # When there are no devices, PlaySound will fail.
        if ctypes.windll.winmm.mixerGetNumDevs() == 0:
            return False
        # Raw string: the registry path contains backslash sequences
        # ("\S", "\A", "\.") that are invalid string escapes -- a
        # DeprecationWarning/SyntaxWarning in modern Python and a syntax
        # error in 3.12+.  The raw form has the identical runtime value.
        key = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER,
                r"AppEvents\Schemes\Apps\.Default\{0}\.Default".format(sound))
        return winreg.EnumValue(key, 0)[1] != ""
    except OSError:
        # Missing registry key (or other OS-level failure) means the event
        # has no configured sound.
        return False
class BeepTest(unittest.TestCase):
    """Tests for winsound.Beep (frequency range and best-effort playback)."""
    # As with PlaySoundTest, incorporate the _have_soundcard() check
    # into our test methods.  If there's no audio device present,
    # winsound.Beep returns 0 and GetLastError() returns 127, which
    # is: ERROR_PROC_NOT_FOUND ("The specified procedure could not
    # be found").  (FWIW, virtual/Hyper-V systems fall under this
    # scenario as they have no sound devices whatsoever (not even
    # a legacy Beep device).)
    def test_errors(self):
        # Valid frequency range is 37..32767 Hz; 36 and 32768 are just
        # outside either bound.
        self.assertRaises(TypeError, winsound.Beep)
        self.assertRaises(ValueError, winsound.Beep, 36, 75)
        self.assertRaises(ValueError, winsound.Beep, 32768, 75)
    def test_extremes(self):
        # Lowest and highest legal frequencies.
        self._beep(37, 75)
        self._beep(32767, 75)
    def test_increasingfrequency(self):
        for i in range(100, 2000, 100):
            self._beep(i, 75)
    def _beep(self, *args):
        # these tests used to use _have_soundcard(), but it's quite
        # possible to have a soundcard, and yet have the beep driver
        # disabled. So basically, we have no way of knowing whether
        # a beep should be produced or not, so currently if these
        # tests fail we're ignoring them
        #
        # XXX the right fix for this is to define something like
        # _have_enabled_beep_driver() and use that instead of the
        # try/except below
        try:
            winsound.Beep(*args)
        except RuntimeError:
            pass
class MessageBeepTest(unittest.TestCase):
    """Tests for winsound.MessageBeep with each documented MB_* constant."""
    def tearDown(self):
        # Give the (asynchronous) system sound time to finish before the
        # next test starts one.
        time.sleep(0.5)
    def test_default(self):
        self.assertRaises(TypeError, winsound.MessageBeep, "bad")
        self.assertRaises(TypeError, winsound.MessageBeep, 42, 42)
        winsound.MessageBeep()
    def test_ok(self):
        winsound.MessageBeep(winsound.MB_OK)
    def test_asterisk(self):
        winsound.MessageBeep(winsound.MB_ICONASTERISK)
    def test_exclamation(self):
        winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
    def test_hand(self):
        winsound.MessageBeep(winsound.MB_ICONHAND)
    def test_question(self):
        winsound.MessageBeep(winsound.MB_ICONQUESTION)
class PlaySoundTest(unittest.TestCase):
    """Tests for winsound.PlaySound: aliases, fallback behavior, async stop.

    Each alias test is skipped unless the registry says the event has a
    configured sound (see has_sound()); with a soundcard the sound is
    played, without one a RuntimeError is expected.
    """
    def test_errors(self):
        self.assertRaises(TypeError, winsound.PlaySound)
        self.assertRaises(TypeError, winsound.PlaySound, "bad", "bad")
        # SND_MEMORY expects a wave image in memory, not an alias string.
        self.assertRaises(
            RuntimeError,
            winsound.PlaySound,
            "none", winsound.SND_ASYNC | winsound.SND_MEMORY
        )
    @unittest.skipUnless(has_sound("SystemAsterisk"),
                         "No default SystemAsterisk")
    def test_alias_asterisk(self):
        if _have_soundcard():
            winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemAsterisk', winsound.SND_ALIAS
            )
    @unittest.skipUnless(has_sound("SystemExclamation"),
                         "No default SystemExclamation")
    def test_alias_exclamation(self):
        if _have_soundcard():
            winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemExclamation', winsound.SND_ALIAS
            )
    @unittest.skipUnless(has_sound("SystemExit"), "No default SystemExit")
    def test_alias_exit(self):
        if _have_soundcard():
            winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemExit', winsound.SND_ALIAS
            )
    @unittest.skipUnless(has_sound("SystemHand"), "No default SystemHand")
    def test_alias_hand(self):
        if _have_soundcard():
            winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemHand', winsound.SND_ALIAS
            )
    @unittest.skipUnless(has_sound("SystemQuestion"),
                         "No default SystemQuestion")
    def test_alias_question(self):
        if _have_soundcard():
            winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemQuestion', winsound.SND_ALIAS
            )
    def test_alias_fallback(self):
        # In the absense of the ability to tell if a sound was actually
        # played, this test has two acceptable outcomes: success (no error,
        # sound was theoretically played; although as issue #19987 shows
        # a box without a soundcard can "succeed") or RuntimeError.  Any
        # other error is a failure.
        try:
            winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
        except RuntimeError:
            pass
    def test_alias_nofallback(self):
        if _have_soundcard():
            # Note that this is not the same as asserting RuntimeError
            # will get raised:  you cannot convert this to
            # self.assertRaises(...) form.  The attempt may or may not
            # raise RuntimeError, but it shouldn't raise anything other
            # than RuntimeError, and that's all we're trying to test
            # here.  The MS docs aren't clear about whether the SDK
            # PlaySound() with SND_ALIAS and SND_NODEFAULT will return
            # True or False when the alias is unknown.  On Tim's WinXP
            # box today, it returns True (no exception is raised).  What
            # we'd really like to test is that no sound is played, but
            # that requires first wiring an eardrum class into unittest
            # <wink>.
            try:
                winsound.PlaySound(
                    '!"$%&/(#+*',
                    winsound.SND_ALIAS | winsound.SND_NODEFAULT
                )
            except RuntimeError:
                pass
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                '!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT
            )
    def test_stopasync(self):
        if _have_soundcard():
            # Start a looping async sound, verify SND_NOSTOP refuses to
            # interrupt it (or it already finished), then purge it.
            winsound.PlaySound(
                'SystemQuestion',
                winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
            )
            time.sleep(0.5)
            try:
                winsound.PlaySound(
                    'SystemQuestion',
                    winsound.SND_ALIAS | winsound.SND_NOSTOP
                )
            except RuntimeError:
                pass
            else: # the first sound might already be finished
                pass
            winsound.PlaySound(None, winsound.SND_PURGE)
        else:
            # Issue 8367: PlaySound(None, winsound.SND_PURGE)
            # does not raise on systems without a sound card.
            pass
def _get_cscript_path():
"""Return the full path to cscript.exe or None."""
for dir in os.environ.get("PATH", "").split(os.pathsep):
cscript_path = os.path.join(dir, "cscript.exe")
if os.path.exists(cscript_path):
return cscript_path
# Module-level cache for _have_soundcard(); None means "not determined yet".
__have_soundcard_cache = None
def _have_soundcard():
    """Return True iff this computer has a soundcard."""
    global __have_soundcard_cache
    if __have_soundcard_cache is None:
        cscript_path = _get_cscript_path()
        if cscript_path is None:
            # Could not find cscript.exe to run our VBScript helper. Default
            # to True: most computers these days *do* have a soundcard.
            # (Note: this early return deliberately leaves the cache unset,
            # so the PATH scan repeats on the next call.)
            return True
        check_script = os.path.join(os.path.dirname(__file__),
                                    "check_soundcard.vbs")
        p = subprocess.Popen([cscript_path, check_script],
                             stdout=subprocess.PIPE)
        # The helper script exits 0 when a soundcard is present, so a zero
        # return code maps to True.
        __have_soundcard_cache = not p.wait()
        p.stdout.close()
    return __have_soundcard_cache
def test_main():
    # Pre-unittest-discovery entry point used by regrtest.
    support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest)
if __name__=="__main__":
    test_main()
| lgpl-3.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/google/appengine/runtime/apiproxy.py | 11 | 8364 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Makes API calls to various Google-provided services.
Provides methods for making calls into Google Apphosting services and APIs
from your application code. This code will only work properly from within
the Google Apphosting environment.
"""
import sys
# Snapshot whether apiproxy_stub_map was imported before this module; the
# assert below verifies that importing apiproxy itself does not pull it in.
apiproxy_stub_map_loaded = (
    'google.appengine.api.apiproxy_stub_map' in sys.modules)
from google.net.proto import ProtocolBuffer
from google.appengine import runtime
from google.appengine.api import apiproxy_rpc
from google3.apphosting.runtime import _apphosting_runtime___python__apiproxy
from google.appengine.runtime import apiproxy_errors
from google.net.proto2.python.public import message
# Guard against a circular-import regression: none of the imports above may
# (transitively) import apiproxy_stub_map, or GetDefaultAPIProxy breaks.
assert (('google.appengine.api.apiproxy_stub_map' in sys.modules) ==
        apiproxy_stub_map_loaded), ('apiproxy_stub_map imported which breaks '
                                    'apiproxy_stub_map.GetDefaultAPIProxy due '
                                    'to circular import.')
# Application-level error code raised by the memcache backend when the
# service is down.  NOTE(review): numerically equal to REQUEST_TOO_LARGE
# below, but the two appear to live in different namespaces
# ('application_error' vs 'error' in _MakeCallDone) -- confirm.
MEMCACHE_UNAVAILABLE = 9
# RPC-level status codes shared with the apphosting runtime.
OK = 0
RPC_FAILED = 1
CALL_NOT_FOUND = 2
ARGUMENT_ERROR = 3
DEADLINE_EXCEEDED = 4
CANCELLED = 5
APPLICATION_ERROR = 6
OTHER_ERROR = 7
OVER_QUOTA = 8
REQUEST_TOO_LARGE = 9
CAPABILITY_DISABLED = 10
FEATURE_DISABLED = 11
RESPONSE_TOO_LARGE = 12
# Maps an RPC-level status code to (exception class, message template); the
# template is filled with (package, call) in _MakeCallDone.  APPLICATION_ERROR,
# CAPABILITY_DISABLED and FEATURE_DISABLED are handled specially and are
# intentionally absent here.
_ExceptionsMap = {
  RPC_FAILED:
  (apiproxy_errors.RPCFailedError,
   "The remote RPC to the application server failed for the call %s.%s()."),
  CALL_NOT_FOUND:
  (apiproxy_errors.CallNotFoundError,
   "The API package '%s' or call '%s()' was not found."),
  ARGUMENT_ERROR:
  (apiproxy_errors.ArgumentError,
   "An error occurred parsing (locally or remotely) the arguments to %s.%s()."),
  DEADLINE_EXCEEDED:
  (apiproxy_errors.DeadlineExceededError,
   "The API call %s.%s() took too long to respond and was cancelled."),
  CANCELLED:
  (apiproxy_errors.CancelledError,
   "The API call %s.%s() was explicitly cancelled."),
  OTHER_ERROR:
  (apiproxy_errors.Error,
   "An error occurred for the API request %s.%s()."),
  OVER_QUOTA:
  (apiproxy_errors.OverQuotaError,
   "The API call %s.%s() required more quota than is available."),
  REQUEST_TOO_LARGE:
  (apiproxy_errors.RequestTooLargeError,
   "The request to API call %s.%s() was too large."),
  RESPONSE_TOO_LARGE:
  (apiproxy_errors.ResponseTooLargeError,
   "The response from API call %s.%s() was too large."),
}
# Request/response objects may be either legacy proto1 messages or proto2
# messages; both expose SerializeToString/ParseFromString.
PROTO_BASE_CLASSES = (ProtocolBuffer.ProtocolMessage, message.Message)
class RPC(apiproxy_rpc.RPC):
  """A RPC object, suitable for talking to remote services.
  Each instance of this object can be used only once, and should not be reused.
  Stores the data members and methods for making RPC calls via the APIProxy.

  NOTE: this module is Python 2 only (three-argument raise, old except
  syntax); do not port individual pieces without updating the whole file.
  """
  def __init__(self, *args, **kargs):
    """Constructor for the RPC object. All arguments are optional, and
    simply set members on the class. These data members will be
    overriden by values passed to MakeCall.
    """
    super(RPC, self).__init__(*args, **kargs)
    # Populated in-place by the C runtime during MakeCall; holds 'error',
    # 'application_error', 'error_detail', 'result_string',
    # 'cpu_usage_mcycles' (see _MakeCallDone).
    self._result_dict = {}
  def _WaitImpl(self):
    """Waits on the API call associated with this RPC. The callback,
    if provided, will be executed before Wait() returns. If this RPC
    is already complete, or if the RPC was never started, this
    function will return immediately.
    Raises:
      InterruptedError if a callback throws an uncaught exception.
    """
    try:
      rpc_completed = _apphosting_runtime___python__apiproxy.Wait(self)
    except (runtime.DeadlineExceededError, apiproxy_errors.InterruptedError):
      # Already the exception types callers expect; propagate unchanged.
      raise
    except:
      # Any other exception escaping Wait() came from a callback; wrap it
      # in InterruptedError, preserving the original traceback (Python 2
      # three-argument raise).
      exc_class, exc, tb = sys.exc_info()
      if (isinstance(exc, SystemError) and
          exc.args[0] == 'uncaught RPC exception'):
        raise
      rpc = None
      if hasattr(exc, "_appengine_apiproxy_rpc"):
        rpc = exc._appengine_apiproxy_rpc
      new_exc = apiproxy_errors.InterruptedError(exc, rpc)
      raise new_exc.__class__, new_exc, tb
    return True
  def _MakeCallImpl(self):
    # The verbose assertion messages help diagnose duplicate/conflicting
    # proto modules on sys.path when the isinstance check fails.
    assert isinstance(self.request, PROTO_BASE_CLASSES), 'not isinstance(%r, %r): sys.modules=%r, sys.path=%r' % (
        self.request.__class__,
        PROTO_BASE_CLASSES,
        sys.modules,
        sys.path)
    assert isinstance(self.response, PROTO_BASE_CLASSES), 'not isinstance(%r, %r): sys.modules=%r, sys.path=%r' % (
        self.response.__class__,
        PROTO_BASE_CLASSES,
        sys.modules,
        sys.path)
    request_data = self.request.SerializeToString()
    self._state = RPC.RUNNING
    # deadline of -1 means "no explicit deadline" to the C runtime.
    _apphosting_runtime___python__apiproxy.MakeCall(
        self.package, self.call, request_data, self._result_dict,
        self._MakeCallDone, self, deadline=(self.deadline or -1))
  def _MakeCallDone(self):
    # Completion callback invoked by the C runtime: translate the raw
    # result dict into either a parsed response or a stored exception.
    self._state = RPC.FINISHING
    self.cpu_usage_mcycles = self._result_dict['cpu_usage_mcycles']
    if self._result_dict['error'] == APPLICATION_ERROR:
      appl_err = self._result_dict['application_error']
      if appl_err == MEMCACHE_UNAVAILABLE and self.package == 'memcache':
        # Memcache unavailability is surfaced as a capability error rather
        # than a generic application error.
        self._exception = apiproxy_errors.CapabilityDisabledError(
            'The memcache service is temporarily unavailable. %s'
            % self._result_dict['error_detail'])
      else:
        self._exception = apiproxy_errors.ApplicationError(
            appl_err,
            self._result_dict['error_detail'])
    elif self._result_dict['error'] == CAPABILITY_DISABLED:
      if self._result_dict['error_detail']:
        self._exception = apiproxy_errors.CapabilityDisabledError(
            self._result_dict['error_detail'])
      else:
        self._exception = apiproxy_errors.CapabilityDisabledError(
            "The API call %s.%s() is temporarily unavailable." % (
            self.package, self.call))
    elif self._result_dict['error'] == FEATURE_DISABLED:
      self._exception = apiproxy_errors.FeatureNotEnabledError(
          self._result_dict['error_detail'])
    elif self._result_dict['error'] in _ExceptionsMap:
      exception_entry = _ExceptionsMap[self._result_dict['error']]
      self._exception = exception_entry[0](
          exception_entry[1] % (self.package, self.call))
    else:
      # Success: parse the payload; a parse failure is stored as the
      # RPC's exception rather than raised here (Python 2 except syntax).
      try:
        self.response.ParseFromString(self._result_dict['result_string'])
      except Exception, e:
        self._exception = e
    self._Callback()
def CreateRPC():
  """Build a fresh RPC object for a single remote-service call.

  RPC instances are strictly single-use: construct a new one for every
  call rather than reusing a finished instance.

  Returns:
    A newly constructed RPC instance.
  """
  new_rpc = RPC()
  return new_rpc
def MakeSyncCall(package, call, request, response):
  """Perform one blocking API call and verify its outcome.

  Creates a single-use RPC, issues the call, waits for completion, and
  raises if the call failed.  request and response must be the
  appropriately typed protocol buffers for the API method.

  Args:
    package: string, API package name (e.g. 'memcache').
    call: string, method name within the package.
    request: request protocol buffer for the call.
    response: response protocol buffer, filled in on success.

  Raises:
    Whatever CheckSuccess() raises for the failed call.
  """
  sync_rpc = CreateRPC()
  sync_rpc.MakeCall(package, call, request, response)
  sync_rpc.Wait()
  sync_rpc.CheckSuccess()
def CancelApiCalls():
  """Cancels all outstanding API calls."""
  # Thin pass-through to the C runtime.
  _apphosting_runtime___python__apiproxy.CancelApiCalls()
def GetRequestCpuUsage():
  """Returns the number of megacycles used so far by this request.
  Returns:
    The number of megacycles used so far by this request. Does not include CPU
    used by API calls.
  """
  # Thin pass-through to the C runtime.
  return _apphosting_runtime___python__apiproxy.get_request_cpu_usage()
def GetRequestApiCpuUsage():
  """Returns the number of megacycles used by API calls.
  Returns:
    The number of megacycles used by API calls so far during this request. Does
    not include CPU used by the request code itself.
  """
  # Thin pass-through to the C runtime; complements GetRequestCpuUsage().
  return _apphosting_runtime___python__apiproxy.get_request_api_cpu_usage()
| lgpl-3.0 |
caikehe/Video-game-based-on-fifengine | utils/util_scripts/path.py | 4 | 32167 | """ path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 9 Mar 2007
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs
try:
    # Python 2.5 or higher
    from hashlib import md5
except ImportError:
    # Python 2.4 or lower
    from md5 import new as md5
__version__ = '2.2'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
    try:
        import win32security
    except ImportError:
        win32security = None
else:
    try:
        import pwd
    except ImportError:
        pwd = None
# Pre-2.3 support.  Are unicode filenames supported?
# NOTE: `unicode`, `file` and os.getcwdu below make this module Python 2-only.
_base = str
_getcwd = os.getcwd
try:
    if os.path.supports_unicode_filenames:
        _base = unicode
        _getcwd = os.getcwdu
except AttributeError:
    pass
# Pre-2.3 workaround for booleans
try:
    True, False
except NameError:
    True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
    basestring
except NameError:
    basestring = (str, unicode)
# Universal newline support: use 'U' mode for text when the runtime's file
# objects track newlines, otherwise plain 'r'.
_textmode = 'r'
if hasattr(file, 'newlines'):
    _textmode = 'U'
class TreeWalkWarning(Warning):
    """Warning issued by walk()/walkdirs()/walkfiles() when errors='warn'."""
    pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
    # --- Special Python methods.
    def __repr__(self):
        return 'path(%s)' % _base.__repr__(self)
    # Adding a path and a string yields a path.
    def __add__(self, more):
        try:
            resultStr = _base.__add__(self, more)
        except TypeError: # Python bug: str.__add__ may raise instead of
                          # returning NotImplemented for foreign types.
            resultStr = NotImplemented
        if resultStr is NotImplemented:
            return resultStr
        return self.__class__(resultStr)
    def __radd__(self, other):
        if isinstance(other, basestring):
            return self.__class__(other.__add__(self))
        else:
            return NotImplemented
    # The / operator joins paths.
    def __div__(self, rel):
        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
        Join two path components, adding a separator character if
        needed.
        """
        return self.__class__(os.path.join(self, rel))
    # Make the / operator work even when true division is enabled.
    __truediv__ = __div__
    def getcwd(cls):
        """ Return the current working directory as a path object. """
        return cls(_getcwd())
    # Pre-2.4 style classmethod registration (no decorator syntax).
    getcwd = classmethod(getcwd)
    # --- Operations on path strings.
    # Simple delegations to os.path, re-wrapped so results stay path objects.
    isabs = os.path.isabs
    def abspath(self): return self.__class__(os.path.abspath(self))
    def normcase(self): return self.__class__(os.path.normcase(self))
    def normpath(self): return self.__class__(os.path.normpath(self))
    def realpath(self): return self.__class__(os.path.realpath(self))
    def expanduser(self): return self.__class__(os.path.expanduser(self))
    def expandvars(self): return self.__class__(os.path.expandvars(self))
    def dirname(self): return self.__class__(os.path.dirname(self))
    basename = os.path.basename
    def expand(self):
        """ Clean up a filename by calling expandvars(),
        expanduser(), and normpath() on it.
        This is commonly everything needed to clean up a filename
        read from a configuration file, for example.
        """
        return self.expandvars().expanduser().normpath()
    # Private accessors backing the properties defined below.
    def _get_namebase(self):
        base, ext = os.path.splitext(self.name)
        return base
    def _get_ext(self):
        f, ext = os.path.splitext(_base(self))
        return ext
    def _get_drive(self):
        drive, r = os.path.splitdrive(self)
        return self.__class__(drive)
    parent = property(
        dirname, None, None,
        """ This path's parent directory, as a new path object.
        For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
        """)
    name = property(
        basename, None, None,
        """ The name of this file or directory without the full path.
        For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
        """)
    namebase = property(
        _get_namebase, None, None,
        """ The same as path.name, but with one file extension stripped off.
        For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
        but path('/home/guido/python.tar.gz').namebase == 'python.tar'
        """)
    ext = property(
        _get_ext, None, None,
        """ The file extension, for example '.py'. """)
    drive = property(
        _get_drive, None, None,
        """ The drive specifier, for example 'C:'.
        This is always empty on systems that don't use drive specifiers.
        """)
    def splitpath(self):
        """ p.splitpath() -> Return (p.parent, p.name). """
        parent, child = os.path.split(self)
        return self.__class__(parent), child
    def splitdrive(self):
        """ p.splitdrive() -> Return (p.drive, <the rest of p>).
        Split the drive specifier from this path.  If there is
        no drive specifier, p.drive is empty, so the return value
        is simply (path(''), p).  This is always the case on Unix.
        """
        drive, rel = os.path.splitdrive(self)
        return self.__class__(drive), rel
    def splitext(self):
        """ p.splitext() -> Return (p.stripext(), p.ext).
        Split the filename extension from this path and return
        the two parts.  Either part may be empty.
        The extension is everything from '.' to the end of the
        last path segment.  This has the property that if
        (a, b) == p.splitext(), then a + b == p.
        """
        filename, ext = os.path.splitext(self)
        return self.__class__(filename), ext
    def stripext(self):
        """ p.stripext() -> Remove one file extension from the path.
        For example, path('/home/guido/python.tar.gz').stripext()
        returns path('/home/guido/python.tar').
        """
        return self.splitext()[0]
    # UNC helpers exist only on platforms whose os.path provides splitunc
    # (i.e. Windows).
    if hasattr(os.path, 'splitunc'):
        def splitunc(self):
            unc, rest = os.path.splitunc(self)
            return self.__class__(unc), rest
        def _get_uncshare(self):
            unc, r = os.path.splitunc(self)
            return self.__class__(unc)
        uncshare = property(
            _get_uncshare, None, None,
            """ The UNC mount point for this path.
            This is empty for paths on local drives. """)
    def joinpath(self, *args):
        """ Join two or more path components, adding a separator
        character (os.sep) if needed.  Returns a new path
        object.
        """
        return self.__class__(os.path.join(self, *args))
    def splitall(self):
        r""" Return a list of the path components in this path.
        The first item in the list will be a path.  Its value will be
        either os.curdir, os.pardir, empty, or the root directory of
        this path (for example, '/' or 'C:\\').  The other items in
        the list will be strings.
        path.path.joinpath(*result) will yield the original path.
        """
        parts = []
        loc = self
        while loc != os.curdir and loc != os.pardir:
            prev = loc
            loc, child = prev.splitpath()
            # splitpath() returning its input unchanged means we reached
            # the root (or an irreducible component); stop to avoid looping.
            if loc == prev:
                break
            parts.append(child)
        parts.append(loc)
        parts.reverse()
        return parts
    def relpath(self):
        """ Return this path as a relative path,
        based from the current working directory.
        """
        cwd = self.__class__(os.getcwd())
        return cwd.relpathto(self)
    def relpathto(self, dest):
        """ Return a relative path from self to dest.
        If there is no relative path from self to dest, for example if
        they reside on different drives in Windows, then this returns
        dest.abspath().
        """
        origin = self.abspath()
        dest = self.__class__(dest).abspath()
        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()
        if orig_list[0] != os.path.normcase(dest_list[0]):
            # Can't get here from there.
            return dest
        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != os.path.normcase(dest_seg):
                break
            i += 1
        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = os.path.join(*segments)
        return self.__class__(relpath)
    # --- Listing, searching, walking, and matching
    def listdir(self, pattern=None):
        """ D.listdir() -> List of items in this directory.
        Use D.files() or D.dirs() instead if you want a listing
        of just files or just subdirectories.
        The elements of the list are path objects.
        With the optional 'pattern' argument, this only lists
        items whose names match the given pattern.
        """
        names = os.listdir(self)
        if pattern is not None:
            names = fnmatch.filter(names, pattern)
        # '/' is the overloaded join operator (__div__), so each entry comes
        # back as a full path object, not a bare name.
        return [self / child for child in names]
    def dirs(self, pattern=None):
        """ D.dirs() -> List of this directory's subdirectories.
        The elements of the list are path objects.
        This does not walk recursively into subdirectories
        (but see path.walkdirs).
        With the optional 'pattern' argument, this only lists
        directories whose names match the given pattern.  For
        example, d.dirs('build-*').
        """
        return [p for p in self.listdir(pattern) if p.isdir()]
    def files(self, pattern=None):
        """ D.files() -> List of the files in this directory.
        The elements of the list are path objects.
        This does not walk into subdirectories (see path.walkfiles).
        With the optional 'pattern' argument, this only lists files
        whose names match the given pattern.  For example,
        d.files('*.pyc').
        """
        return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.
        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().
        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.
        The errors= keyword argument controls behavior when an
        error occurs.  The default is 'strict', which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.

        NOTE(review): symlink loops are not detected (see module TODO).
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            childList = self.listdir()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in childList:
            if pattern is None or child.fnmatch(pattern):
                yield child
            # isdir() is probed under the same errors= policy, so a
            # permission failure on one child doesn't abort the walk.
            try:
                isdir = child.isdir()
            except Exception:
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise
            if isdir:
                for item in child.walk(pattern, errors):
                    yield item
    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.
        With the optional 'pattern' argument, this yields only
        directories whose names match the given pattern.  For
        example, mydir.walkdirs('*test') yields only directories
        with names ending in 'test'.
        The errors= keyword argument controls behavior when an
        error occurs.  The default is 'strict', which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # Note: unlike walk(), the pattern gates only the yield; the
            # recursion below always descends into every subdirectory.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and os.path.isdir(child)
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if self.name matches the given pattern.
        pattern - A filename pattern with wildcards,
            for example '*.py'.
        """
        # The method name shadows the 'fnmatch' module only in the class
        # namespace; inside this body 'fnmatch' resolves to the module.
        return fnmatch.fnmatch(self.name, pattern)
    def glob(self, pattern):
        """ Return a list of path objects that match the pattern.
        pattern - a path relative to this directory, with wildcards.
        For example, path('/users').glob('*/bin/*') returns a list
        of all the files users have in their bin directories.
        """
        cls = self.__class__
        # _base (defined elsewhere in this module) converts to a plain
        # string for glob.glob; each match is wrapped back into this
        # class so subclasses are honored.
        return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
    def open(self, mode='r'):
        """ Open this file.  Return a file object.
        mode - Same mode string accepted by the builtin (e.g. 'rb', 'ab').
        """
        # Uses the Python 2 'file' builtin (removed in Python 3).
        return file(self, mode)
    def bytes(self):
        """ Open this file, read all bytes, return them as a string. """
        # try/finally rather than 'with' -- presumably kept for
        # pre-2.5 compatibility (the module documents Python 2.3 support).
        f = self.open('rb')
        try:
            return f.read()
        finally:
            f.close()
    def write_bytes(self, bytes, append=False):
        """ Open this file and write the given bytes to it.
        Default behavior is to overwrite any existing file.
        Call p.write_bytes(bytes, append=True) to append instead.
        """
        # NOTE: the 'bytes' parameter shadows the builtin of the same name.
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            f.write(bytes)
        finally:
            f.close()
    def text(self, encoding=None, errors='strict'):
        r""" Open this file, read it in, return the content as a string.
        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
        are automatically translated to '\n'.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
            the file.  If present, the content of the file is
            decoded and returned as a unicode object; otherwise
            it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options.  Default is 'strict'.
        """
        if encoding is None:
            # 8-bit
            # _textmode (module-level constant) selects 'U' universal
            # newline mode where available.
            f = self.open(_textmode)
            try:
                return f.read()
            finally:
                f.close()
        else:
            # Unicode
            f = codecs.open(self, 'r', encoding, errors)
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode, even in Python 2.3.)
            try:
                t = f.read()
            finally:
                f.close()
            # Manually normalize every standard Unicode line ending
            # (CRLF, CR+NEL, CR, NEL, LINE SEPARATOR) to '\n', since
            # codecs.open could not do it for us.
            return (t.replace(u'\r\n', u'\n')
                    .replace(u'\r\x85', u'\n')
                    .replace(u'\r', u'\n')
                    .replace(u'\x85', u'\n')
                    .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
    def lines(self, encoding=None, errors='strict', retain=True):
        r""" Open this file, read all lines, return them in a list.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
            the file.  The default is None, meaning the content
            of the file is read as 8-bit characters and returned
            as a list of (non-Unicode) str objects.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options.  Default is 'strict'
        retain - If true, retain newline characters; but all newline
            character combinations ('\r', '\n', '\r\n') are
            translated to '\n'.  If false, newline characters are
            stripped off.  Default is True.
        This uses 'U' mode in Python 2.3 and later.
        """
        if encoding is None and retain:
            # Fast path: universal-newline mode already normalizes the
            # line endings, so readlines() is enough.
            f = self.open(_textmode)
            try:
                return f.readlines()
            finally:
                f.close()
        else:
            # Decoding and/or newline stripping needed: reuse text(),
            # which normalizes all line endings to '\n'; 'retain' maps
            # directly onto splitlines()' keepends flag.
            return self.text(encoding, errors).splitlines(retain)
    def write_lines(self, lines, encoding=None, errors='strict',
                    linesep=os.linesep, append=False):
        r""" Write the given lines of text to this file.
        By default this overwrites any existing file at this path.
        This puts a platform-specific newline sequence on every line.
        See 'linesep' below.
        lines - A list of strings.
        encoding - A Unicode encoding to use.  This applies only if
            'lines' contains any Unicode strings.
        errors - How to handle errors in Unicode encoding.  This
            also applies only to Unicode strings.
        linesep - The desired line-ending.  This line-ending is
            applied to every line.  If a line already has any
            standard line ending ('\r', '\n', '\r\n', u'\x85',
            u'\r\x85', u'\u2028'), that will be stripped off and
            this will be used instead.  The default is os.linesep,
            which is platform-dependent ('\r\n' on Windows, '\n' on
            Unix, etc.)  Specify None to write the lines as-is,
            like file.writelines().
        Use the keyword argument append=True to append lines to the
        file.  The default is to overwrite the file.  Warning:
        When you use this with Unicode data, if the encoding of the
        existing data in the file is different from the encoding
        you specify with the encoding= parameter, the result is
        mixed-encoding data, which can really confuse someone trying
        to read the file later.
        """
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            for line in lines:
                isUnicode = isinstance(line, unicode)
                if linesep is not None:
                    # Strip off any existing line-end and add the
                    # specified linesep string.
                    if isUnicode:
                        # u'\x0d\x85' is u'\r\x85' (CR + NEL), a two-char
                        # Unicode line ending like CRLF.
                        if line[-2:] in (u'\r\n', u'\x0d\x85'):
                            line = line[:-2]
                        elif line[-1:] in (u'\r', u'\n',
                                           u'\x85', u'\u2028'):
                            line = line[:-1]
                    else:
                        if line[-2:] == '\r\n':
                            line = line[:-2]
                        elif line[-1:] in ('\r', '\n'):
                            line = line[:-1]
                    line += linesep
                if isUnicode:
                    if encoding is None:
                        # Resolved once, then reused for subsequent lines
                        # (the local 'encoding' is rebound here).
                        encoding = sys.getdefaultencoding()
                    line = line.encode(encoding, errors)
                f.write(line)
        finally:
            f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
    # --- Methods for querying the filesystem.
    # os.path predicates/getters are plain functions, so assigning them
    # here turns them into methods whose argument is the path object
    # (which is string-like, so the os.path functions accept it).
    exists = os.path.exists
    isdir = os.path.isdir
    isfile = os.path.isfile
    islink = os.path.islink
    ismount = os.path.ismount
    if hasattr(os.path, 'samefile'):
        # samefile is not available on every platform.
        samefile = os.path.samefile
    getatime = os.path.getatime
    atime = property(
        getatime, None, None,
        """ Last access time of the file. """)
    getmtime = os.path.getmtime
    mtime = property(
        getmtime, None, None,
        """ Last-modified time of the file. """)
    if hasattr(os.path, 'getctime'):
        getctime = os.path.getctime
        ctime = property(
            getctime, None, None,
            """ Creation time of the file. """)
    getsize = os.path.getsize
    size = property(
        getsize, None, None,
        """ Size of the file, in bytes. """)
    if hasattr(os, 'access'):
        def access(self, mode):
            """ Return true if current user has access to this path.
            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
            """
            return os.access(self, mode)
    def stat(self):
        """ Perform a stat() system call on this path. """
        return os.stat(self)
    def lstat(self):
        """ Like path.stat(), but do not follow symbolic links. """
        return os.lstat(self)
    def get_owner(self):
        r""" Return the name of the owner of this file or directory.
        This follows symbolic links.
        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
        On Windows, a group can own a file or directory.
        """
        if os.name == 'nt':
            # win32security/pwd are imported at module level with a
            # None fallback when unavailable.
            if win32security is None:
                raise Exception("path.owner requires win32all to be installed")
            desc = win32security.GetFileSecurity(
                self, win32security.OWNER_SECURITY_INFORMATION)
            sid = desc.GetSecurityDescriptorOwner()
            account, domain, typecode = win32security.LookupAccountSid(None, sid)
            return domain + u'\\' + account
        else:
            if pwd is None:
                raise NotImplementedError("path.owner is not implemented on this platform.")
            st = self.stat()
            return pwd.getpwuid(st.st_uid).pw_name
    owner = property(
        get_owner, None, None,
        """ Name of the owner of this file or directory. """)
    if hasattr(os, 'statvfs'):
        def statvfs(self):
            """ Perform a statvfs() system call on this path. """
            return os.statvfs(self)
    if hasattr(os, 'pathconf'):
        def pathconf(self, name):
            """ Return the pathconf() system configuration value 'name'
            for this path (POSIX only). """
            return os.pathconf(self, name)
    # --- Modifying operations on files and directories
    def utime(self, times):
        """ Set the access and modified times of this file. """
        os.utime(self, times)
    def chmod(self, mode):
        """ Change this path's permission bits (see os.chmod). """
        os.chmod(self, mode)
    if hasattr(os, 'chown'):
        def chown(self, uid, gid):
            """ Change this path's owner/group ids (POSIX only). """
            os.chown(self, uid, gid)
    def rename(self, new):
        """ Rename this path to 'new' (see os.rename). """
        os.rename(self, new)
    def renames(self, new):
        """ Rename, creating intermediate directories for 'new' and
        pruning emptied parents of the old name (see os.renames). """
        os.renames(self, new)
    # --- Create/delete operations on directories
    # NOTE: 0777/0666 below are Python 2 octal literals; the effective
    # mode is further masked by the process umask.
    def mkdir(self, mode=0777):
        """ Create this directory. """
        os.mkdir(self, mode)
    def makedirs(self, mode=0777):
        """ Create this directory, including any missing parents. """
        os.makedirs(self, mode)
    def rmdir(self):
        """ Remove this (empty) directory. """
        os.rmdir(self)
    def removedirs(self):
        """ Remove this directory, then prune newly-empty parents. """
        os.removedirs(self)
    # --- Modifying operations on files
    def touch(self):
        """ Set the access/modified times of this file to the current time.
        Create the file if it does not exist.
        """
        # O_CREAT without O_TRUNC: existing contents are preserved.
        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
        os.close(fd)
        # times=None means "set to current time".
        os.utime(self, None)
    def remove(self):
        """ Delete this file (see os.remove). """
        os.remove(self)
    def unlink(self):
        """ Delete this file; equivalent to remove() (see os.unlink). """
        os.unlink(self)
    # --- Links
    if hasattr(os, 'link'):
        def link(self, newpath):
            """ Create a hard link at 'newpath', pointing to this file. """
            os.link(self, newpath)
    if hasattr(os, 'symlink'):
        def symlink(self, newlink):
            """ Create a symbolic link at 'newlink', pointing here. """
            os.symlink(self, newlink)
    if hasattr(os, 'readlink'):
        def readlink(self):
            """ Return the path to which this symbolic link points.
            The result may be an absolute or a relative path.
            """
            return self.__class__(os.readlink(self))
        def readlinkabs(self):
            """ Return the path to which this symbolic link points.
            The result is always an absolute path.
            """
            p = self.readlink()
            if p.isabs():
                return p
            else:
                # Relative targets are resolved against this link's parent.
                return (self.parent / p).abspath()
    # --- High-level functions from shutil
    # As with the os.path functions above, plain shutil functions become
    # methods whose first argument is this path object.
    copyfile = shutil.copyfile
    copymode = shutil.copymode
    copystat = shutil.copystat
    copy = shutil.copy
    copy2 = shutil.copy2
    copytree = shutil.copytree
    if hasattr(shutil, 'move'):
        move = shutil.move
    rmtree = shutil.rmtree
    # --- Special stuff from os
    if hasattr(os, 'chroot'):
        def chroot(self):
            """ Change the process's root directory to this path (POSIX). """
            os.chroot(self)
    if hasattr(os, 'startfile'):
        def startfile(self):
            """ Open this file with its associated application (Windows). """
            os.startfile(self)
| lgpl-2.1 |
Teino1978-Corp/Teino1978-Corp-light_.gitignore | light_light_settings.py | 1 | 2432 | import os
# Project root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# django.core.urlresolvers indicates Django <= 1.9 (moved to django.urls later).
from django.core.urlresolvers import reverse_lazy
# reverse_lazy is required here: URLconfs are not loaded yet at settings time.
LOGIN_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('timeline')
LOGOUT_URL = reverse_lazy('logout')
# SECURITY NOTE(review): SECRET_KEY is hard-coded in the repository; it
# should be loaded from the environment and this value rotated.
SECRET_KEY = '_f=fga5pp@z#0^k*94^c&@#&cs+_b!h*s^0-m71*u(-d!-tx5!'
# SECURITY NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Django contrib apps first, then project apps.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'authenticated',
    'indicators',
    'data',
    'master',
    'financials',
    'imports',
    'reports',
    'random_messages',
    'design',
    'actions',
    'test_db',
    'temp',
    'chat',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.request",
    # Project-specific processors injecting menu and chat data.
    "design.context_processors.menu",
    "design.context_processors.chat",
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'light.urls'
WSGI_APPLICATION = 'light.wsgi.application'
# SECURITY NOTE(review): database credentials are committed in source;
# move USER/PASSWORD to environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.oracle',
        'NAME': 'xe',
        'USER': 'light',
        'PASSWORD': 'gestionventas',
        'HOST': 'localhost',
        'PORT': '1521'
    }
}
LANGUAGE_CODE = 'es-CL'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static', 'static'),
)
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
MEDIA_URL = '/media/'
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
    os.path.join(BASE_DIR, 'design', 'templates'),
    os.path.join(BASE_DIR, 'actions', 'templates'),
)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True | mit |
dohoangkhiem/ansible-modules-extras | system/puppet.py | 51 | 6731 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import pipes
import stat
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
manifest:
desciption:
- Path to the manifest file to run puppet apply on.
required: false
default: None
show_diff:
description:
- Should puppet return diffs of changes applied. Defaults to off to avoid leaking secret changes by default.
required: false
default: no
choices: [ "yes", "no" ]
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet: timeout=5m
# Run puppet using a different environment
- puppet: environment=testing
'''
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
def main():
    """Entry point: run 'puppet agent' or 'puppet apply' and translate
    puppet's exit codes into Ansible results."""
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(default="30m"),
            puppetmaster=dict(required=False, default=None),
            manifest=dict(required=False, default=None),
            show_diff=dict(
                default=False, aliases=['show-diff'], type='bool'),
            facts=dict(default=None),
            facter_basename=dict(default='ansible'),
            environment=dict(required=False, default=None),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ('puppetmaster', 'manifest'),
        ],
    )
    p = module.params
    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False)
    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")
    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(
                msg="Manifest file %(manifest)s not found." % dict(
                    manifest=p['manifest']))
    # Check if puppet is disabled here
    # (agent mode only: 'puppet apply' ignores the agent lockfile).
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        if os.path.exists(stdout.strip()):
            module.fail_json(
                msg="Puppet agent is administratively disabled.", disabled=True)
        elif rc != 0:
            module.fail_json(
                msg="Puppet agent state could not be determined.")
    # Persist external facts before the run so puppet can see them.
    if module.params['facts'] and not module.check_mode:
        _write_structured_data(
            _get_facter_dir(),
            module.params['facter_basename'],
            module.params['facts'])
    # 'timeout -s 9' hard-kills puppet if it exceeds the requested time,
    # which produces the rc==124 case handled below.
    base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict(
        timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD)
    if not p['manifest']:
        cmd = ("%(base_cmd)s agent --onetime"
               " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose") % dict(
                   base_cmd=base_cmd,
                   )
        if p['puppetmaster']:
            cmd += " --server %s" % pipes.quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        # NOTE(review): unlike puppetmaster above, 'environment' is
        # single-quoted by hand rather than pipes.quote()d -- a value
        # containing a quote would break/inject into the shell command.
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if module.check_mode:
            cmd += " --noop"
        else:
            cmd += " --no-noop"
    else:
        cmd = "%s apply --detailed-exitcodes " % base_cmd
        if p['environment']:
            cmd += "--environment '%s' " % p['environment']
        if module.check_mode:
            cmd += "--noop "
        else:
            cmd += "--no-noop "
        cmd += pipes.quote(p['manifest'])
    rc, stdout, stderr = module.run_command(cmd)
    # --detailed-exitcodes: 0 = no changes, 2 = changes applied,
    # 1 = failure/disabled; 124 comes from the timeout(1) wrapper.
    if rc == 0:
        # success
        module.exit_json(rc=rc, changed=False, stdout=stdout)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(
            rc=rc, disabled=disabled, msg=msg,
            error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True)
    elif rc == 124:
        # timeout
        module.exit_json(
            rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(
            rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
            stdout=stdout, stderr=stderr)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
TOCyna/tabelinha | flask/lib/python2.7/site-packages/sqlalchemy/util/compat.py | 70 | 6809 | # util/compat.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
import sys
try:
import threading
except ImportError:
import dummy_threading as threading
py33 = sys.version_info >= (3, 3)
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info < (3, 0)
py265 = sys.version_info >= (2, 6, 5)
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
win32 = sys.platform.startswith('win')
cpython = not pypy and not jython # TODO: something better for this ?
import collections
next = next
if py3k:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
# work around http://bugs.python.org/issue2646
if py265:
safe_kwarg = lambda arg: arg
else:
safe_kwarg = str
ArgSpec = collections.namedtuple("ArgSpec",
["args", "varargs", "keywords", "defaults"])
if py3k:
    # Python 3: map the py2 names onto their relocated stdlib homes.
    import builtins
    from inspect import getfullargspec as inspect_getfullargspec
    from urllib.parse import (quote_plus, unquote_plus,
                              parse_qsl, quote, unquote)
    import configparser
    from io import StringIO
    from io import BytesIO as byte_buffer
    def inspect_getargspec(func):
        # Truncate getfullargspec()'s 7-tuple to the legacy 4-field
        # ArgSpec shape used throughout the library.
        return ArgSpec(
            *inspect_getfullargspec(func)[0:4]
        )
    string_types = str,
    binary_type = bytes
    text_type = str
    int_types = int,
    iterbytes = iter
    def u(s):
        # All py3 strings are already unicode.
        return s
    def ue(s):
        return s
    def b(s):
        return s.encode("latin-1")
    if py32:
        callable = callable
    else:
        # callable() was removed in 3.0 and only restored in 3.2.
        def callable(fn):
            return hasattr(fn, '__call__')
    def cmp(a, b):
        # Replacement for the removed cmp() builtin.
        return (a > b) - (a < b)
    from functools import reduce
    print_ = getattr(builtins, "print")
    import_ = getattr(builtins, '__import__')
    import itertools
    itertools_filterfalse = itertools.filterfalse
    itertools_filter = filter
    itertools_imap = map
    from itertools import zip_longest
    import base64
    def b64encode(x):
        # bytes in, text out -- matching the py2 str-to-str signature.
        return base64.b64encode(x).decode('ascii')
    def b64decode(x):
        return base64.b64decode(x.encode('ascii'))
else:
    # Python 2: expose the same names using the py2 module layout.
    from inspect import getargspec as inspect_getfullargspec
    inspect_getargspec = inspect_getfullargspec
    from urllib import quote_plus, unquote_plus, quote, unquote
    from urlparse import parse_qsl
    import ConfigParser as configparser
    from StringIO import StringIO
    from cStringIO import StringIO as byte_buffer
    string_types = basestring,
    binary_type = str
    text_type = unicode
    int_types = int, long
    def iterbytes(buf):
        # Yield each byte of a str as an int, mirroring py3 bytes iteration.
        return (ord(byte) for byte in buf)
    def u(s):
        # this differs from what six does, which doesn't support non-ASCII
        # strings - we only use u() with
        # literal source strings, and all our source files with non-ascii
        # in them (all are tests) are utf-8 encoded.
        return unicode(s, "utf-8")
    def ue(s):
        return unicode(s, "unicode_escape")
    def b(s):
        return s
    def import_(*args):
        if len(args) == 4:
            # py2's __import__ rejects unicode entries in the fromlist.
            args = args[0:3] + ([str(arg) for arg in args[3]],)
        return __import__(*args)
    callable = callable
    cmp = cmp
    reduce = reduce
    import base64
    b64encode = base64.b64encode
    b64decode = base64.b64decode
def print_(*args, **kwargs):
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
for arg in enumerate(args):
if not isinstance(arg, basestring):
arg = str(arg)
fp.write(arg)
import itertools
itertools_filterfalse = itertools.ifilterfalse
itertools_filter = itertools.ifilter
itertools_imap = itertools.imap
from itertools import izip_longest as zip_longest
import time
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
from collections import namedtuple
from operator import attrgetter as dottedgetter
if py3k:
    def reraise(tp, value, tb=None, cause=None):
        """Re-raise *value* with traceback *tb*, optionally chained to
        *cause* (sets __cause__, i.e. 'raise ... from cause')."""
        if cause is not None:
            value.__cause__ = cause
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    def raise_from_cause(exception, exc_info=None):
        """Raise *exception* chained from the exception currently being
        handled (or from the given sys.exc_info() triple)."""
        if exc_info is None:
            exc_info = sys.exc_info()
        exc_type, exc_value, exc_tb = exc_info
        reraise(type(exception), exception, tb=exc_tb, cause=exc_value)
else:
exec("def reraise(tp, value, tb=None, cause=None):\n"
" raise tp, value, tb\n")
def raise_from_cause(exception, exc_info=None):
# not as nice as that of Py3K, but at least preserves
# the code line where the issue occurred
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_tb = exc_info
reraise(type(exception), exception, tb=exc_tb)
if py3k:
    exec_ = getattr(builtins, 'exec')
else:
    def exec_(func_text, globals_, lcl=None):
        # py2's exec is a statement, so it must itself be wrapped in an
        # exec'd string to keep this module importable on py3.
        if lcl is None:
            exec('exec func_text in globals_')
        else:
            exec('exec func_text in globals_, lcl')
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.
    Drops the middle class upon creation.
    Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/
    """
    # 'metaclass' is a temporary metaclass: creating the throwaway
    # 'temporary_class' below (this_bases=None) yields a plain type, but
    # when a user subclasses that type, __new__ runs again with real
    # bases and replaces the class with meta(name, bases, d) -- so the
    # intermediate class never appears in the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
from contextlib import contextmanager
try:
    from contextlib import nested
except ImportError:
    # removed in py3k, credit to mitsuhiko for
    # workaround
    @contextmanager
    def nested(*managers):
        """Enter *managers* left-to-right, yield the list of their
        __enter__() results, and unwind them in reverse on exit."""
        exits = []
        vars = []
        exc = (None, None, None)
        try:
            for mgr in managers:
                exit = mgr.__exit__
                enter = mgr.__enter__
                vars.append(enter())
                exits.append(exit)
            yield vars
        except:
            exc = sys.exc_info()
        finally:
            # Unwind in LIFO order; an __exit__ returning True swallows
            # the pending exception, and an __exit__ that itself raises
            # replaces it.
            while exits:
                exit = exits.pop()
                try:
                    if exit(*exc):
                        exc = (None, None, None)
                except:
                    exc = sys.exc_info()
            if exc != (None, None, None):
                reraise(exc[0], exc[1], exc[2])
| gpl-2.0 |
blopker/PCLite | pclite/repository/repository.py | 1 | 2083 | '''
The MIT License
Copyright (c) Bo Lopker, http://blopker.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class Repository(object):
    """A collection of packages keyed by package name.

    Objects that do not implement the package interface (no
    ``is_supported()`` attribute) and packages that report themselves
    unsupported are silently skipped.
    """
    def __init__(self, *args):
        self.packages = {}
        for package in args:
            try:
                self._add_package(package)
            except AttributeError:
                # Not a package-like object; skip it.
                continue

    def _add_package(self, package):
        """Register 'package' under its name if it is supported here."""
        # Write directly into self.packages; the intermediate one-entry
        # dict the original built and update()d added nothing.
        if package.is_supported():
            self.packages[package.name] = package

    def _sort_package_list(self, package_list):
        """Return 'package_list' sorted case-insensitively by first item."""
        def compare(lis):
            return lis[0].upper()
        return sorted(package_list, key=compare)

    def list(self):
        """Return every package's list entry, sorted by name."""
        install_list = [package.get_list()
                        for package in self.packages.values()]
        return self._sort_package_list(install_list)

    def merge(self, repository):
        """Fold another repository's packages into this one.

        Entries from 'repository' win on name collisions.  Returns self
        so calls can be chained.
        """
        self.packages.update(repository.packages)
        return self

    def get_package(self, name):
        """Return the package registered under 'name', or False if absent."""
        return self.packages.get(name, False)
| mit |
lucasdavila/web2py-appreport | modules/plugin_appreport/libs/appreport/libs/pisa/libs/html5lib/src/html5lib/treewalkers/simpletree.py | 27 | 2414 | import gettext
_ = gettext.gettext
import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
    """Given that simpletree has no performant way of getting a node's
    next sibling, this implementation returns "nodes" as tuples with the
    following content:
    1. The parent Node (Element, Document or DocumentFragment)
    2. The child index of the current node in its parent's children list
    3. A list used as a stack of all ancestors. It is a pair tuple whose
       first item is a parent Node and second item is a child index.
    """
    def getNodeDetails(self, node):
        """Return the _base token tuple describing 'node' (which may be
        either a raw root Node or a (parent, index, ancestors) tuple)."""
        if isinstance(node, tuple):  # It might be the root Node
            parent, idx, parents = node
            node = parent.childNodes[idx]
        # testing node.type allows us not to import treebuilders.simpletree
        if node.type in (1, 2):  # Document or DocumentFragment
            return (_base.DOCUMENT,)
        elif node.type == 3:  # DocumentType
            return _base.DOCTYPE, node.name, node.publicId, node.systemId
        elif node.type == 4:  # TextNode
            return _base.TEXT, node.value
        elif node.type == 5:  # Element
            return (_base.ELEMENT, node.namespace, node.name,
                    node.attributes.items(), node.hasContent())
        elif node.type == 6:  # CommentNode
            return _base.COMMENT, node.data
        else:
            # BUG FIX: the original read 'return _node.UNKNOWN, ...' --
            # '_node' is undefined, so any unrecognized node type raised
            # NameError instead of being reported as UNKNOWN.
            return _base.UNKNOWN, node.type
    def getFirstChild(self, node):
        """Descend to 'node's first child, pushing the current position
        onto the ancestor stack."""
        if isinstance(node, tuple):  # It might be the root Node
            parent, idx, parents = node
            parents.append((parent, idx))
            node = parent.childNodes[idx]
        else:
            parents = []
        assert node.hasContent(), "Node has no children"
        return (node, 0, parents)
    def getNextSibling(self, node):
        """Advance the child index within the same parent, or None when
        the parent's children are exhausted."""
        assert isinstance(node, tuple), "Node is not a tuple: " + str(node)
        parent, idx, parents = node
        idx += 1
        if len(parent.childNodes) > idx:
            return (parent, idx, parents)
        else:
            return None
    def getParentNode(self, node):
        """Pop back to the parent position recorded by getFirstChild()."""
        assert isinstance(node, tuple)
        parent, idx, parents = node
        if parents:
            parent, idx = parents.pop()
            return parent, idx, parents
        else:
            # HACK: We could return ``parent`` but None will stop the algorithm the same way
            return None
| lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.