| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
achang97/YouTunes | lib/python2.7/site-packages/youtube_dl/extractor/rds.py | 57 | 2800 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
js_to_json,
)
from ..compat import compat_str
class RDSIE(InfoExtractor):
IE_DESC = 'RDS.ca'
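    # The accent in "vidéos" may appear in URLs as plain "e", as UTF-8 "é", or
    # percent-encoded as %C3%A9, so _VALID_URL accepts all three forms.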
_VALID_URL = r'https?://(?:www\.)?rds\.ca/vid(?:[eé]|%C3%A9)os/(?:[^/]+/)*(?P<id>[^/]+)-\d+\.\d+'
_TESTS = [{
'url': 'http://www.rds.ca/videos/football/nfl/fowler-jr-prend-la-direction-de-jacksonville-3.1132799',
'info_dict': {
'id': '604333',
'display_id': 'fowler-jr-prend-la-direction-de-jacksonville',
'ext': 'mp4',
'title': 'Fowler Jr. prend la direction de Jacksonville',
'description': 'Dante Fowler Jr. est le troisième choix du repêchage 2015 de la NFL. ',
'timestamp': 1430397346,
'upload_date': '20150430',
'duration': 154.354,
'age_limit': 0,
}
}, {
'url': 'http://www.rds.ca/vid%C3%A9os/un-voyage-positif-3.877934',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
item = self._parse_json(self._search_regex(r'(?s)itemToPush\s*=\s*({.+?});', webpage, 'item'), display_id, js_to_json)
video_id = compat_str(item['id'])
title = item.get('title') or self._og_search_title(webpage) or self._html_search_meta(
'title', webpage, 'title', fatal=True)
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
thumbnail = item.get('urlImageBig') or self._og_search_thumbnail(webpage) or self._search_regex(
[r'<link[^>]+itemprop="thumbnailUrl"[^>]+href="([^"]+)"',
r'<span[^>]+itemprop="thumbnailUrl"[^>]+content="([^"]+)"'],
webpage, 'thumbnail', fatal=False)
timestamp = parse_iso8601(self._search_regex(
r'<span[^>]+itemprop="uploadDate"[^>]+content="([^"]+)"',
webpage, 'upload date', fatal=False))
duration = parse_duration(self._search_regex(
r'<span[^>]+itemprop="duration"[^>]+content="([^"]+)"',
webpage, 'duration', fatal=False))
age_limit = self._family_friendly_search(webpage)
return {
'_type': 'url_transparent',
'id': video_id,
'display_id': display_id,
'url': '9c9media:rds_web:%s' % video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'age_limit': age_limit,
'ie_key': 'NineCNineMedia',
}
| mit |
ecederstrand/django | tests/messages_tests/test_cookie.py | 299 | 7424 | import json
from django.contrib.messages import constants
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.cookie import (
CookieStorage, MessageDecoder, MessageEncoder,
)
from django.test import SimpleTestCase, override_settings
from django.utils.safestring import SafeData, mark_safe
from .base import BaseTests
def set_cookie_data(storage, messages, invalid=False, encode_empty=False):
"""
Sets ``request.COOKIES`` with the encoded data and removes the storage
backend's loaded data cache.
"""
encoded_data = storage._encode(messages, encode_empty=encode_empty)
if invalid:
# Truncate the first character so that the hash is invalid.
encoded_data = encoded_data[1:]
storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}
if hasattr(storage, '_loaded_data'):
del storage._loaded_data
def stored_cookie_messages_count(storage, response):
"""
Returns an integer containing the number of messages stored.
"""
# Get a list of cookies, excluding ones with a max-age of 0 (because
# they have been marked for deletion).
cookie = response.cookies.get(storage.cookie_name)
if not cookie or cookie['max-age'] == 0:
return 0
data = storage._decode(cookie.value)
if not data:
return 0
if data[-1] == CookieStorage.not_finished:
data.pop()
return len(data)
@override_settings(SESSION_COOKIE_DOMAIN='.example.com', SESSION_COOKIE_SECURE=True, SESSION_COOKIE_HTTPONLY=True)
class CookieTest(BaseTests, SimpleTestCase):
storage_class = CookieStorage
def stored_messages_count(self, storage, response):
return stored_cookie_messages_count(storage, response)
def test_get(self):
storage = self.storage_class(self.get_request())
# Set initial data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
    def test_cookie_settings(self):
"""
        Ensure that CookieStorage honors SESSION_COOKIE_DOMAIN, SESSION_COOKIE_SECURE and SESSION_COOKIE_HTTPONLY.
Refs #15618 and #20972.
"""
# Test before the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
storage.update(response)
self.assertIn('test', response.cookies['messages'].value)
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], '')
self.assertEqual(response.cookies['messages']['secure'], True)
self.assertEqual(response.cookies['messages']['httponly'], True)
# Test deletion of the cookie (storing with an empty value) after the messages have been consumed
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'test')
for m in storage:
pass # Iterate through the storage to simulate consumption of messages.
storage.update(response)
self.assertEqual(response.cookies['messages'].value, '')
self.assertEqual(response.cookies['messages']['domain'], '.example.com')
self.assertEqual(response.cookies['messages']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')
def test_get_bad_cookie(self):
request = self.get_request()
storage = self.storage_class(request)
# Set initial (invalid) data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages, invalid=True)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_max_cookie_length(self):
"""
Tests that, if the data exceeds what is allowed in a cookie, older
messages are removed before saving (and returned by the ``update``
method).
"""
storage = self.get_storage()
response = self.get_response()
# When storing as a cookie, the cookie has constant overhead of approx
# 54 chars, and each message has a constant overhead of about 37 chars
# and a variable overhead of zero in the best case. We aim for a message
# size which will fit 4 messages into the cookie, but not 5.
# See also FallbackTest.test_session_fallback
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
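        # Illustrative arithmetic, assuming the common 4096-byte cookie limit:
        # msg_size = int((4096 - 54) / 4.5 - 37) = 861, so four messages need
        # 4 * (861 + 37) + 54 = 3646 bytes (fits), while five need
        # 5 * (861 + 37) + 54 = 4544 bytes (does not).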
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
unstored_messages = storage.update(response)
cookie_storing = self.stored_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
self.assertEqual(len(unstored_messages), 1)
self.assertEqual(unstored_messages[0].message, '0' * msg_size)
def test_json_encoder_decoder(self):
"""
Tests that a complex nested data structure containing Message
instances is properly encoded/decoded by the custom JSON
encoder/decoder classes.
"""
messages = [
{
'message': Message(constants.INFO, 'Test message'),
'message_list': [Message(constants.INFO, 'message %s')
for x in range(5)] + [{'another-message':
Message(constants.ERROR, 'error')}],
},
Message(constants.INFO, 'message %s'),
]
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
decoded_messages = json.loads(value, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
def test_safedata(self):
"""
Tests that a message containing SafeData is keeping its safe status when
retrieved from the message storage.
"""
def encode_decode(data):
message = Message(constants.DEBUG, data)
encoded = storage._encode(message)
decoded = storage._decode(encoded)
return decoded.message
storage = self.get_storage()
self.assertIsInstance(
encode_decode(mark_safe("<b>Hello Django!</b>")), SafeData)
self.assertNotIsInstance(
encode_decode("<b>Hello Django!</b>"), SafeData)
def test_pre_1_5_message_format(self):
"""
For ticket #22426. Tests whether messages that were set in the cookie
before the addition of is_safedata are decoded correctly.
"""
# Encode the messages using the current encoder.
messages = [Message(constants.INFO, 'message %s') for x in range(5)]
encoder = MessageEncoder(separators=(',', ':'))
encoded_messages = encoder.encode(messages)
# Remove the is_safedata flag from the messages in order to imitate
        # the pre-1.5 behavior (monkey patching).
encoded_messages = json.loads(encoded_messages)
for obj in encoded_messages:
obj.pop(1)
encoded_messages = json.dumps(encoded_messages, separators=(',', ':'))
# Decode the messages in the old format (without is_safedata)
decoded_messages = json.loads(encoded_messages, cls=MessageDecoder)
self.assertEqual(messages, decoded_messages)
| bsd-3-clause |
ATIX-AG/ansible | lib/ansible/modules/network/f5/bigip_device_group.py | 24 | 14387 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_group
short_description: Manage device groups on a BIG-IP
description:
- Managing device groups allows you to create HA pairs and clusters
of BIG-IP devices. Usage of this module should be done in conjunction
with the C(bigip_configsync_actions) to sync configuration across
the pair or cluster if auto-sync is disabled.
version_added: "2.5"
options:
name:
description:
- Specifies the name of the device group.
required: True
type:
description:
      - Specifies the type of device group. A C(sync-failover) device group
contains devices that synchronize their configuration data and fail
over to one another when a device becomes unavailable. A C(sync-only)
device group has no such failover. When creating a new device group,
this option will default to C(sync-only). This setting cannot be
changed once it has been set.
choices:
- sync-failover
- sync-only
description:
description:
- Description of the device group.
auto_sync:
description:
- Indicates whether configuration synchronization occurs manually or
automatically. When creating a new device group, this option will
default to C(false).
type: bool
save_on_auto_sync:
description:
- When performing an auto-sync, specifies whether the configuration
will be saved or not. If C(false), only the running configuration
will be changed on the device(s) being synced to. When creating a
new device group, this option will default to C(false).
type: bool
full_sync:
description:
- Specifies whether the system synchronizes the entire configuration
during synchronization operations. When C(false), the system performs
incremental synchronization operations, based on the cache size
specified in C(max_incremental_sync_size). Incremental configuration
synchronization is a mechanism for synchronizing a device-group's
configuration among its members, without requiring a full configuration
load for each configuration change. In order for this to work, all
devices in the device-group must initially agree on the configuration.
Typically this requires at least one full configuration load to each
device. When creating a new device group, this option will default
to C(false).
type: bool
max_incremental_sync_size:
description:
- Specifies the size of the changes cache for incremental sync. For example,
using the default, if you make more than 1024 KB worth of incremental
changes, the system performs a full synchronization operation. Using
incremental synchronization operations can reduce the per-device sync/load
time for configuration changes. This setting is relevant only when
C(full_sync) is C(false).
state:
description:
- When C(state) is C(present), ensures the device group exists.
- When C(state) is C(absent), ensures that the device group is removed.
choices:
- present
- absent
notes:
- This module is primarily used as a component of configuring HA pairs of
BIG-IP devices.
- Requires BIG-IP >= 12.1.x.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a sync-only device group
bigip_device_group:
name: foo-group
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Create a sync-only device group with auto-sync enabled
bigip_device_group:
name: foo-group
auto_sync: yes
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
save_on_auto_sync:
description: The new save_on_auto_sync value of the device group.
returned: changed
type: bool
sample: true
full_sync:
description: The new full_sync value of the device group.
returned: changed
type: bool
sample: false
description:
description: The new description of the device group.
returned: changed
type: string
sample: this is a device group
type:
description: The new type of the device group.
returned: changed
type: string
sample: sync-failover
auto_sync:
description: The new auto_sync value of the device group.
returned: changed
type: bool
sample: true
max_incremental_sync_size:
  description: The new sync size of the device group.
returned: changed
type: int
sample: 1000
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'saveOnAutoSync': 'save_on_auto_sync',
'fullLoadOnSync': 'full_sync',
'autoSync': 'auto_sync',
'incrementalConfigSyncSizeMax': 'max_incremental_sync_size'
}
api_attributes = [
'saveOnAutoSync', 'fullLoadOnSync', 'description', 'type', 'autoSync',
'incrementalConfigSyncSizeMax'
]
returnables = [
'save_on_auto_sync', 'full_sync', 'description', 'type', 'auto_sync',
'max_incremental_sync_size'
]
updatables = [
'save_on_auto_sync', 'full_sync', 'description', 'auto_sync',
'max_incremental_sync_size'
]
@property
def save_on_auto_sync(self):
if self._values['save_on_auto_sync'] is None:
return None
elif self._values['save_on_auto_sync'] in BOOLEANS_TRUE:
return True
else:
return False
@property
def auto_sync(self):
if self._values['auto_sync'] is None:
return None
elif self._values['auto_sync'] in [True, 'enabled']:
return 'enabled'
else:
return 'disabled'
@property
def full_sync(self):
if self._values['full_sync'] is None:
return None
elif self._values['full_sync'] in BOOLEANS_TRUE:
return True
else:
return False
@property
def max_incremental_sync_size(self):
if not self.full_sync and self._values['max_incremental_sync_size'] is not None:
if self._values['__warnings'] is None:
self._values['__warnings'] = []
self._values['__warnings'].append(
[
dict(
msg='"max_incremental_sync_size has no effect if "full_sync" is not true',
version='2.4'
)
]
)
if self._values['max_incremental_sync_size'] is None:
return None
return int(self._values['max_incremental_sync_size'])
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class Changes(Parameters):
@property
def auto_sync(self):
if self._values['auto_sync'] in BOOLEANS_TRUE:
return True
else:
return False
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = Parameters(params=self.module.params)
self.have = None
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Changes(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
result = self.client.api.tm.cm.device_groups.device_group.exists(
name=self.want.name
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the device group")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.cm.device_groups.device_group.create(
name=self.want.name,
**params
)
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.name
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
resource = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.name
)
if resource:
resource.delete()
def read_current_from_device(self):
resource = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.name
)
result = resource.attrs
return Parameters(params=result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
type=dict(
choices=['sync-failover', 'sync-only']
),
description=dict(),
auto_sync=dict(
type='bool',
default='no'
),
save_on_auto_sync=dict(
type='bool',
),
full_sync=dict(
type='bool'
),
name=dict(
required=True
),
max_incremental_sync_size=dict(),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
cpyou/odoo | addons/sale/tests/__init__.py | 7 | 1131 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_sale_to_invoice
checks = [
test_sale_to_invoice,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
VilleEvitaCake/android_kernel_htc_msm8960 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
jelugbo/tundex | common/djangoapps/reverification/models.py | 66 | 2118 | """
Models for reverification features common to both lms and studio
"""
from datetime import datetime
import pytz
from django.core.exceptions import ValidationError
from django.db import models
from util.validate_on_save import ValidateOnSaveMixin
from xmodule_django.models import CourseKeyField
class MidcourseReverificationWindow(ValidateOnSaveMixin, models.Model):
"""
Defines the start and end times for midcourse reverification for a particular course.
There can be many MidcourseReverificationWindows per course, but they cannot have
overlapping time ranges. This is enforced by this class's clean() method.
"""
# the course that this window is attached to
course_id = CourseKeyField(max_length=255, db_index=True)
start_date = models.DateTimeField(default=None, null=True, blank=True)
end_date = models.DateTimeField(default=None, null=True, blank=True)
def clean(self):
"""
Gives custom validation for the MidcourseReverificationWindow model.
Prevents overlapping windows for any particular course.
"""
query = MidcourseReverificationWindow.objects.filter(
course_id=self.course_id,
end_date__gte=self.start_date,
start_date__lte=self.end_date
)
if query.count() > 0:
raise ValidationError('Reverification windows cannot overlap for a given course.')
@classmethod
def window_open_for_course(cls, course_id):
"""
Returns a boolean, True if the course is currently asking for reverification, else False.
"""
now = datetime.now(pytz.UTC)
return cls.get_window(course_id, now) is not None
@classmethod
def get_window(cls, course_id, date):
"""
Returns the window that is open for a particular course for a particular date.
If no such window is open, or if more than one window is open, returns None.
"""
try:
return cls.objects.get(course_id=course_id, start_date__lte=date, end_date__gte=date)
except cls.DoesNotExist:
return None
| agpl-3.0 |
kuri65536/python-for-android | python3-alpha/python-libs/pyxmpp2/ext/__init__.py | 47 | 1026 | #
# (C) Copyright 2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""
Implementations for `XMPP Extensions`_.
=======================================
Each submodule or subpackage should implement one extension or closely
related group of extensions.
.. _XMPP Extensions: http://xmpp.org/xmpp-protocols/xmpp-extensions/
"""
__docformat__ = "restructuredtext en"
# vi: sts=4 et sw=4
| apache-2.0 |
angr/angr | tests/test_ptmalloc.py | 1 | 8838 | import nose.tools
from angr import SimState, SimHeapPTMalloc
# TODO: Make these tests more architecture-independent (note dependencies of some behavior on chunk metadata size)
def chunk_iterators_are_same(iterator1, iterator2):
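    # Two heaps are considered equivalent when they yield the same sequence of
    # chunks, comparing each chunk's base address and free/allocated status.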
for ck in iterator1:
ck2 = next(iterator2)
if ck.base != ck2.base:
return False
if ck.is_free() != ck2.is_free():
return False
try:
next(iterator2)
except StopIteration:
return True
return False
def same_heap_states(state1, state2):
return chunk_iterators_are_same(state1.heap.chunks(), state2.heap.chunks())
def max_sym_var_val(state):
return state.libc.max_variable_size
def run_malloc_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(max_sym_var_val(s)))
s.heap.malloc(x)
sc.heap.malloc(max_sym_var_val(sc))
nose.tools.assert_true(same_heap_states(s, sc))
def test_malloc_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_malloc_maximizes_sym_arg, arch
def run_free_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
p = s.heap.malloc(50)
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(p))
s.heap.free(x)
sc.heap.free(p)
nose.tools.assert_true(same_heap_states(s, sc))
def test_free_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_free_maximizes_sym_arg, arch
def run_calloc_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(20))
y = s.solver.BVS("y", 32)
s.solver.add(y.UGE(0))
s.solver.add(y.ULE(6))
s.heap.calloc(x, y)
sc.heap.calloc(20, 6)
nose.tools.assert_true(same_heap_states(s, sc))
def test_calloc_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_calloc_maximizes_sym_arg, arch
def run_realloc_maximizes_sym_arg(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
p = s.heap.malloc(50)
sc = s.copy()
x = s.solver.BVS("x", 32)
s.solver.add(x.UGE(0))
s.solver.add(x.ULE(p))
y = s.solver.BVS("y", 32)
s.solver.add(y.UGE(0))
s.solver.add(y.ULE(max_sym_var_val(s)))
s.heap.realloc(x, y)
sc.heap.realloc(p, max_sym_var_val(sc))
nose.tools.assert_true(same_heap_states(s, sc))
def test_realloc_maximizes_sym_arg():
for arch in ('X86', 'AMD64'):
yield run_realloc_maximizes_sym_arg, arch
def run_malloc_no_space_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
p1 = s.heap.malloc(0x2000)
nose.tools.assert_equal(p1, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_malloc_no_space_returns_null():
for arch in ('X86', 'AMD64'):
yield run_malloc_no_space_returns_null, arch
def run_calloc_no_space_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
p1 = s.heap.calloc(0x500, 4)
nose.tools.assert_equal(p1, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_calloc_no_space_returns_null():
for arch in ('X86', 'AMD64'):
yield run_calloc_no_space_returns_null, arch
def run_realloc_no_space_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
p1 = s.heap.malloc(20)
sc = s.copy()
p2 = s.heap.realloc(p1, 0x2000)
nose.tools.assert_equal(p2, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_realloc_no_space_returns_null():
for arch in ('X86', 'AMD64'):
yield run_realloc_no_space_returns_null, arch
def run_first_fit_and_free_malloced_makes_available(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(20)
p1 = s.heap.malloc(50)
s.heap.free(p1)
p2 = s.heap.malloc(30)
nose.tools.assert_equal(p1, p2)
def test_first_fit_and_free_malloced_makes_available():
for arch in ('X86', 'AMD64'):
yield run_first_fit_and_free_malloced_makes_available, arch
def run_free_calloced_makes_available(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.calloc(20, 5)
p1 = s.heap.calloc(30, 4)
s.heap.free(p1)
p2 = s.heap.calloc(15, 8)
nose.tools.assert_equal(p1, p2)
def test_free_calloced_makes_available():
for arch in ('X86', 'AMD64'):
yield run_free_calloced_makes_available, arch
def run_realloc_moves_and_frees(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(20)
p1 = s.heap.malloc(60)
s.heap.malloc(200)
p2 = s.heap.realloc(p1, 300)
p3 = s.heap.malloc(30)
nose.tools.assert_equal(p1, p3)
nose.tools.assert_less(p1, p2)
def test_realloc_moves_and_frees():
for arch in ('X86', 'AMD64'):
yield run_realloc_moves_and_frees, arch
def run_realloc_near_same_size(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(20)
p1 = s.heap.malloc(61)
s.heap.malloc(80)
sc = s.copy()
p2 = s.heap.realloc(p1, 62)
nose.tools.assert_equal(p1, p2)
nose.tools.assert_true(same_heap_states(s, sc))
def test_realloc_near_same_size():
for arch in ('X86', 'AMD64'):
yield run_realloc_near_same_size, arch
def run_needs_space_for_metadata(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
sc = s.copy()
p1 = s.heap.malloc(0x1000)
nose.tools.assert_equal(p1, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_needs_space_for_metadata():
for arch in ('X86', 'AMD64'):
yield run_needs_space_for_metadata, arch
def run_unusable_amount_returns_null(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(0x1000 - 4 * s.heap._chunk_size_t_size)
sc = s.copy()
p = s.heap.malloc(1)
nose.tools.assert_equal(p, 0)
nose.tools.assert_true(same_heap_states(s, sc))
def test_unusable_amount_returns_null():
for arch in ('X86', 'AMD64'):
yield run_unusable_amount_returns_null, arch
def run_free_null_preserves_state(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(30)
p = s.heap.malloc(40)
s.heap.malloc(50)
s.heap.free(p)
s2 = s.copy()
s2.heap.free(0)
nose.tools.assert_true(same_heap_states(s, s2))
def test_free_null_preserves_state():
for arch in ('X86', 'AMD64'):
yield run_free_null_preserves_state, arch
def run_skips_chunks_too_small(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(30)
p = s.heap.malloc(50)
s.heap.malloc(40)
s.heap.free(p)
p2 = s.heap.calloc(20, 5)
nose.tools.assert_less(p, p2)
def test_skips_chunks_too_small():
for arch in ('X86', 'AMD64'):
yield run_skips_chunks_too_small, arch
def run_calloc_multiplies(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.heap.malloc(30)
sc = s.copy()
s.heap.malloc(100)
sc.heap.calloc(4, 25)
nose.tools.assert_true(same_heap_states(s, sc))
def test_calloc_multiplies():
for arch in ('X86', 'AMD64'):
yield run_calloc_multiplies, arch
def run_calloc_clears(arch):
s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
s.memory.store(0xd0000000 + 2 * s.heap._chunk_size_t_size, s.solver.BVV(-1, 100 * 8))
sc = s.copy()
p1 = s.heap.calloc(6, 5)
p2 = sc.heap.malloc(30)
v1 = s.memory.load(p1, 30)
v2 = sc.memory.load(p2, 30)
nose.tools.assert_true(s.solver.is_true(v1 == 0))
nose.tools.assert_true(sc.solver.is_true(v2 == -1))
def test_calloc_clears():
for arch in ('X86', 'AMD64'):
yield run_calloc_clears, arch
if __name__ == "__main__":
g = globals().copy()
for func_name, func in g.items():
if func_name.startswith("test_") and hasattr(func, '__call__'):
for r, a in func():
r(a)
| bsd-2-clause |
peterstace/project-euler | OLD_PY_CODE/project_euler_old_old/111/number_theory.py | 2 | 7023 | """This module implements functions that have to do with number theory."""
import random
from operator import mul
_stock_primes = [2, 3, 5, 7, 11, 13, 17, 19]
def int_pow(x, n):
"""Raise x to the power n (if n is negative a ValueError is raised).
    int_pow(0, 0) is defined to be 1.
"""
if n < 0:
raise ValueError("n must be non-negative")
elif n == 0:
return 1
else:
if n % 2 == 0:
tmp = int_pow(x, n // 2)
return tmp * tmp
else:
return x * int_pow(x, n - 1)
def mod_exp(b, e, m):
"""Calculate b to the e modulo m."""
if e < 0:
raise ValueError("e must be non-negative")
elif e == 0:
return 1
else:
if e % 2 == 0:
tmp = mod_exp(b, e // 2, m)
return tmp * tmp % m
else:
return b * mod_exp(b, e - 1, m) % m
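# Worked example: mod_exp(2, 10, 1000) == 24, since 2**10 = 1024 and
# 1024 % 1000 = 24.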
def miller_rabin(n, k):
"""Declare n probably prime with probability at most 1/4^k (if returns true)
otherwise declare n composite (if returns false).
"""
if n <= 4:
raise ValueError("n must be greater than 4")
d = n - 1
s = 0
while d % 2 == 0:
d = d // 2
s += 1
for _ in range(k):
a = random.randint(2, n - 2)
x = mod_exp(a, d, n)
if x in (1, n - 1):
continue
next_loop = False
for r in range(1, s):
x = x * x % n
if x == 1:
return False #composite
if x == n - 1:
next_loop = True
break
if not next_loop:
return False #composite
return True #probably prime
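# Illustrative usage: miller_rabin(104729, 20) always returns True, since
# 104729 (the 10000th prime) really is prime; for a composite n the chance
# of a spurious True is at most 1/4**20.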
def prime_sieve(n):
"""Calculate all primes up to and including n, and return the list of those
primes. If n is negative, a ValueError is raised.
"""
if n < 0:
raise ValueError("n must be non-negative")
candidates = list(range(n+1))
finish = int(n**0.5)
for i in range(2, finish+1):
if candidates[i]:
candidates[i*i::i] = [None] * len(candidates[i*i::i])
return [i for i in candidates[2:] if i]
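# For example, prime_sieve(10) returns [2, 3, 5, 7].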
def prime(n, primes=_stock_primes):
"""Checks if an integer n is a prime number. If primes is provided,
these can be used to speed up the test."""
if n < 2:
return False
for p in primes:
if p * p > n:
return True
if n % p == 0:
return False
p = primes[-1] + 2
while p * p <= n:
if n % p == 0:
return False
p = p + 2
return True
def isqrt(n):
"""Calculate the integer part of the square root of a natural number n.
Uses a binary search to find the integer square root, and so runs
logarithmically. If n negative, a ValueError is raised.
"""
if n < 0:
raise ValueError("n must be non-negative")
a, b = 0, n+1
while b - a != 1:
mid = (a + b) // 2
if mid*mid <= n:
a = mid
else:
b = mid
return a
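# For example, isqrt(15) == 3 while isqrt(16) == 4.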
def perfect_square(n):
"""Calculate if an integer is a perfect square. Constant time complexity
for most numbers due to modulo tests. Worst case time complexity is logn
when the square of the isqrt is checked against n.
"""
#negative values cannot be perfect squares
if n < 0:
return False
#checks modulo 256
if (n & 7 != 1) and (n & 31 != 4) and (n & 127 != 16) and (n & 191 != 0):
return False
#checks the modulus of n is a quadratic residue mod 9, 5, 7, 13, and 17.
if n % 9 not in (0, 1, 4, 7): return False
if n % 5 not in (0, 1, 4): return False
if n % 7 not in (0, 1, 2, 4): return False
if n % 13 not in (0, 1, 3, 4, 9, 10, 12): return False
if n % 17 not in (0, 1, 2, 4, 8, 9, 13, 15, 16): return False
#check using isqrt
i = isqrt(n)
return i*i == n
def decomp_sieve(n):
"""Calculate the prime decomposition for each number up and including n,
and return the prime decompositions in a list indexed by that number.
"""
result = [dict() for i in range(n+1)]
p = 2
while p <= n:
for pk in range(p, n+1, p):
result[pk][p] = 1
palpha = p*p
while palpha <= n:
for palphak in range(palpha, n+1, palpha):
result[palphak][p] += 1
palpha *= p
while p <= n and result[p]:
p += 1
return result
def decomp(n, primes=_stock_primes):
"""Find the prime decomposition of a natural number. The result is returned
    as a dictionary whose keys are primes and values are powers.
E.g. decomp(12) -> {2:2, 3:1}
    A list of primes should be provided, with primes at least up to the square
    root of n. If the prime list doesn't go that high and the remaining cofactor
    of n exceeds the square of the largest prime provided, a ValueError is
    raised.
"""
if n < 1:
raise ValueError("n must be positive")
record = {}
if n == 1:
return record
for p in primes:
power = 0
while n % p == 0:
power += 1
n = n // p
if power != 0:
record[p] = power
if p * p > n:
if n != 1:
record[n] = 1 #this is the last prime in the record
return record
#we have run out of primes to check...
last_p = primes[-1]
if last_p * last_p > n:
record[n] = 1
else:
raise ValueError("not enough prime numbers in primes")
def factors(pd):
"""Yields all factors of a number given its prime decomposition."""
if pd:
prime, power = pd.popitem()
vals = [int_pow(prime, i) for i in range(power + 1)]
for partial_factor in factors(pd):
for val in vals:
yield val * partial_factor
pd[prime] = power
else:
yield 1
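# For example, sorted(factors({2: 2, 3: 1})) == [1, 2, 3, 4, 6, 12], the
# divisors of 12.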
def sum_divisors(pd):
"""Calculate the lowercase sigma function (sum of divisors) of a natural
number given its prime decomposition pd.
"""
if pd == {}: #decomp corresponds to 1
return 1
else:
return reduce(mul, [(int_pow(p, pd[p]+1)-1) // (p-1) for p in pd])
def num_divisors(pd):
"""Calculates the tau function (number of divisors) of a natural number
given its prime decomposition pd.
"""
if pd == {}:
return 1
else:
return reduce(mul, [pd[p] + 1 for p in pd])
def nominal_record(n, base):
"""Calculate the digital record of a natural number n with a certain
base.
"""
if n < 1:
raise ValueError("n must be >= 1")
if base < 2:
raise ValueError("base must be >= 2")
record = []
while n > 0:
record.insert(0, n % base)
n = n // base
return record
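# For example, nominal_record(13, 2) returns [1, 1, 0, 1], the binary digits
# of 13 from most to least significant.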
def eval_nominal_record(record, base):
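    """Evaluate a digital record (as produced by nominal_record) back into
    the natural number it represents, e.g. eval_nominal_record([1, 1, 0, 1], 2)
    returns 13.
    """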
place_value = 1
value = 0
for digit in reversed(record):
value += digit * place_value
place_value *= base
return value
| unlicense |
ericzolf/ansible | lib/ansible/module_utils/facts/sysctl.py | 29 | 1931 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils._text import to_text
def get_sysctl(module, prefixes):
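    # Run the sysctl binary once for the given prefixes and parse its output
    # into a dict: lines are split on "=" or ": ", and indented continuation
    # lines are folded back into the previous key's value.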
sysctl_cmd = module.get_bin_path('sysctl')
cmd = [sysctl_cmd]
cmd.extend(prefixes)
sysctl = dict()
try:
rc, out, err = module.run_command(cmd)
except (IOError, OSError) as e:
module.warn('Unable to read sysctl: %s' % to_text(e))
rc = 1
if rc == 0:
key = ''
value = ''
for line in out.splitlines():
if not line.strip():
continue
if line.startswith(' '):
# handle multiline values, they will not have a starting key
# Add the newline back in so people can split on it to parse
# lines if they need to.
value += '\n' + line
continue
if key:
sysctl[key] = value.strip()
try:
(key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
except Exception as e:
module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
if key:
sysctl[key] = value.strip()
return sysctl
| gpl-3.0 |
ForkedReposBak/mxnet | example/ssd/dataset/pascal_voc.py | 42 | 10238 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import numpy as np
from dataset.imdb import Imdb
import xml.etree.ElementTree as ET
from evaluate.eval_voc import voc_eval
import cv2
class PascalVoc(Imdb):
"""
Implementation of Imdb for Pascal VOC datasets
Parameters:
----------
image_set : str
set to be used, can be train, val, trainval, test
year : str
year of dataset, can be 2007, 2010, 2012...
devkit_path : str
devkit path of VOC dataset
shuffle : boolean
whether to initial shuffle the image list
is_train : boolean
if true, will load annotations
"""
def __init__(self, image_set, year, devkit_path, shuffle=False, is_train=False,
names='pascal_voc.names'):
super(PascalVoc, self).__init__('voc_' + year + '_' + image_set)
self.image_set = image_set
self.year = year
self.devkit_path = devkit_path
self.data_path = os.path.join(devkit_path, 'VOC' + year)
self.extension = '.jpg'
self.is_train = is_train
self.classes = self._load_class_names(names,
os.path.join(os.path.dirname(__file__), 'names'))
self.config = {'use_difficult': True,
'comp_id': 'comp4',}
self.num_classes = len(self.classes)
self.image_set_index = self._load_image_set_index(shuffle)
self.num_images = len(self.image_set_index)
if self.is_train:
self.labels = self._load_image_labels()
@property
def cache_path(self):
"""
make a directory to store all caches
Returns:
---------
cache path
"""
cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return cache_path
def _load_image_set_index(self, shuffle):
"""
find out which indexes correspond to given image set (train or val)
Parameters:
----------
shuffle : boolean
whether to shuffle the image list
Returns:
----------
entire list of images specified in the setting
"""
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip() for x in f.readlines()]
if shuffle:
np.random.shuffle(image_set_index)
return image_set_index
def image_path_from_index(self, index):
"""
given image index, find out full path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of this image
"""
assert self.image_set_index is not None, "Dataset not initialized"
name = self.image_set_index[index]
image_file = os.path.join(self.data_path, 'JPEGImages', name + self.extension)
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
def label_from_index(self, index):
"""
given image index, return preprocessed ground-truth
Parameters:
----------
index: int
index of a specific image
Returns:
----------
ground-truths of this image
"""
assert self.labels is not None, "Labels not processed"
return self.labels[index]
def _label_path_from_index(self, index):
"""
given image index, find out annotation path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of annotation file
"""
label_file = os.path.join(self.data_path, 'Annotations', index + '.xml')
assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)
return label_file
def _load_image_labels(self):
"""
preprocess all ground-truths
Returns:
----------
labels packed in [num_images x max_num_objects x 5] tensor
"""
temp = []
# load ground-truth from xml annotations
for idx in self.image_set_index:
label_file = self._label_path_from_index(idx)
tree = ET.parse(label_file)
root = tree.getroot()
size = root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
label = []
for obj in root.iter('object'):
difficult = int(obj.find('difficult').text)
# if not self.config['use_difficult'] and difficult == 1:
# continue
cls_name = obj.find('name').text
if cls_name not in self.classes:
continue
cls_id = self.classes.index(cls_name)
xml_box = obj.find('bndbox')
xmin = float(xml_box.find('xmin').text) / width
ymin = float(xml_box.find('ymin').text) / height
xmax = float(xml_box.find('xmax').text) / width
ymax = float(xml_box.find('ymax').text) / height
label.append([cls_id, xmin, ymin, xmax, ymax, difficult])
temp.append(np.array(label))
return temp
def evaluate_detections(self, detections):
"""
top level evaluations
Parameters:
----------
detections: list
result list, each entry is a matrix of detections
Returns:
----------
None
"""
# make all these folders for results
result_dir = os.path.join(self.devkit_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
self.do_python_eval()
def get_result_file_template(self):
"""
this is a template
VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
Returns:
----------
a string template
"""
res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
comp_id = self.config['comp_id']
filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
path = os.path.join(res_file_folder, filename)
return path
def write_pascal_results(self, all_boxes):
"""
write results files in pascal devkit path
Parameters:
----------
all_boxes: list
boxes to be processed [bbox, confidence]
Returns:
----------
None
"""
for cls_ind, cls in enumerate(self.classes):
print('Writing {} VOC results file'.format(cls))
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_set_index):
dets = all_boxes[im_ind]
if dets.shape[0] < 1:
continue
h, w = self._get_imsize(self.image_path_from_index(im_ind))
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
if (int(dets[k, 0]) == cls_ind):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, 1],
int(dets[k, 2] * w) + 1, int(dets[k, 3] * h) + 1,
int(dets[k, 4] * w) + 1, int(dets[k, 5] * h) + 1))
def do_python_eval(self):
"""
python evaluation wrapper
Returns:
----------
None
"""
annopath = os.path.join(self.data_path, 'Annotations', '{:s}.xml')
imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
cache_dir = os.path.join(self.cache_path, self.name)
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self.year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
for cls_ind, cls in enumerate(self.classes):
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, cache_dir,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
print('Mean AP = {:.4f}'.format(np.mean(aps)))
def _get_imsize(self, im_name):
"""
get image size info
Returns:
----------
tuple of (height, width)
"""
img = cv2.imread(im_name)
return (img.shape[0], img.shape[1])
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/google/appengine/_internal/django/template/defaultfilters.py | 23 | 29329 | """Default variable filters."""
import re
from decimal import Decimal, InvalidOperation, ROUND_HALF_UP
import random as random_module
try:
from functools import wraps
except ImportError:
from google.appengine._internal.django.utils.functional import wraps # Python 2.4 fallback.
from google.appengine._internal.django.template import Variable, Library
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.utils import formats
from google.appengine._internal.django.utils.encoding import force_unicode, iri_to_uri
from google.appengine._internal.django.utils.html import conditional_escape
from google.appengine._internal.django.utils.safestring import mark_safe, SafeData
from google.appengine._internal.django.utils.translation import ugettext, ungettext
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_unicode(args[0])
if isinstance(args[0], SafeData) and getattr(func, 'is_safe', False):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
###################
# STRINGS #
###################
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
addslashes.is_safe = True
addslashes = stringfilter(addslashes)
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
capfirst.is_safe = True
capfirst = stringfilter(capfirst)
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
from google.appengine._internal.django.utils.html import escapejs
return escapejs(value)
escapejs = stringfilter(escapejs)
def fix_ampersands(value):
"""Replaces ampersands with ``&`` entities."""
from google.appengine._internal.django.utils.html import fix_ampersands
return fix_ampersands(value)
fix_ampersands.is_safe=True
fix_ampersands = stringfilter(fix_ampersands)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) / (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_unicode(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return u''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_unicode(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return u''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format(u'%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
return mark_safe(formats.number_format(u'%s' % str(d.quantize(exp, ROUND_HALF_UP)), abs(p)))
except InvalidOperation:
return input_val
floatformat.is_safe = True
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_unicode(iri_to_uri(value))
iriencode.is_safe = True
iriencode = stringfilter(iriencode)
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
from google.appengine._internal.django.utils.html import escape
lines = value.split(u'\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = unicode(len(unicode(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line))
return mark_safe(u'\n'.join(lines))
linenumbers.is_safe = True
linenumbers.needs_autoescape = True
linenumbers = stringfilter(linenumbers)
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
lower.is_safe = True
lower = stringfilter(lower)
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
make_list.is_safe = False
make_list = stringfilter(make_list)
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return mark_safe(re.sub('[-\s]+', '-', value))
slugify.is_safe = True
slugify = stringfilter(slugify)
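# Editor's note: illustrative results of the slugify filter above (inputs are
# hypothetical; outputs follow the implementation):
#   slugify(u"Joel is a slug")         -> u"joel-is-a-slug"
#   slugify(u"  Caf\xe9 & Croissant ") -> u"cafe-croissant"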
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return (u"%" + unicode(arg)) % value
except (ValueError, TypeError):
return u""
stringformat.is_safe = True
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
title.is_safe = True
title = stringfilter(title)
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
from google.appengine._internal.django.utils.text import truncate_words
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return truncate_words(value, length)
truncatewords.is_safe = True
truncatewords = stringfilter(truncatewords)
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
from google.appengine._internal.django.utils.text import truncate_html_words
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return truncate_html_words(value, length)
truncatewords_html.is_safe = True
truncatewords_html = stringfilter(truncatewords_html)
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
upper.is_safe = False
upper = stringfilter(upper)
def urlencode(value):
"""Escapes a value for use in a URL."""
from google.appengine._internal.django.utils.http import urlquote
return urlquote(value)
urlencode.is_safe = False
urlencode = stringfilter(urlencode)
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
from google.appengine._internal.django.utils.html import urlize
return mark_safe(urlize(value, nofollow=True, autoescape=autoescape))
urlize.is_safe=True
urlize.needs_autoescape = True
urlize = stringfilter(urlize)
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
from google.appengine._internal.django.utils.html import urlize
return mark_safe(urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
urlizetrunc.is_safe = True
urlizetrunc.needs_autoescape = True
urlizetrunc = stringfilter(urlizetrunc)
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
wordcount.is_safe = False
wordcount = stringfilter(wordcount)
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
from google.appengine._internal.django.utils.text import wrap
return wrap(value, int(arg))
wordwrap.is_safe = True
wordwrap = stringfilter(wordwrap)
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
ljust.is_safe = True
ljust = stringfilter(ljust)
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
rjust.is_safe = True
rjust = stringfilter(rjust)
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
center.is_safe = True
center = stringfilter(center)
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, u'')
if safe and arg != ';':
return mark_safe(value)
return value
cut = stringfilter(cut)
###################
# HTML STRINGS #
###################
def escape(value):
"""
Marks the value as a string that should be auto-escaped.
"""
from google.appengine._internal.django.utils.safestring import mark_for_escaping
return mark_for_escaping(value)
escape.is_safe = True
escape = stringfilter(escape)
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
from google.appengine._internal.django.utils.html import escape
return mark_safe(escape(value))
force_escape = stringfilter(force_escape)
force_escape.is_safe = True
def linebreaks(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
from google.appengine._internal.django.utils.html import linebreaks
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
linebreaks.is_safe = True
linebreaks.needs_autoescape = True
linebreaks = stringfilter(linebreaks)
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
if autoescape and not isinstance(value, SafeData):
from google.appengine._internal.django.utils.html import escape
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
linebreaksbr.is_safe = True
linebreaksbr.needs_autoescape = True
linebreaksbr = stringfilter(linebreaksbr)
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
safe.is_safe = True
safe = stringfilter(safe)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_unicode(obj)) for obj in value]
safeseq.is_safe = True
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = u'(%s)' % u'|'.join(tags)
starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile(u'</%s>' % tags_re)
value = starttag_re.sub(u'', value)
value = endtag_re.sub(u'', value)
return value
removetags.is_safe = True
removetags = stringfilter(removetags)
def striptags(value):
"""Strips all [X]HTML tags."""
from google.appengine._internal.django.utils.html import strip_tags
return strip_tags(value)
striptags.is_safe = True
striptags = stringfilter(striptags)
###################
# LISTS #
###################
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
var_resolve = Variable(arg).resolve
decorated = [(var_resolve(item), item) for item in value]
decorated.sort()
return [item[1] for item in decorated]
dictsort.is_safe = False
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
var_resolve = Variable(arg).resolve
decorated = [(var_resolve(item), item) for item in value]
decorated.sort()
decorated.reverse()
return [item[1] for item in decorated]
dictsortreversed.is_safe = False
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return u''
first.is_safe = False
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_unicode, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
join.is_safe = True
join.needs_autoescape = True
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return u''
last.is_safe = True
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
length.is_safe = True
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
length_is.is_safe = False
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
random.is_safe = True
def slice_(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
for an introduction.
"""
try:
bits = []
for x in arg.split(u':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
slice_.is_safe = True
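# Editor's note: illustrative template usage of the slice filter above
# (variable name hypothetical; the argument mirrors Python slicing syntax):
#   {{ some_list|slice:":2" }}   -> first two items
#   {{ some_list|slice:"1:3" }}  -> second and third items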
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
from google.appengine._internal.django.utils.html import conditional_escape
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
And it is converted to:
['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
it = iter(second_item) # see if second item is iterable
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = u'\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_unicode(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
unordered_list.is_safe = True
unordered_list.needs_autoescape = True
###################
# INTEGERS #
###################
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except:
return value
add.is_safe = False
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
get_digit.is_safe = False
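# Editor's note: worked examples for get_digit (values hypothetical):
#   get_digit(123456789, 2) -> 8          (second digit from the right)
#   get_digit(123456789, 0) -> 123456789  (arg < 1 returns the value unchanged)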
###################
# DATES #
###################
def date(value, arg=None):
"""Formats a date according to the given format."""
from google.appengine._internal.django.utils.dateformat import format
if not value:
return u''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
date.is_safe = False
def time(value, arg=None):
"""Formats a time according to the given format."""
from google.appengine._internal.django.utils import dateformat
if value in (None, u''):
return u''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return dateformat.time_format(value, arg)
except AttributeError:
return ''
time.is_safe = False
def timesince(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
from google.appengine._internal.django.utils.timesince import timesince
if not value:
return u''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return u''
timesince.is_safe = False
def timeuntil(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
from google.appengine._internal.django.utils.timesince import timeuntil
if not value:
return u''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return u''
timeuntil.is_safe = False
###################
# LOGIC #
###################
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
default.is_safe = False
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
default_if_none.is_safe = False
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
divisibleby.is_safe = False
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(u',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
yesno.is_safe = False
###################
# MISC #
###################
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
if bytes < 1024:
return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return ugettext("%s KB") % filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
return ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
filesizeformat.is_safe = True
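# Editor's note: worked examples for filesizeformat (separators may vary with
# the active locale's number formatting):
#   filesizeformat(123)         -> "123 bytes"
#   filesizeformat(1024 * 1024) -> "1.0 MB"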
def pluralize(value, arg=u's'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if u',' not in arg:
arg = u',' + arg
bits = arg.split(u',')
if len(bits) > 2:
return u''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
pluralize.is_safe = False
def phone2numeric(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
from google.appengine._internal.django.utils.text import phone2numeric
return phone2numeric(value)
phone2numeric.is_safe = True
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
from pprint import pformat
try:
return pformat(value)
except Exception, e:
return u"Error in formatting: %s" % force_unicode(e, errors="replace")
pprint.is_safe = True
# Syntax: register.filter(name of filter, callback)
register.filter(add)
register.filter(addslashes)
register.filter(capfirst)
register.filter(center)
register.filter(cut)
register.filter(date)
register.filter(default)
register.filter(default_if_none)
register.filter(dictsort)
register.filter(dictsortreversed)
register.filter(divisibleby)
register.filter(escape)
register.filter(escapejs)
register.filter(filesizeformat)
register.filter(first)
register.filter(fix_ampersands)
register.filter(floatformat)
register.filter(force_escape)
register.filter(get_digit)
register.filter(iriencode)
register.filter(join)
register.filter(last)
register.filter(length)
register.filter(length_is)
register.filter(linebreaks)
register.filter(linebreaksbr)
register.filter(linenumbers)
register.filter(ljust)
register.filter(lower)
register.filter(make_list)
register.filter(phone2numeric)
register.filter(pluralize)
register.filter(pprint)
register.filter(removetags)
register.filter(random)
register.filter(rjust)
register.filter(safe)
register.filter(safeseq)
register.filter('slice', slice_)
register.filter(slugify)
register.filter(stringformat)
register.filter(striptags)
register.filter(time)
register.filter(timesince)
register.filter(timeuntil)
register.filter(title)
register.filter(truncatewords)
register.filter(truncatewords_html)
register.filter(unordered_list)
register.filter(upper)
register.filter(urlencode)
register.filter(urlize)
register.filter(urlizetrunc)
register.filter(wordcount)
register.filter(wordwrap)
register.filter(yesno)
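# Editor's note: illustrative template usage chaining filters registered above
# (variable names hypothetical):
#   {{ article.title|lower|truncatewords:10 }}
#   {{ comment.body|striptags|linebreaksbr }}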
| apache-2.0 |
Jorge-Rodriguez/ansible | lib/ansible/modules/windows/win_iis_webapppool.py | 28 | 7155 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configure IIS Web Application Pools
description:
- Creates, removes and configures an IIS Web Application Pool.
options:
attributes:
description:
- This field is a free form dictionary value for the application pool
attributes.
- These attributes are based on the naming standard at
U(https://www.iis.net/configreference/system.applicationhost/applicationpools/add#005),
see the examples section for more details on how to set this.
- You can also set the attributes of child elements like cpu and
processModel, see the examples to see how it is done.
- While you can use the numeric values for enums, it is recommended to use
the enum name itself, e.g. use SpecificUser instead of 3 for
processModel.identityType.
- managedPipelineMode may be either "Integrated" or "Classic".
- startMode may be either "OnDemand" or "AlwaysRunning".
- Use C(state) module parameter to modify the state of the app pool.
- When trying to set 'processModel.password' and you receive a 'Value
does not fall within the expected range' error, you have a corrupted
keystore. Please follow
U(http://structuredsight.com/2014/10/26/im-out-of-range-youre-out-of-range/)
to help fix your host.
name:
description:
- Name of the application pool.
type: str
required: yes
state:
description:
- The state of the application pool.
- If C(absent) will ensure the app pool is removed.
- If C(present) will ensure the app pool is configured and exists.
- If C(restarted) will ensure the app pool exists and will restart, this
is never idempotent.
- If C(started) will ensure the app pool exists and is started.
- If C(stopped) will ensure the app pool exists and is stopped.
type: str
choices: [ absent, present, restarted, started, stopped ]
default: present
seealso:
- module: win_iis_virtualdirectory
- module: win_iis_webapplication
- module: win_iis_webbinding
- module: win_iis_website
author:
- Henrik Wallström (@henrikwallstrom)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Return information about an existing application pool
win_iis_webapppool:
name: DefaultAppPool
state: present
- name: Create a new application pool in 'Started' state
win_iis_webapppool:
name: AppPool
state: started
- name: Stop an application pool
win_iis_webapppool:
name: AppPool
state: stopped
- name: Restart an application pool (non-idempotent)
win_iis_webapppool:
name: AppPool
state: restarted
- name: Change application pool attributes using new dict style
win_iis_webapppool:
name: AppPool
attributes:
managedRuntimeVersion: v4.0
autoStart: no
- name: Creates an application pool, sets attributes and starts it
win_iis_webapppool:
name: AnotherAppPool
state: started
attributes:
managedRuntimeVersion: v4.0
autoStart: no
# In the below example we are setting attributes in child element processModel
# https://www.iis.net/configreference/system.applicationhost/applicationpools/add/processmodel
- name: Manage child element and set identity of application pool
win_iis_webapppool:
name: IdentitiyAppPool
state: started
attributes:
managedPipelineMode: Classic
processModel.identityType: SpecificUser
processModel.userName: '{{ansible_user}}'
processModel.password: '{{ansible_password}}'
processModel.loadUserProfile: true
- name: Manage a timespan attribute
win_iis_webapppool:
name: TimespanAppPool
state: started
attributes:
# Timespan with full string "day:hour:minute:second.millisecond"
recycling.periodicRestart.time: "00:00:05:00.000000"
recycling.periodicRestart.schedule: ["00:10:00", "05:30:00"]
# Shortened timespan "hour:minute:second"
processModel.pingResponseTime: "00:03:00"
'''
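# Editor's note: an illustrative ad-hoc invocation of this module (the host
# pattern is hypothetical; arguments follow the documentation above):
#   ansible windows-hosts -m win_iis_webapppool -a "name=AppPool state=started"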
RETURN = r'''
attributes:
description: Application Pool attributes that were set and processed by this
module invocation.
returned: success
type: dict
sample:
enable32BitAppOnWin64: "true"
managedRuntimeVersion: "v4.0"
managedPipelineMode: "Classic"
info:
description: Information on current state of the Application Pool. See
https://www.iis.net/configreference/system.applicationhost/applicationpools/add#005
for the full list of return attributes based on your IIS version.
returned: success
type: complex
sample:
contains:
attributes:
description: Key value pairs showing the current Application Pool attributes.
returned: success
type: dict
sample:
autoStart: true
managedRuntimeLoader: "webengine4.dll"
managedPipelineMode: "Classic"
name: "DefaultAppPool"
CLRConfigFile: ""
passAnonymousToken: true
applicationPoolSid: "S-1-5-82-1352790163-598702362-1775843902-1923651883-1762956711"
queueLength: 1000
managedRuntimeVersion: "v4.0"
state: "Started"
enableConfigurationOverride: true
startMode: "OnDemand"
enable32BitAppOnWin64: true
cpu:
description: Key value pairs showing the current Application Pool cpu attributes.
returned: success
type: dict
sample:
action: "NoAction"
limit: 0
resetInterval:
Days: 0
Hours: 0
failure:
description: Key value pairs showing the current Application Pool failure attributes.
returned: success
type: dict
sample:
autoShutdownExe: ""
orphanActionExe: ""
rapidFailProtectionInterval:
Days: 0
Hours: 0
name:
description: Name of Application Pool that was processed by this module invocation.
returned: success
type: str
sample: "DefaultAppPool"
processModel:
description: Key value pairs showing the current Application Pool processModel attributes.
returned: success
type: dict
sample:
identityType: "ApplicationPoolIdentity"
logonType: "LogonBatch"
pingInterval:
Days: 0
Hours: 0
recycling:
description: Key value pairs showing the current Application Pool recycling attributes.
returned: success
type: dict
sample:
disallowOverlappingRotation: false
disallowRotationOnConfigChange: false
logEventOnRecycle: "Time,Requests,Schedule,Memory,IsapiUnhealthy,OnDemand,ConfigChange,PrivateMemory"
state:
description: Current runtime state of the pool as the module completed.
returned: success
type: str
sample: "Started"
'''
| gpl-3.0 |
ebagdasa/tempest | tempest/api/orchestration/stacks/test_soft_conf.py | 9 | 7506 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
class TestSoftwareConfig(base.BaseOrchestrationTest):
def setUp(self):
super(TestSoftwareConfig, self).setUp()
self.configs = []
# Add 2 sets of software configuration
self.configs.append(self._config_create('a'))
self.configs.append(self._config_create('b'))
# Create a deployment using config a's id
self._deployment_create(self.configs[0]['id'])
def _config_create(self, suffix):
configuration = {'group': 'script',
'inputs': [],
'outputs': [],
'options': {}}
configuration['name'] = 'heat_soft_config_%s' % suffix
configuration['config'] = '#!/bin/bash echo init-%s' % suffix
api_config = self.client.create_software_config(**configuration)
configuration['id'] = api_config['software_config']['id']
self.addCleanup(self._config_delete, configuration['id'])
self._validate_config(configuration, api_config)
return configuration
def _validate_config(self, configuration, api_config):
# Assert all expected keys are present with matching data
for k in configuration.keys():
self.assertEqual(configuration[k],
api_config['software_config'][k])
def _deployment_create(self, config_id):
self.server_id = data_utils.rand_name('dummy-server')
self.action = 'ACTION_0'
self.status = 'STATUS_0'
self.input_values = {}
self.output_values = []
self.status_reason = 'REASON_0'
self.signal_transport = 'NO_SIGNAL'
self.deployment = self.client.create_software_deploy(
self.server_id, config_id, self.action, self.status,
self.input_values, self.output_values, self.status_reason,
self.signal_transport)
self.deployment_id = self.deployment['software_deployment']['id']
self.addCleanup(self._deployment_delete, self.deployment_id)
def _deployment_delete(self, deploy_id):
self.client.delete_software_deploy(deploy_id)
# Testing that it is really gone
self.assertRaises(
exceptions.NotFound, self.client.get_software_deploy,
self.deployment_id)
def _config_delete(self, config_id):
self.client.delete_software_config(config_id)
# Testing that it is really gone
self.assertRaises(
exceptions.NotFound, self.client.get_software_config, config_id)
@test.attr(type='smoke')
def test_get_software_config(self):
"""Testing software config get."""
for conf in self.configs:
api_config = self.client.get_software_config(conf['id'])
self._validate_config(conf, api_config)
@test.attr(type='smoke')
def test_get_deployment_list(self):
"""Getting a list of all deployments"""
deploy_list = self.client.get_software_deploy_list()
deploy_ids = [deploy['id'] for deploy in
deploy_list['software_deployments']]
self.assertIn(self.deployment_id, deploy_ids)
@test.attr(type='smoke')
def test_get_deployment_metadata(self):
"""Testing deployment metadata get"""
metadata = self.client.get_software_deploy_meta(self.server_id)
conf_ids = [conf['id'] for conf in metadata['metadata']]
self.assertIn(self.configs[0]['id'], conf_ids)
def _validate_deployment(self, action, status, reason, config_id):
deployment = self.client.get_software_deploy(self.deployment_id)
self.assertEqual(action, deployment['software_deployment']['action'])
self.assertEqual(status, deployment['software_deployment']['status'])
self.assertEqual(reason,
deployment['software_deployment']['status_reason'])
self.assertEqual(config_id,
deployment['software_deployment']['config_id'])
@test.attr(type='smoke')
def test_software_deployment_create_validate(self):
"""Testing software deployment was created as expected."""
# Asserting that all fields were created
self.assert_fields_in_dict(
self.deployment['software_deployment'], 'action', 'config_id',
'id', 'input_values', 'output_values', 'server_id', 'status',
'status_reason')
# Testing get for this deployment and verifying parameters
self._validate_deployment(self.action, self.status,
self.status_reason, self.configs[0]['id'])
@test.attr(type='smoke')
def test_software_deployment_update_no_metadata_change(self):
"""Testing software deployment update without metadata change."""
metadata = self.client.get_software_deploy_meta(self.server_id)
# Updating values without changing the configuration ID
new_action = 'ACTION_1'
new_status = 'STATUS_1'
new_reason = 'REASON_1'
self.client.update_software_deploy(
self.deployment_id, self.server_id, self.configs[0]['id'],
new_action, new_status, self.input_values, self.output_values,
new_reason, self.signal_transport)
# Verifying get and that the deployment was updated as expected
self._validate_deployment(new_action, new_status,
new_reason, self.configs[0]['id'])
# Metadata should not be changed at this point
test_metadata = self.client.get_software_deploy_meta(self.server_id)
for key in metadata['metadata'][0]:
self.assertEqual(
metadata['metadata'][0][key],
test_metadata['metadata'][0][key])
@test.attr(type='smoke')
def test_software_deployment_update_with_metadata_change(self):
"""Testing software deployment update with metadata change."""
metadata = self.client.get_software_deploy_meta(self.server_id)
self.client.update_software_deploy(
self.deployment_id, self.server_id, self.configs[1]['id'],
self.action, self.status, self.input_values,
self.output_values, self.status_reason, self.signal_transport)
self._validate_deployment(self.action, self.status,
self.status_reason, self.configs[1]['id'])
# Metadata should now be changed
new_metadata = self.client.get_software_deploy_meta(self.server_id)
# Its enough to test the ID in this case
meta_id = metadata['metadata'][0]['id']
test_id = new_metadata['metadata'][0]['id']
self.assertNotEqual(meta_id, test_id)
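# Editor's note: one way to run this suite against a configured tempest
# deployment (the runner invocation below is an assumption):
#   testr run tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig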
| apache-2.0 |
NoobieDog/nexmon | utilities/wireshark/tools/netscreen2dump.py | 42 | 4423 | #!/usr/bin/env python
"""
Converts netscreen snoop hex-dumps to a hex-dump that text2pcap can read.
Copyright (c) 2004 by Gilbert Ramirez <gram@alumni.rice.edu>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import sys
import re
import os
import stat
import time
class OutputFile:
TIMER_MAX = 99999.9
def __init__(self, name, base_time):
try:
self.fh = open(name, "w")
except IOError, err:
sys.exit(err)
self.base_time = base_time
self.prev_timestamp = 0.0
def PrintPacket(self, timestamp, datalines):
# What do to with the timestamp? I need more data about what
# the netscreen timestamp is, then I can generate one for the text file.
# print "TS:", timestamp.group("time")
try:
timestamp = float(timestamp.group("time"))
except ValueError:
sys.exit("Unable to convert '%s' to floating point." % \
(timestamp,))
# Did we wrap around the timeer max?
if timestamp < self.prev_timestamp:
self.base_time += self.TIMER_MAX
self.prev_timestamp = timestamp
packet_timestamp = self.base_time + timestamp
# Determine the time string to print
gmtime = time.gmtime(packet_timestamp)
subsecs = packet_timestamp - int(packet_timestamp)
assert 0 <= subsecs < 1
subsecs = int(subsecs * 10)
print >> self.fh, "%s.%d" % (time.strftime("%Y-%m-%d %H:%M:%S", gmtime), \
subsecs)
# Print the packet data
offset = 0
for lineno, hexgroup in datalines:
hexline = hexgroup.group("hex")
hexpairs = hexline.split()
print >> self.fh, "%08x %s" % (offset, hexline)
offset += len(hexpairs)
# Blank line
print >> self.fh
# Find a timestamp line
re_timestamp = re.compile(r"^(?P<time>\d+\.\d): [\w/]+\((?P<io>.)\)(:| len=)")
# Find a hex dump line
re_hex_line = re.compile(r"(?P<hex>([0-9a-f]{2} ){1,16})\s+(?P<ascii>.){1,16}")
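# Editor's sketch of input lines the two regexes above are meant to match
# (reconstructed from the patterns; real NetScreen output may differ):
#   "123.4: ethernet0/0(i): len=60"                       -> re_timestamp
#   "00 50 56 c0 00 08 00 0c 29 aa bb cc 08 00    ......" -> re_hex_line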
def run(input_filename, output_filename):
try:
ifh = open(input_filename, "r")
except IOError, err:
sys.exit(err)
# Get the file's creation time.
try:
ctime = os.stat(input_filename)[stat.ST_CTIME]
except OSError, err:
sys.exit(err)
output_file = OutputFile(output_filename, ctime)
timestamp = None
datalines = []
lineno = 0
for line in ifh.xreadlines():
lineno += 1
# If we have no timestamp yet, look for one
if not timestamp:
m = re_timestamp.search(line)
if m:
timestamp = m
# Otherwise, look for hex dump lines
else:
m = re_hex_line.search(line)
if m:
datalines.append((lineno, m))
else:
# If we have been gathering hex dump lines,
# and this line is not a hex dump line, then the hex dump
# has finished, and so has the packet. So print the packet
# and reset our variables so we can look for the next packet.
if datalines:
output_file.PrintPacket(timestamp, datalines)
timestamp = None
datalines = []
# At the end of the file we may still have hex dump data in memory.
# If so, print the packet
if datalines:
output_file.PrintPacket(timestamp, datalines)
timestamp = None
datalines = []
def usage():
print >> sys.stderr, "Usage: netscreen2dump.py netscreen-dump-file new-dump-file"
sys.exit(1)
def main():
if len(sys.argv) != 3:
usage()
run(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| gpl-3.0 |
ioram7/keystone-federado-pgid2013 | build/python-keystoneclient/keystoneclient/exceptions.py | 1 | 4260 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
"""
Exception definitions.
"""
class CommandError(Exception):
pass
class ValidationError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoTokenLookupException(Exception):
"""This form of authentication does not support looking up
endpoints from an existing token."""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class EmptyCatalog(Exception):
""" The service catalog is empty. """
pass
class NoUniqueMatch(Exception):
"""Unable to find unique resource"""
pass
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
def __init__(self, code, message=None, details=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
def __str__(self):
return "%s (HTTP %s)" % (self.message, self.code)
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class MethodNotAllowed(ClientException):
"""
HTTP 405 - Method not allowed
"""
http_status = 405
message = "Method not allowed"
class Conflict(ClientException):
"""
HTTP 409 - Conflict
"""
http_status = 409
message = "Conflict"
class OverLimit(ClientException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
# NotImplemented is a Python built-in constant, so use a different class name.
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
class ServiceUnavailable(ClientException):
"""
HTTP 503 - Service Unavailable: The server is currently unavailable.
"""
http_status = 503
message = "Service Unavailable"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict((c.http_status, c) for c in [BadRequest,
Unauthorized,
Forbidden,
NotFound,
MethodNotAllowed,
Conflict,
OverLimit,
HTTPNotImplemented,
ServiceUnavailable])
def from_response(response, body):
"""
Return an instance of an ClientException or subclass
based on an requests response.
Usage::
resp = requests.request(...)
if resp.status_code != 200:
raise from_response(resp, resp.text)
"""
cls = _code_map.get(response.status_code, ClientException)
if body:
if hasattr(body, 'keys'):
error = body[body.keys()[0]]
message = error.get('message', None)
details = error.get('details', None)
else:
# If we didn't get back a properly formed error message we
# probably couldn't communicate with Keystone at all.
message = "Unable to communicate with identity service: %s." % body
details = None
return cls(code=response.status_code, message=message, details=details)
else:
return cls(code=response.status_code)
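# Editor's sketch of typical use (endpoint URL hypothetical; mirrors the
# docstring of from_response above):
#   resp = requests.request('GET', 'http://keystone:5000/v2.0/tenants')
#   if resp.status_code != 200:
#       raise from_response(resp, resp.text)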
| apache-2.0 |
Magnitus-/DockerFiles | network-setup/image/setup.py | 1 | 5370 | #!/usr/local/bin/python
import os
import datetime
import re
import docker
from jinja2 import Template
from yaml import load
TEMPLATES_DIRECTORY = os.environ['TEMPLATE_DIRECTORY']
CONFIG_DIRECTORY = os.environ['CONFIG_DIRECTORY']
SERVICE_NAMES = {
'dns': os.environ.get('DNS_SERVICE_NAME', 'dns'),
'dhcp': os.environ.get('DHCP_SERVICE_NAME', '{domain}-dhcp')
}
DNS_IMAGE = os.environ.get('DNS_IMAGE')
DHCP_IMAGE = os.environ.get('DHCP_IMAGE')
DNS_TTL = {
'default': 86400,
'short': 300
}
DHCP_LEASE_TIME = {
'default': 600,
'max': 7200
}
DHCP_FILENAME_TEMPLATE = 'dhcp.{domain}.conf'
def get_configurations():
with open('/opt/conf') as config_file:
return load(config_file)
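# Editor's sketch of what /opt/conf might contain, reconstructed from how the
# keys are read below (YAML; everything beyond 'fallback_dns_server',
# 'networks' and 'interface' is hypothetical):
#
#   fallback_dns_server: 8.8.8.8
#   networks:
#     home:
#       interface: eth0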
route_ip_regex = re.compile('src[ ](?P<ip>[0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)')
def get_interfaces_ip_maps(configurations):
interfaces_ip_maps = {}
client = docker.from_env()
route_outputs = client.containers.run(
"magnitus/network-setup:latest",
"ip route",
network_mode="host",
remove=True
).decode('utf-8').strip().split('\n')
for name in configurations['networks']:
interface = configurations['networks'][name]['interface']
for route_output in route_outputs:
if not route_output.startswith('default via') and 'dev {interface}'.format(interface=interface) in route_output:
match = route_ip_regex.search(route_output)
if match is not None:
interfaces_ip_maps[interface] = match.group('ip')
return interfaces_ip_maps
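# Editor's sketch of an `ip route` line the regex above extracts an IP from
# (addresses illustrative):
#   "192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.10"
#   -> interfaces_ip_maps['eth0'] = '192.168.1.10'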
def generate_dns_config(configurations):
with open(os.path.join(TEMPLATES_DIRECTORY, 'dns.conf.j2'), 'r') as template_file:
template = Template(template_file.read())
with open(os.path.join(CONFIG_DIRECTORY, 'dns.conf'), 'w+') as configuration_file:
content = template.render(
fallback_dns_server=configurations['fallback_dns_server'],
networks=[{'domain': name} for name in configurations['networks']]
)
configuration_file.write(content)
def generate_dns_zonefiles(configurations, interfaces_ip_maps):
with open(os.path.join(TEMPLATES_DIRECTORY, 'zonefile.j2'), 'r') as template_file:
template = Template(template_file.read())
for name in configurations['networks']:
network = configurations['networks'][name]
with open(os.path.join(CONFIG_DIRECTORY, 'zonefile.{domain}'.format(domain=name)), 'w+') as zonefile:
content = template.render(
ttl=DNS_TTL,
timestamp=datetime.datetime.utcnow().strftime("%Y%m%d"),
domain=name,
network=network,
ip=interfaces_ip_maps[network['interface']]
)
zonefile.write(content)
def generate_dhcp_config(configurations, interfaces_ip_maps):
with open(os.path.join(TEMPLATES_DIRECTORY, 'dhcp.conf.j2'), 'r') as template_file:
template = Template(template_file.read())
for name in configurations['networks']:
network = configurations['networks'][name]
filename = DHCP_FILENAME_TEMPLATE.format(domain=name)
with open(os.path.join(CONFIG_DIRECTORY, filename), 'w+') as configuration_file:
content = template.render(
ip=interfaces_ip_maps[network['interface']],
lease_time=DHCP_LEASE_TIME,
network=network
)
configuration_file.write(content)
def get_container_id():
with open('/etc/hostname', 'r') as hostfile:
return hostfile.read().strip()
def clear_container_if_exists(name):
client = docker.from_env()
containers = client.containers.list(filters={'name': name})
if len(containers) > 0:
container = containers[0]
container.stop()
container.remove()
def launch_services(configurations, container_id):
client = docker.from_env()
clear_container_if_exists("dns-server")
client.containers.run(
DNS_IMAGE,
"-conf {conf}".format(conf=os.path.join(CONFIG_DIRECTORY, 'dns.conf')),
name="dns-server",
network_mode="host",
volumes_from=container_id,
restart_policy={'name': 'always'},
detach=True
)
for name in configurations['networks']:
network = configurations['networks'][name]
container_name = "dhcp-{domain}-server".format(domain=name)
clear_container_if_exists(container_name)
client.containers.run(
DHCP_IMAGE,
name=container_name,
network_mode="host",
volumes_from=container_id,
restart_policy={'name': 'always'},
detach=True,
environment={
"CONF_PATH": os.path.join(CONFIG_DIRECTORY, DHCP_FILENAME_TEMPLATE.format(domain=name)),
"INTERFACE": network['interface']
}
)
if __name__ == "__main__":
configurations = get_configurations()
interfaces_ip_maps = get_interfaces_ip_maps(configurations)
generate_dns_config(configurations)
generate_dns_zonefiles(configurations, interfaces_ip_maps)
generate_dhcp_config(configurations, interfaces_ip_maps)
launch_services(configurations, get_container_id()) | mit |
Immortalin/python-for-android | python3-alpha/python3-src/Lib/test/test_aifc.py | 55 | 3979 | from test.support import findfile, run_unittest, TESTFN
import unittest
import os
import aifc
class AIFCTest(unittest.TestCase):
def setUp(self):
self.f = self.fout = None
self.sndfilepath = findfile('Sine-1000Hz-300ms.aif')
def tearDown(self):
if self.f is not None:
self.f.close()
if self.fout is not None:
try:
self.fout.close()
except (aifc.Error, AttributeError):
pass
try:
os.remove(TESTFN)
except OSError:
pass
def test_skipunknown(self):
#Issue 2245
#This file contains chunk types aifc doesn't recognize.
self.f = aifc.open(self.sndfilepath)
def test_params(self):
f = self.f = aifc.open(self.sndfilepath)
self.assertEqual(f.getnchannels(), 2)
self.assertEqual(f.getsampwidth(), 2)
self.assertEqual(f.getframerate(), 48000)
self.assertEqual(f.getnframes(), 14400)
self.assertEqual(f.getcomptype(), b'NONE')
self.assertEqual(f.getcompname(), b'not compressed')
self.assertEqual(
f.getparams(),
(2, 2, 48000, 14400, b'NONE', b'not compressed'),
)
def test_read(self):
f = self.f = aifc.open(self.sndfilepath)
self.assertEqual(f.tell(), 0)
self.assertEqual(f.readframes(2), b'\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
f.rewind()
pos0 = f.tell()
self.assertEqual(pos0, 0)
self.assertEqual(f.readframes(2), b'\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
pos2 = f.tell()
self.assertEqual(pos2, 2)
self.assertEqual(f.readframes(2), b'\x17t\x17t"\xad"\xad')
f.setpos(pos2)
self.assertEqual(f.readframes(2), b'\x17t\x17t"\xad"\xad')
f.setpos(pos0)
self.assertEqual(f.readframes(2), b'\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
def test_write(self):
f = self.f = aifc.open(self.sndfilepath)
fout = self.fout = aifc.open(TESTFN, 'wb')
fout.aifc()
fout.setparams(f.getparams())
for frame in range(f.getnframes()):
fout.writeframes(f.readframes(1))
fout.close()
fout = self.fout = aifc.open(TESTFN, 'rb')
f.rewind()
self.assertEqual(f.getparams(), fout.getparams())
self.assertEqual(f.readframes(5), fout.readframes(5))
def test_compress(self):
f = self.f = aifc.open(self.sndfilepath)
fout = self.fout = aifc.open(TESTFN, 'wb')
fout.aifc()
fout.setnchannels(f.getnchannels())
fout.setsampwidth(f.getsampwidth())
fout.setframerate(f.getframerate())
fout.setcomptype(b'ULAW', b'foo')
for frame in range(f.getnframes()):
fout.writeframes(f.readframes(1))
fout.close()
self.assertLess(
os.stat(TESTFN).st_size,
os.stat(self.sndfilepath).st_size*0.75,
)
fout = self.fout = aifc.open(TESTFN, 'rb')
f.rewind()
self.assertEqual(f.getparams()[0:3], fout.getparams()[0:3])
self.assertEqual(fout.getcomptype(), b'ULAW')
self.assertEqual(fout.getcompname(), b'foo')
# XXX: this test fails, not sure if it should succeed or not
# self.assertEqual(f.readframes(5), fout.readframes(5))
def test_close(self):
class Wrapfile(object):
def __init__(self, file):
self.file = open(file, 'rb')
self.closed = False
def close(self):
self.file.close()
self.closed = True
def __getattr__(self, attr): return getattr(self.file, attr)
testfile = Wrapfile(self.sndfilepath)
f = self.f = aifc.open(testfile)
self.assertEqual(testfile.closed, False)
f.close()
self.assertEqual(testfile.closed, True)
def test_main():
run_unittest(AIFCTest)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
uclouvain/osis_louvain | base/forms/learning_unit/educational_information/mail_reminder.py | 1 | 3305 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from base.forms.utils.emptyfield import EmptyField
class MailReminderRow(forms.Form):
responsible = EmptyField(label='')
learning_unit_years = EmptyField(label='')
check = forms.BooleanField(required=False, label='')
person_id = forms.IntegerField(widget=forms.HiddenInput(), required=False)
def __init__(self, *args, **kwargs):
self.person = kwargs.pop('person', None)
self.learning_unit_years = kwargs.pop('learning_unit_years', [])
super().__init__(*args, **kwargs)
self.initial['check'] = True
self.initial['responsible'] = self.person
if self.person:
self.initial['person_id'] = self.person.id
self.fields["responsible"].label = "{} {}".format(self.person.last_name, self.person.first_name)
acronym_list = _get_acronyms_concatenation(self.learning_unit_years)
self.initial['learning_unit_years'] = acronym_list
self.fields["learning_unit_years"].label = acronym_list
self.fields["responsible"].widget.attrs['class'] = 'no_label'
def _get_acronyms_concatenation(learning_unit_years):
return ', '.join([learning_unit_yr.acronym for learning_unit_yr in learning_unit_years])
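# Editor's note: worked example (acronyms hypothetical):
#   given luy_a.acronym == 'LDROI1001' and luy_b.acronym == 'LBIR1002',
#   _get_acronyms_concatenation([luy_a, luy_b]) -> 'LDROI1001, LBIR1002'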
class MailReminderFormset(forms.BaseFormSet):
def __init__(self, *args, list_responsible=None, **kwargs):
self.list_responsible = list_responsible
super().__init__(*args, **kwargs)
def get_form_kwargs(self, index):
kwargs = super().get_form_kwargs(index)
if self.list_responsible:
kwargs['person'] = self.list_responsible[index].get('person')
kwargs['learning_unit_years'] = self.list_responsible[index].get('learning_unit_years')
return kwargs
def get_checked_responsibles(self):
return [{'person': form.cleaned_data.get('person_id'),
'learning_unit_years': form.cleaned_data.get('learning_unit_years')} for form in self.forms if
form.cleaned_data.get('check')]
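# Editor's sketch of how this formset is typically wired up (data shapes
# follow get_form_kwargs above; formset_factory is standard Django):
#   responsibles = [{'person': a_person,
#                    'learning_unit_years': learning_unit_years}]
#   MailReminderRowFormSet = forms.formset_factory(
#       MailReminderRow, formset=MailReminderFormset, extra=len(responsibles))
#   formset = MailReminderRowFormSet(list_responsible=responsibles)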
| agpl-3.0 |
zchking/odoo | addons/resource/faces/observer.py | 433 | 2328 | #@+leo-ver=4
#@+node:@file observer.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains the base class for all observer objects
"""
#@<< Imports >>
#@+node:<< Imports >>
#@-node:<< Imports >>
#@nl
_is_source_ = True
#@+others
#@+node:class Observer
class Observer(object):
"""
Base Class for all charts and reports.
@var visible: Specifies if the observer is visible
in the navigation bar inside the gui.
@var link_view: synchronizes the marked objects in all views.
"""
#@ << declarations >>
#@+node:<< declarations >>
__type_name__ = None
__type_image__ = None
visible = True
link_view = True
__attrib_completions__ = { "visible" : 'visible = False',
"link_view" : "link_view = False" }
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:register_editors
def register_editors(cls, registry):
pass
register_editors = classmethod(register_editors)
#@-node:register_editors
#@-others
#@-node:class Observer
#@-others
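# Editor's sketch of a minimal Observer subclass (names hypothetical):
# class GanttChart(Observer):
#     __type_name__ = "gantt_chart"
#     visible = True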
factories = { }
clear_cache_funcs = {}
#@-node:@file observer.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yati-sagade/incubator-airflow | airflow/contrib/operators/gcs_to_bq.py | 1 | 9518 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToBigQueryOperator(BaseOperator):
"""
Loads files from Google cloud storage into BigQuery.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google cloud storage object name. The object in
Google cloud storage must be a JSON file with the schema fields in it.
:param bucket: The bucket to load from.
:type bucket: string
:param source_objects: List of Google cloud storage URIs to load from.
If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:type source_objects: list
:param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table>
BigQuery table to load data into. If <project> is not included, project will
be the project defined in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Should not be set when source_format is 'DATASTORE_BACKUP'.
:type schema_fields: list
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table.
:type schema_object: string
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV file.
:type quote_character: string
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:type allow_jagged_rows: bool
:param max_id_key: If set, the name of a column in the BigQuery table
that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
operators to use. This can be helpful with incremental loads--during
future executions, you can pick up from the max ID.
:type max_id_key: string
:param bigquery_conn_id: Reference to a specific BigQuery hook.
:type bigquery_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
Note that 'field' cannot be used together with
dataset.table$partition.
:type time_partitioning: dict
"""
template_fields = ('bucket', 'source_objects',
'schema_object', 'destination_project_dataset_table')
template_ext = ('.sql',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=None,
schema_object=None,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
max_id_key=None,
bigquery_conn_id='bigquery_default',
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
schema_update_options=(),
src_fmt_configs=None,
time_partitioning=None,
*args, **kwargs):
super(GoogleCloudStorageToBigQueryOperator, self).__init__(*args, **kwargs)
# GCS config
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.max_id_key = max_id_key
self.bigquery_conn_id = bigquery_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.schema_update_options = schema_update_options
# Guard against shared mutable default arguments: fall back to fresh dicts.
self.src_fmt_configs = src_fmt_configs if src_fmt_configs is not None else {}
self.time_partitioning = time_partitioning if time_partitioning is not None else {}
def execute(self, context):
bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to)
if not self.schema_fields and self.schema_object \
and self.source_format != 'DATASTORE_BACKUP':
gcs_hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
schema_fields = json.loads(gcs_hook.download(
self.bucket,
self.schema_object).decode("utf-8"))
else:
schema_fields = self.schema_fields
source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
for source_object in self.source_objects]
conn = bq_hook.get_conn()
cursor = conn.cursor()
cursor.run_load(
destination_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
create_disposition=self.create_disposition,
skip_leading_rows=self.skip_leading_rows,
write_disposition=self.write_disposition,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
schema_update_options=self.schema_update_options,
src_fmt_configs=self.src_fmt_configs,
time_partitioning=self.time_partitioning)
if self.max_id_key:
cursor.execute('SELECT MAX({}) FROM {}'.format(
self.max_id_key,
self.destination_project_dataset_table))
row = cursor.fetchone()
max_id = row[0] if row[0] else 0
self.log.info(
'Loaded BQ data with max %s.%s=%s',
self.destination_project_dataset_table, self.max_id_key, max_id
)
return max_id
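# --- Illustrative usage (editor's addition, not part of the upstream file) ---
# A minimal sketch of wiring this operator into a DAG. The bucket, object
# path, table name and schema below are hypothetical placeholders; only the
# operator's own parameters come from the class above.
def _example_gcs_to_bq_dag():
    from datetime import datetime
    from airflow import DAG
    dag = DAG('example_gcs_to_bq', start_date=datetime(2018, 1, 1),
              schedule_interval=None)
    return GoogleCloudStorageToBigQueryOperator(
        task_id='gcs_to_bq',
        bucket='my-bucket',                  # hypothetical bucket
        source_objects=['data/events.csv'],  # hypothetical object path
        destination_project_dataset_table='my_project.my_dataset.events',
        schema_fields=[
            {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
            {'name': 'payload', 'type': 'STRING', 'mode': 'NULLABLE'},
        ],
        skip_leading_rows=1,
        write_disposition='WRITE_TRUNCATE',
        max_id_key='id',  # execute() then returns MAX(id), which lands in XCom
        dag=dag)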
| apache-2.0 |
mlesche/deep_seq_pipeline | deep_seq_pipeline/src/general_scripts/createGTFfiles.py | 1 | 18824 | #!/usr/bin/env python
'''
The MIT License (MIT)
Copyright (c) <2014> <Mathias Lesche>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
contact: mat.lesche(at)gmail.com
'''
''' python modules '''
import logging
from argparse import ArgumentParser as ArgumentParser
from argparse import RawDescriptionHelpFormatter
from collections import defaultdict as defaultdict
from collections import OrderedDict
from functools import partial as partial
from re import compile as compile
from time import sleep
from types import NoneType
''' own modules '''
from main.io_module import check_Directorylist
from main.io_module import write_list
from main.io_module import check_Fileslist
from main.io_module import readFile_getList_withSep
from main.main_logger import MainLogger
class Parser(object):
def __init__(self):
self.__parser = ArgumentParser(description="""
Script removes chromosomes and changes chromosome names in a gtf file.
Chromosomes which are going to be kept are listed in a file (-c option),
with the new name next to each one (tab separated).
The -G option creates a gene gtf file. A gene is represented by one transcript only.
The overlapping exons, which come from different transcripts but the same gene, are merged.
The -I option creates an intron gtf file. By choosing the -m option (G(ene) or T(ranscript)),
one decides if the introns are built on the gene model or for all transcripts.
""", formatter_class=RawDescriptionHelpFormatter)
self.initialiseParser()
self.__classname = self.__class__.__name__
self.__log = False
self.__logger = ''
self.start_logging()
self.__gtf = ''
self.__chrom = ''
self.__output = ''
self.__prefix = ''
self.__gene = False
self.__transcript = False
self.__intron = False
self.__model = ''
def initialiseParser(self):
self.__parser.add_argument('-g', '--gtf', type=str, metavar='FILE', dest='gtf', required=True, help='gtf file with gene, transcript annotation')
self.__parser.add_argument('-c', '--chromosome', type=str, metavar='FILE', dest='chrom', required=True, help='file with the old and new chromosome names (tab separated)')
self.__parser.add_argument('-o', '--output', type=str, metavar='DIRECTORY', dest='output', required=True, help='output directory')
self.__parser.add_argument('-p', '--prefix', type=str, metavar='STRING', dest='prefix', required=True, help='prefix for the new files')
self.__parser.add_argument('-G', '--GENE', dest='gene', action='store_true', help='build a gene GTF file')
self.__parser.add_argument('-I', '--INTRON', dest='intron', action='store_true', help='build a intron GTF file')
self.__parser.add_argument('-m', '--model', type=str, dest='model', default='G', choices=('G', 'T'), help='model for generating introns (default: G[ene] model)')
self.__parser.add_argument('-T', '--TRANSCRIPT', dest='transcript', action='store_true', help='build a transcript GTF file')
def parse(self, inputstring = None):
if isinstance(inputstring, NoneType):
self.__options = self.__parser.parse_args()
else:
self.__options = self.__parser.parse_args(inputstring)
def getParser(self):
return self.__parser
def start_logging(self):
self.__log = True
self.__logger = logging.getLogger('pipeline.createGTF')
def show_log(self, level, message):
if self.__log:
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
else:
print message
def checkDirectory(self):
dirlist = (self.__options.output, )
good, bad = check_Directorylist(dirlist)
if len(good) == 0 or len(good) > 1:
for i in bad:
self.show_log('error', "check output directory (-o): {0}".format(i))
exit(2)
self.__output = good[0]
self.show_log('info', "output directory: {0}".format(self.__output))
def checkFiles(self):
good, bad = check_Fileslist((self.__options.gtf, self.__options.chrom))
if len(good) != 2:
for i in bad:
self.show_log('warning', "check file: {0}".format(i))
self.show_log('error', "check input for gtf and/or chromosome file")
exit(2)
self.__gtf = good[0]
self.show_log('info', "gtf file: {0}".format(self.__gtf))
self.__chrom = good[1]
self.show_log('info', "chromosome file: {0}".format(self.__chrom))
def main(self):
self.__gene = self.__options.gene
self.__intron = self.__options.intron
self.__transcript = self.__options.transcript
self.__model = self.__options.model
if not self.__intron and not self.__transcript and not self.__gene:
self.show_log('info', "choose one of the following options: -G or -I or -T".format(self.__prefix))
exit(2)
self.show_log('info', "build gene file: {0}".format(self.__gene))
self.show_log('info', "build intron file: {0}".format(self.__intron))
if self.__intron:
self.show_log('info', "model for intron build: {0}".format(self.__model))
self.show_log('info', "build transcript file: {0}".format(self.__transcript))
self.checkDirectory()
self.checkFiles()
self.__prefix = self.__options.prefix
self.show_log('info', "prefix: {0}".format(self.__prefix))
sleep(2)
def get_chrom(self):
return self.__chrom
def get_gene(self):
return self.__gene
def get_gtf(self):
return self.__gtf
def get_intron(self):
return self.__intron
def get_model(self):
return self.__model
def get_output(self):
return self.__output
def get_prefix(self):
return self.__prefix
def get_transcript(self):
return self.__transcript
chrom = property(get_chrom, None, None, None)
gene = property(get_gene, None, None, None)
gtf = property(get_gtf, None, None, None)
intron = property(get_intron, None, None, None)
model = property(get_model)
output = property(get_output, None, None, None)
prefix = property(get_prefix, None, None, None)
transcript = property(get_transcript, None, None, None)
class AnnotationGTF(object):
def __init__(self, gtf, chrom, output, prefix, gene, intron, transcript, model):
self.__log = False
self.__logger = ''
self.start_logging()
self.__gtf = gtf
self.__chrom = chrom
self.__chromdict = {}
self.__gene = gene
self.__intron = intron
self.__model = model
self.__transcript = transcript
self.__output = output
self.__prefix = prefix
self.__header = []
self.__gtflist = readFile_getList_withSep(self.__gtf, '\t')
self.__genegtf = []
self.__genedict = defaultdict(partial(defaultdict, set))
self.__geneorder = []
self.__geneline = 'gene_id "GENEID"; gene_name "GENENAME"; gene_source "modified"; gene_biotype "GENETYPE";'
self.__transcriptline = 'gene_id "GENEID"; transcript_id "GENEID-001"; gene_name "GENENAME"; gene_source "modified"; gene_biotype "GENETYPE"; transcript_name "GENENAME-001";'
self.__exonline = 'gene_id "GENEID"; transcript_id "GENEID-001"; exon_number "NUMBER"; gene_name "GENENAME"; gene_source "modified"; gene_biotype "GENETYPE"; transcript_name "GENENAME-001";'
self.__introngtf = []
self.__exnr = compile('exon_number "[0-9]+"')
self.__biotypereg = compile('gene_biotype "[0-9a-zA-z_|]+"')
def start_logging(self):
self.__log = True
self.__logger = logging.getLogger('pipeline.createGTF')
def show_log(self, level, message):
if self.__log:
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
else:
print message
def prepare_chromdict(self):
self.show_log('info', 'chromosome which are kept and names are changed:')
for entry in readFile_getList_withSep(self.__chrom, '\t'):
self.show_log('info', '{0} -> {1}'.format(entry[0], entry[1]))
self.__chromdict[entry[0]] = entry[1]
'''
method removes entries from the gtf and changes the names
'''
def remove_change_gtf(self):
chromremove = set()
linecount = 0
temp = []
for entry in self.__gtflist:
if entry[0].startswith('#'):
self.__header.append('\t'.join(entry)+'\n')
continue
try:
entry[0] = self.__chromdict[entry[0]]
temp.append(entry)
except KeyError:
chromremove.add(entry[0])
linecount += 1
self.show_log('info', 'chromosomes removed: {0}'.format(len(chromremove)))
self.show_log('info', 'lines removed: {0}'.format(linecount))
self.__gtflist = temp
'''
method appends exons from the gene's transcripts to the gene and stores it in a dictionary
'''
def exon_Togenes(self):
for line in self.__gtflist:
genename, geneid = '', ''
if line[2] == 'exon':
start, stop = int(line[3]), int(line[4])
infofield = line[8].split(';')
geneid = [i for i in infofield if 'gene_id' in i][0].split('"')[1]
# some gtf entries don't have gene_name
try:
genename = [i for i in infofield if 'gene_name' in i][0].split('"')[1]
except IndexError:
genename = ''
self.__geneorder.append((geneid, genename))
liste = line[0:3] + [line[6]]
self.__genedict[(geneid, genename)][(start,stop)].add(tuple(liste))
self.__geneorder = list(OrderedDict.fromkeys(self.__geneorder))
'''
method merges overlapping exons by comparing the end coordinate of exon1 with the
start coordinate of exon2. If the end of exon1 >= the start of exon2 (or the exons are
directly adjacent), they are merged. The loop runs as long as the size of the exonlist changes.
def merge_Exons(self):
# print [i for i in self.__genedict if i[0]=='ENSRNOG00000005665']
for key in self.__genedict:
exons = sorted(self.__genedict[key].keys())
while True:
startlength = len(exons)
temp = []
for i in xrange(len(exons)):
if i == len(exons) - 1:
temp.append(exons[i])
exons = temp
continue
if exons[i][1] >= exons[i+1][0] or exons[i][1] == exons[i+1][0]-1:
new = (exons[i][0], max(exons[i][1], exons[i+1][1]))
# keep information about the two merged exons
first = self.__genedict[key][exons[i]]
second = self.__genedict[key][exons[i+1]]
# delete entries
del self.__genedict[key][exons[i]]
del self.__genedict[key][exons[i+1]]
# store the information in the newly created exon
self.__genedict[key][new] = first | second
temp.append(new)
try:
temp.extend(exons[i+2:])
except IndexError:
pass
exons = temp
break
else:
temp.append(exons[i])
if len(exons) == startlength:
break
def get_geneline(self, geneid, startlist, start, end, genetype):
startlist[3] = start
startlist[4] = end
startlist[2] = 'gene'
geneline = self.__geneline.replace("GENEID", geneid[0])
geneline = geneline.replace("GENENAME", geneid[1])
geneline = geneline.replace("GENETYPE", genetype)
startlist.append(geneline)
return '\t'.join(startlist)+'\n'
def get_transcriptline(self, geneid, startlist, start, end, genetype):
startlist[3] = start
startlist[4] = end
startlist[2] = 'transcript'
transline = self.__transcriptline.replace("GENEID", geneid[0])
transline = transline.replace("GENENAME", geneid[1])
transline = transline.replace("GENETYPE", genetype)
startlist.append(transline)
return '\t'.join(startlist)+'\n'
def get_exonline(self, geneid, number, genetype):
exonline = self.__exonline.replace("GENEID", geneid[0])
exonline = exonline.replace("GENENAME", geneid[1])
exonline = exonline.replace("NUMBER", number)
exonline = exonline.replace("GENETYPE", genetype)
return exonline
'''
method builds the lines for the new gene gtf file.
@return: list of strings
'''
def build_Genegtf(self):
temp = []
for geneid in self.__geneorder:
exonline = list(self.__genedict[geneid][self.__genedict[geneid].keys()[0]])[0]
exoncounter = 1
if exonline[-1] == '-':
exonlist = sorted(self.__genedict[geneid])[::-1]
gstart, gend = exonlist[-1][0], exonlist[0][-1]
elif exonline[-1] == '+':
exonlist = sorted(self.__genedict[geneid])
gstart, gend = exonlist[0][0], exonlist[-1][-1]
else:
exonlist = sorted(self.__genedict[geneid])
gstart, gend = exonlist[0][0], exonlist[-1][-1]
wholegenetype = []
# get all biotypes
for exon in exonlist:
line = sorted(self.__genedict[geneid][exon])
wholegenetype.extend([i[1] for i in line])
wholegenetype = sorted(set(wholegenetype))
for exon in exonlist:
line = sorted(self.__genedict[geneid][exon])
# type of the gene e.g. protein coding, nmd transcript
genetype = []
genetype.extend([i[1] for i in line])
# chromosome, genetype, exon
templine = list(line[0])[0:3]
# replace genetype
templine[1] = '|'.join(sorted(set(genetype)))
templine.extend([str(exon[0]),str(exon[1])])
templine.extend(['.', line[0][-1], '.'])
templine.append(self.get_exonline(geneid, str(exoncounter), '|'.join(sorted(set(genetype)))))
# new gtf files from ensembl have an extra line for the whole gene and transcript
if exoncounter == 1:
geneline = self.get_geneline(geneid, templine[0:8], str(gstart), str(gend), '|'.join(wholegenetype))
transline = self.get_transcriptline(geneid, templine[0:8], str(gstart), str(gend), '|'.join(wholegenetype))
temp.append(geneline)
temp.append(transline)
exoncounter += 1
temp.append('\t'.join(templine)+'\n')
self.__genegtf = temp
def build_Intronlines(self, gtflines):
first = gtflines[0][0]
exons = sorted([(int(i[0][3]), int(i[0][4])) for i in gtflines])
introns = [(exons[i][1]+1, exons[i+1][0]-1) for i in xrange(len(exons)-1)]
if first[6] == '-':
introns = introns[::-1]
first = gtflines[0][0]
intronlines = ['\t'.join([first[0], 'intronic', 'exon', str(introns[i][0]), str(introns[i][1]), first[5], first[6], first[7], first[8].replace(self.__exnr.search(first[8]).group(), 'exon_number "'+str(i+1)+'"')]) for i in xrange(len(introns))]
return intronlines
'''
Method prepares the lines for the intron gtf. It can use the transcript or gene gtf.
@param inputlist: list
'''
def build_IntronGTF(self, inputlist):
dictus = defaultdict(list)
temp, usit, gene_transcript = [], [], []
for line in inputlist:
line = line.split('\t')
if line[2] in ('gene', 'transcript'):
geneid = [i.split('"')[1] for i in line[8].split(';') if 'gene_id' in i][0]
dictus[geneid].append('\t'.join(line))
continue
if line[2] == 'exon':
number = int([i.split('"')[1] for i in line[8].split(';') if 'exon_number' in i][0])
# replace biotype
line[8] = line[8].replace(self.__biotypereg.search(line[8]).group(), 'gene_biotype "intron"')
if len(usit) != 0 and number <= usit[-1][-1]:
if len(usit) != 1:
geneid = [i.split('"')[1] for i in usit[0][0][8].split(';') if 'gene_id' in i][0]
# print gene_transcript
# print usit
temp.extend(dictus[geneid])
temp.extend(self.build_Intronlines(usit))
usit, gene_transcript = [], []
usit.append((line, number))
if len(usit) != 1:
temp.extend(gene_transcript)
temp.extend(self.build_Intronlines(usit))
self.__introngtf = temp
def main(self):
self.prepare_chromdict()
self.remove_change_gtf()
if self.__gene:
self.exon_Togenes()
self.merge_Exons()
self.build_Genegtf()
outputname = '{0}{1}.GENE.gtf'.format(self.__output, self.__prefix)
self.show_log('info', 'gene gtf file written to: {0}'.format(outputname))
write_list(self.__header + self.__genegtf, outputname)
if self.__transcript:
gtf = ['\t'.join(i)+'\n' for i in self.__gtflist]
outputname = '{0}{1}.TR.gtf'.format(self.__output, self.__prefix)
self.show_log('info', 'transcript gtf file written to: {0}'.format(outputname))
write_list(self.__header + gtf, outputname)
if self.__intron:
if self.__model == 'G':
if len(self.__genegtf) == 0:
self.exon_Togenes()
self.merge_Exons()
self.build_Genegtf()
self.build_IntronGTF(self.__genegtf)
outputname = '{0}{1}.INTRON.gtf'.format(self.__output, self.__prefix)
elif self.__model == 'T':
self.__gtf = ['\t'.join(i)+'\n' for i in self.__gtflist]
self.build_IntronGTF(self.__gtf)
outputname = '{0}{1}.INTRON-T.gtf'.format(self.__output, self.__prefix)
self.show_log('info', 'intron gtf file written to: {0}'.format(outputname))
write_list(self.__header + self.__introngtf, outputname)
if __name__ == '__main__':
mainlog = MainLogger('', False)
parseinst = Parser()
parseinst.parse()
parseinst.main()
gtfinst = AnnotationGTF(parseinst.gtf, parseinst.chrom, parseinst.output, parseinst.prefix, parseinst.gene, parseinst.intron, parseinst.transcript, parseinst.model)
gtfinst.main()
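# --- Illustrative example (editor's addition, not part of the upstream file) ---
# A hypothetical invocation of this script could look like:
#   python createGTFfiles.py -g genes.gtf -c chrom_names.tsv -o out/ -p mm10 -G -I -m G
# The function below is a self-contained sketch of the interval-merging idea
# used by AnnotationGTF.merge_Exons(): overlapping or directly adjacent
# (start == previous end + 1) exons collapse into a single interval.
def _demo_merge_intervals():
    exons = [(10, 20), (15, 30), (31, 40), (50, 60)]
    merged = []
    for start, stop in sorted(exons):
        if merged and start <= merged[-1][1] + 1:
            # overlapping or adjacent: extend the previous interval
            merged[-1] = (merged[-1][0], max(merged[-1][1], stop))
        else:
            merged.append((start, stop))
    # (10, 20) overlaps (15, 30) and touches (31, 40) -> one exon (10, 40)
    assert merged == [(10, 40), (50, 60)]
    return merged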
| mit |
val-iisc/sketch-parse | exp-src/table3.py | 1 | 8623 | import scipy
from scipy import ndimage
import cv2
import numpy as np
import sys
import torch
import resnet_dilated_frozen_r5_D #TODO
import resnet_dilated_frozen_r5_D_pose #TODO
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
from collections import OrderedDict
import os
from os import walk
import matplotlib.pyplot as plt
import torch.nn as nn
#import quant
#import pdb
#import matlab.engine
#eng = matlab.engine.start_matlab()
def get_iou(pred,gt,class_):
gt = gt.astype(np.float32)
pred = pred.astype(np.float32)
max_label_dict = {'cow':4,'horse':4,'cat':4,'dog':4,'sheep':4,'bus':6,'car':5,'bicycle':4,'motorbike':4, 'bird':8, 'airplane':5}
max_label = max_label_dict[class_]
count = np.zeros((max_label+1,))
for j in range(max_label+1):
x = np.where(pred==j)
p_idx_j = set(zip(x[0].tolist(),x[1].tolist()))
x = np.where(gt==j)
GT_idx_j = set(zip(x[0].tolist(),x[1].tolist()))
#pdb.set_trace()
n_jj = set.intersection(p_idx_j,GT_idx_j)
u_jj = set.union(p_idx_j,GT_idx_j)
if len(GT_idx_j)!=0:
count[j] = float(len(n_jj))/float(len(u_jj))
result_class = count
Aiou = np.sum(result_class[:])/float(len(np.unique(gt)))
return Aiou
def merge_parts(map_, i):
if i == 4:
map_ = change_parts(map_,7,2)
map_ = change_parts(map_,8,5)
return map_
def change_parts(map_,a,b):
temp = np.where(map_==a)
map_[temp[0],temp[1]] = b
return map_
gpu0 = 0
torch.cuda.set_device(gpu0)
#caffe.set_mode_gpu()
#caffe.set_device(gpu0)
#net = caffe.Net('data/train_d1_contour1by2.prototxt', 'data/train_d1_contour1by2_iter_20000.caffemodel',caffe.TEST)
sketch_root = 'data/sketch-dataset/PNG_untouched/'
model_A = getattr(resnet_dilated_frozen_r5_D,'Res_Deeplab')() #TODO
model_B = getattr(resnet_dilated_frozen_r5_D,'Res_Deeplab')() #TODO
model_C = getattr(resnet_dilated_frozen_r5_D_pose,'Res_Deeplab')() #TODO
model_D = getattr(resnet_dilated_frozen_r5_D_pose,'Res_Deeplab')() #TODO
model_E = getattr(resnet_dilated_frozen_r5_D_pose,'Res_Deeplab')() #TODO
model_A.eval()
model_B.eval()
model_C.eval()
model_D.eval()
model_E.eval()
counter = 0
model_A.cuda()
model_B.cuda()
model_C.cuda()
model_D.cuda()
model_E.cuda()
file_data = open('pred_gt.txt').readlines()
dict_pred = {}
dict_label = {}
for i in file_data:
i_split = i[:-1].split(' ')
dict_pred[i_split[0]] = int(i_split[1])
dict_label[i_split[0]] = int(i_split[2])
prefix_A= 'model_r5_C3_14000.pth' #B_r5
prefix_B= 'model_r5_C3seg2_14000.pth' #BS_r5
prefix_C= 'model_r5_p50x_D5_19000.pth' #BP_r5
prefix_D= 'model_r5_p50x_D1_17000.pth' #BSP_r5
prefix_E= 'model_r5_p50x_D1_17000.pth' #BSP_r5 with 100% router accuracy
for iter in range(1):
saved_state_dict_A = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_A)
saved_state_dict_B = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_B)
saved_state_dict_C = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_C)
saved_state_dict_D = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_D)
saved_state_dict_E = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/'+prefix_E)
#saved_state_dict = torch.load('/data1/ravikiran/pytorch-resnet/snapshots/DeepLab_20k_GB_fix_noCUDNN_bsize1_20k_SegnetLoss_prototype_20000.pth')
if counter==0:
print prefix_A
print prefix_B
print prefix_C
print prefix_D
print prefix_E
counter+=1
#saved_state_dict = torch.load('/data1/ravikiran/pytorch-resnet/MS_DeepLab_resnet_tained_sketches.pth')
model_A.load_state_dict(saved_state_dict_A)
model_B.load_state_dict(saved_state_dict_B)
model_C.load_state_dict(saved_state_dict_C)
model_D.load_state_dict(saved_state_dict_D)
model_E.load_state_dict(saved_state_dict_E)
class_list = ['cow-0', 'horse-0','cat-1','dog-1','sheep-1','bus-2','car-2','bicycle-3','motorbike-3','airplane-4','bird-4'] #TODO
pytorch_list_A = []
pytorch_list_B = []
pytorch_list_C = []
pytorch_list_D = []
pytorch_list_E = []
class_ious_A = []
class_ious_B = []
class_ious_C = []
class_ious_D = []
class_ious_E = []
for class_selector in class_list:
pytorch_per_class_A = []
pytorch_per_class_B = []
pytorch_per_class_C = []
pytorch_per_class_D = []
pytorch_per_class_E = []
class_split = class_selector.split('-')
class_ = class_split[0]
selector = int(class_split[1])
gt_path = 'data/sketch-dataset/test_GT/'+class_
img_list = next(os.walk(gt_path))[2]
path = sketch_root + class_
for i in img_list:
img = cv2.imread(path+'/'+i)
kernel = np.ones((2,2),np.uint8)
# img = cv2.erode(img[:,:,0],kernel,iterations = 1)
img = ndimage.grey_erosion(img[:,:,0].astype(np.uint8), size=(2,2))
img = np.repeat(img[:,:,np.newaxis],3,2)
gt = cv2.imread(gt_path+'/'+i)
selector_pred = dict_pred[i]
output_A = model_A([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_B = model_B([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_C = model_C([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_D = model_D([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector_pred])
output_E = model_E([Variable(torch.from_numpy(img[np.newaxis, :].transpose(0,3,1,2)).float(),volatile=True).cuda(),selector])
#for k in range(4):
# output_temp = output[k].cpu().data[0].numpy()
# output_temp = output_temp.transpose(1,2,0)
# output_temp = np.argmax(output_temp,axis = 2)
# plt.imshow(output_temp)
# plt.show()
interp = nn.UpsamplingBilinear2d(size=(321, 321))
output_A = merge_parts(np.argmax(interp(output_A[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_B = merge_parts(np.argmax(interp(output_B[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_C = merge_parts(np.argmax(interp(output_C[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_D = merge_parts(np.argmax(interp(output_D[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector_pred)
output_E = merge_parts(np.argmax(interp(output_E[3]).cpu().data[0].numpy().transpose(1,2,0),axis =2),selector)
gt = merge_parts(gt, selector)
iou_pytorch_A = get_iou(output_A,gt,class_)
iou_pytorch_B = get_iou(output_B,gt,class_)
iou_pytorch_C = get_iou(output_C,gt,class_)
iou_pytorch_D = get_iou(output_D,gt,class_)
iou_pytorch_E = get_iou(output_E,gt,class_)
pytorch_list_A.append(iou_pytorch_A)
pytorch_list_B.append(iou_pytorch_B)
pytorch_list_C.append(iou_pytorch_C)
pytorch_list_D.append(iou_pytorch_D)
pytorch_list_E.append(iou_pytorch_E)
pytorch_per_class_A.append(iou_pytorch_A)
pytorch_per_class_B.append(iou_pytorch_B)
pytorch_per_class_C.append(iou_pytorch_C)
pytorch_per_class_D.append(iou_pytorch_D)
pytorch_per_class_E.append(iou_pytorch_E)
class_ious_A.append(np.sum(np.asarray(pytorch_per_class_A))/len(pytorch_per_class_A))
class_ious_B.append(np.sum(np.asarray(pytorch_per_class_B))/len(pytorch_per_class_B))
class_ious_C.append(np.sum(np.asarray(pytorch_per_class_C))/len(pytorch_per_class_C))
class_ious_D.append(np.sum(np.asarray(pytorch_per_class_D))/len(pytorch_per_class_D))
class_ious_E.append(np.sum(np.asarray(pytorch_per_class_E))/len(pytorch_per_class_E))
print 'B r5', np.sum(np.asarray(pytorch_list_A))/len(pytorch_list_A),'per class', class_ious_A
print 'BC r5', np.sum(np.asarray(pytorch_list_B))/len(pytorch_list_B),'per class', class_ious_B
print 'BP r5', np.sum(np.asarray(pytorch_list_C))/len(pytorch_list_C),'per class', class_ious_C
print 'BCP r5', np.sum(np.asarray(pytorch_list_D))/len(pytorch_list_D),'per class', class_ious_D
print 'BCP r5 with 100% classifier ', np.sum(np.asarray(pytorch_list_E))/len(pytorch_list_E),'per class', class_ious_E
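# --- Illustrative example (editor's addition, not part of the upstream file) ---
# A minimal sketch of get_iou() on toy 3x3 part maps. 'dog' caps the part
# labels at 4; the maps below are hypothetical, not real model output.
def _demo_get_iou():
    pred = np.array([[0, 1, 1],
                     [0, 1, 2],
                     [0, 2, 2]])
    gt = np.array([[0, 1, 1],
                   [0, 1, 1],
                   [0, 2, 2]])
    # Per-label IoUs are averaged over the labels present in the ground truth,
    # so here: (1.0 + 0.75 + 2/3) / 3.
    return get_iou(pred, gt, 'dog')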
| mit |
bravo-zhang/spark | sql/hive/src/test/resources/data/scripts/cat.py | 105 | 1040 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import sys
import os
table_name = None
if 'hive_streaming_tablename' in os.environ:
table_name = os.environ['hive_streaming_tablename']
for line in sys.stdin:
print(line)
print("dummy", file=sys.stderr)
| apache-2.0 |
danielreed/python-hpOneView | examples/scripts/get-role-category-actions.py | 2 | 3990 | #!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
from pprint import pprint
import hpOneView as hpov
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with givin credentials
try:
con.login(credential)
except Exception:
print('Login failed')
def getrolecatactions(sec):
ret = sec.get_role_category_actions()
pprint(ret)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display Role Category Actions
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HPE OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HPE OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HPE OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HPE OneView Authorized Login Domain''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
sec = hpov.security(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
getrolecatactions(sec)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
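# --- Illustrative usage (editor's addition, not part of the upstream file) ---
# A hypothetical invocation; the hostname and password are placeholders:
#
#   python get-role-category-actions.py -a oneview.example.com -p secret
#
# -u defaults to Administrator and -j to the Local login domain, so only the
# appliance address (-a) and password (-p) are required.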
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| mit |
Isendir/brython | www/src/Lib/logging/config.py | 739 | 35619 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
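# --- Illustrative example (editor's addition, not part of the upstream file) ---
# A minimal sketch of dictConfig(). The two 'cfg://loglevel' values exercise
# the cfg:// protocol handled by BaseConfigurator.cfg_convert(): each one is
# resolved against the top-level 'loglevel' key of this same dictionary.
def _example_dict_config():
    dictConfig({
        'version': 1,
        'loglevel': 'INFO',  # custom key, referenced via cfg:// below
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'cfg://loglevel',
            },
        },
        'root': {
            'handlers': ['console'],
            'level': 'cfg://loglevel',
        },
    })
    logging.getLogger(__name__).info('configured via dictConfig')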
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
try:
import json
d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except:
#Apply new configuration.
file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
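# Illustrative client sketch (not part of the original module): sending a
# new configuration to a server started with listen(). The wire format is
# the one ConfigStreamHandler expects: a 4-byte big-endian length prefix
# (struct.pack(">L", n)) followed by either a JSON dict (handled by
# dictConfig) or an ini-style file (handled by fileConfig).
#
#   import json, socket, struct
#   payload = json.dumps({'version': 1,
#                         'root': {'level': 'DEBUG'}}).encode('utf-8')
#   sock = socket.create_connection(('localhost', DEFAULT_LOGGING_CONFIG_PORT))
#   sock.sendall(struct.pack('>L', len(payload)) + payload)
#   sock.close()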
| bsd-3-clause |
jjhelmus/CyLP | cylp/tests/test_CyCoinMpsIO.py | 4 | 2151 | import unittest
import inspect
import os
from os.path import join
import numpy as np
from cylp.cy import CyCoinMpsIO
from cylp.cy.CyCoinMpsIO import getQpsExample
currentFilePath = os.path.dirname(inspect.getfile(inspect.currentframe()))
class TestCyCoinMpsIO(unittest.TestCase):
def test(self):
problem = CyCoinMpsIO()
problem.readMps(os.path.join(currentFilePath, '../input/hs268.qps'))
self.assertEqual(problem.nVariables, 5)
self.assertEqual(problem.nConstraints, 5)
self.assertTrue([chr(s) for s in problem.constraintSigns] ==
problem.nConstraints * ['G'])
c = problem.matrixByRow
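# matrixByRow exposes the constraint matrix in packed row-major (CSR-like)
# form: `elements` holds the nonzero values, `indices` the column index of
# each value, and `vectorStarts` the offset at which each row begins.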
self.assertTrue((abs(c.elements -
np.array([-1., -1., -1., -1., -1., 10.,
10., -3., 5., 4., -8.,
1., -2., -5., 3., 8., -1.,
2., 5., -3., -4., -2.,
3., -5., 1.])) <= 10 ** -8).all())
self.assertTrue((abs(c.indices -
np.array([0, 1, 2, 3, 4, 0, 1, 2,
3, 4, 0, 1, 2, 3, 4, 0,
1, 2, 3, 4, 0, 1, 2,
3, 4])) <= 10 ** -8).all())
self.assertTrue((abs(c.vectorStarts -
np.array([0, 5, 10, 15, 20, 25])) <=
10 ** -8).all())
self.assertTrue((abs(problem.rightHandSide -
np.array([-5., 20., -40., 11., -30.])) <=
10 ** -8).all())
H = problem.Hessian.todense()
self.assertTrue((abs(H -
np.matrix([[20394., -24908., -2026., 3896., 658.],
[-24908., 41818., -3466., -9828., -372.],
[-2026., -3466., 3510., 2178., -348.],
[3896., -9828., 2178., 3030., -44.],
[658., -372., -348., -44., 54.]])) <= 10 ** -8).all())
if __name__ == '__main__':
unittest.main()
| epl-1.0 |
Anonymouslemming/ansible | lib/ansible/modules/network/dellos10/dellos10_config.py | 62 | 10381 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos10_config
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Manage Dell EMC Networking OS10 configuration sections
description:
- OS10 configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with OS10 configuration sections in
a deterministic way.
extends_documentation_fragment: dellos10
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Note the configuration
command syntax as the device config parser automatically modifies some commands. This argument is mutually exclusive with I(src).
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If you omit the parents argument, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root dir. This argument is mutually
exclusive with I(lines).
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. The playbook designer can use this argument
to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. As with I(before),
the playbook designer can use this argument to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If you set
match to I(line), commands match line by line. If you set
match to I(strict), command lines match by
position. If you set match to I(exact), command lines
must be an equal match. Finally, if you set match to I(none), the
module does not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If you set the replace argument to I(line), then
the modified lines push to the device in configuration
mode. If you set the replace argument to I(block), then the entire
command block pushes to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When you set the argument to
I(merge), the configuration changes merge with the current
device running configuration. When you set the argument to I(check),
the configuration updates are determined but not actually configured
on the remote device.
required: false
default: merge
choices: ['merge', 'check']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If you specify check mode, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
config:
description:
- The playbook designer can use the C(config) argument to supply
the base configuration to be used to validate necessary configuration
changes. If you specify this argument, the module
does not download the running-config from the remote node.
required: false
default: null
backup:
description:
- This argument causes the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
"""
EXAMPLES = """
- dellos10_config:
lines: ['hostname {{ inventory_hostname }}']
provider: "{{ cli }}"
- dellos10_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
- 50 permit ip host 5.5.5.5 any log
parents: ['ip access-list test']
before: ['no ip access-list test']
match: exact
provider: "{{ cli }}"
- dellos10_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
parents: ['ip access-list test']
before: ['no ip access-list test']
replace: block
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands pushed to the remote device.
returned: Always.
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device.
returned: When not check_mode.
type: list
sample: ['...', '...']
saved:
description: Returns whether the configuration is saved to the startup
configuration or not.
returned: When not check_mode.
type: bool
sample: True
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.dellos10 import get_config, get_sublevel_config
from ansible.module_utils.dellos10 import dellos10_argument_spec, check_args
from ansible.module_utils.dellos10 import load_config, run_commands
from ansible.module_utils.dellos10 import WARNING_PROMPTS_RE
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def main():
argument_spec = dict(
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
src=dict(type='path'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line',
choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
update=dict(choices=['merge', 'check'], default='merge'),
save=dict(type='bool', default=False),
config=dict(),
backup=dict(type='bool', default=False)
)
argument_spec.update(dellos10_argument_spec)
mutually_exclusive = [('lines', 'src')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
parents = module.params['parents'] or list()
match = module.params['match']
replace = module.params['replace']
warnings = list()
check_args(module, warnings)
result = dict(changed=False, saved=False, warnings=warnings)
candidate = get_candidate(module)
if match != 'none':
config = get_config(module)
if parents:
contents = get_sublevel_config(config, module)
config = NetworkConfig(contents=contents, indent=1)
else:
config = NetworkConfig(contents=config, indent=1)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if module.params['backup']:
result['__backup__'] = get_config(module)
commands = list()
if configobjs:
commands = dumps(configobjs, 'commands')
commands = commands.split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
if not module.check_mode and module.params['update'] == 'merge':
load_config(module, commands)
if module.params['save']:
cmd = {'command': 'copy running-config startup-config', 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
run_commands(module, [cmd])
result['saved'] = True
result['changed'] = True
result['updates'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/models/rnn/ptb/reader_test.py | 14 | 2036 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.models.ptb_lstm.ptb_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
class PtbReaderTest(tf.test.TestCase):
def setUp(self):
self._string_data = "\n".join(
[" hello there i am",
" rain as day",
" want some cheesy puffs ?"])
def testPtbRawData(self):
tmpdir = tf.test.get_temp_dir()
for suffix in "train", "valid", "test":
filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
with tf.gfile.GFile(filename, "w") as fh:
fh.write(self._string_data)
# Smoke test
output = reader.ptb_raw_data(tmpdir)
self.assertEqual(len(output), 4)
def testPtbIterator(self):
raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]
batch_size = 3
num_steps = 2
output = list(reader.ptb_iterator(raw_data, batch_size, num_steps))
self.assertEqual(len(output), 2)
o1, o2 = (output[0], output[1])
self.assertEqual(o1[0].shape, (batch_size, num_steps))
self.assertEqual(o1[1].shape, (batch_size, num_steps))
self.assertEqual(o2[0].shape, (batch_size, num_steps))
self.assertEqual(o2[1].shape, (batch_size, num_steps))
if __name__ == "__main__":
tf.test.main()
| mit |
izapolsk/integration_tests | cfme/tests/infrastructure/test_cluster_analysis.py | 1 | 1373 | import pytest
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.tier(2),
test_requirements.smartstate,
pytest.mark.usefixtures("setup_provider"),
pytest.mark.provider([InfraProvider], selector=ONE_PER_TYPE),
]
def test_run_cluster_analysis(appliance, provider):
"""Tests smarthost analysis
Metadata:
test_flag: cluster_analysis
Polarion:
assignee: sbulage
casecomponent: SmartState
initialEstimate: 1/3h
"""
cluster_coll = appliance.collections.clusters.filter({'provider': provider})
test_cluster = cluster_coll.all()[0]
test_cluster.wait_for_exists()
# Initiate analysis
# Todo add check for task completion, for cluster task is not available for now
test_cluster.run_smartstate_analysis()
cluster_view = navigate_to(test_cluster, 'Details')
drift_num = wait_for(lambda: cluster_view.entities.relationships.get_text_of('Drift History'),
delay=20, timeout='5m', fail_func=appliance.server.browser.refresh,
fail_condition='None')
assert drift_num != '0', 'No drift history change found'
| gpl-2.0 |
Parsl/parsl | parsl/executors/high_throughput/zmq_pipes.py | 1 | 5931 | #!/usr/bin/env python3
import zmq
import time
import pickle
import logging
import threading
logger = logging.getLogger(__name__)
class CommandClient(object):
""" CommandClient
"""
def __init__(self, ip_address, port_range):
"""
Parameters
----------
ip_address: str
IP address of the client (where Parsl runs)
port_range: tuple(int, int)
Port range for the comms between client and interchange
"""
self.context = zmq.Context()
self.ip_address = ip_address
self.port_range = port_range
self.port = None
self.create_socket_and_bind()
self._lock = threading.Lock()
def create_socket_and_bind(self):
""" Creates socket and binds to a port.
Upon recreating the socket, we bind to the same port.
"""
self.zmq_socket = self.context.socket(zmq.REQ)
self.zmq_socket.setsockopt(zmq.LINGER, 0)
if self.port is None:
self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(self.ip_address),
min_port=self.port_range[0],
max_port=self.port_range[1])
else:
self.zmq_socket.bind("tcp://{}:{}".format(self.ip_address, self.port))
def run(self, message, max_retries=3):
""" This function needs to be fast at the same time aware of the possibility of
ZMQ pipes overflowing.
The timeout increases slowly if contention is detected on ZMQ pipes.
We could set copy=False and get slightly better latency but this results
in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.
This issue can be magnified if the serialized buffers themselves are larger.
"""
reply = '__PARSL_ZMQ_PIPES_MAGIC__'
with self._lock:
for i in range(max_retries):
try:
self.zmq_socket.send_pyobj(message, copy=True)
reply = self.zmq_socket.recv_pyobj()
except zmq.ZMQError:
logger.exception("Potential ZMQ REQ-REP deadlock caught")
logger.info("Trying to reestablish context")
self.zmq_socket.close()
self.context.destroy()
self.context = zmq.Context()
self.create_socket_and_bind()
else:
break
if reply == '__PARSL_ZMQ_PIPES_MAGIC__':
logger.error("Command channel run retries exhausted. Unable to run command")
raise Exception("Command Channel retries exhausted")
return reply
def close(self):
self.zmq_socket.close()
self.context.term()
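# Illustrative usage sketch (not part of the original file; the command
# string is an arbitrary placeholder, not a documented interchange command):
#
#   client = CommandClient('127.0.0.1', (55000, 56000))
#   reply = client.run('HELLO')  # blocks until the interchange replies
#   client.close()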
class TasksOutgoing(object):
""" Outgoing task queue from the executor to the Interchange
"""
def __init__(self, ip_address, port_range):
"""
Parameters
----------
ip_address: str
IP address of the client (where Parsl runs)
port_range: tuple(int, int)
Port range for the comms between client and interchange
"""
self.context = zmq.Context()
self.zmq_socket = self.context.socket(zmq.DEALER)
self.zmq_socket.set_hwm(0)
self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(ip_address),
min_port=port_range[0],
max_port=port_range[1])
self.poller = zmq.Poller()
self.poller.register(self.zmq_socket, zmq.POLLOUT)
def put(self, message):
""" This function needs to be fast at the same time aware of the possibility of
ZMQ pipes overflowing.
The timeout increases slowly if contention is detected on ZMQ pipes.
We could set copy=False and get slightly better latency but this results
in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.
This issue can be magnified if the serialized buffers themselves are larger.
"""
timeout_ms = 0
while True:
socks = dict(self.poller.poll(timeout=timeout_ms))
if self.zmq_socket in socks and socks[self.zmq_socket] == zmq.POLLOUT:
# The copy option adds latency but reduces the risk of ZMQ overflow
self.zmq_socket.send_pyobj(message, copy=True)
return
else:
timeout_ms += 1
logger.debug("Not sending due to full zmq pipe, timeout: {} ms".format(timeout_ms))
def close(self):
self.zmq_socket.close()
self.context.term()
class ResultsIncoming(object):
""" Incoming results queue from the Interchange to the executor
"""
def __init__(self, ip_address, port_range):
"""
Parameters
----------
ip_address: str
IP address of the client (where Parsl runs)
port_range: tuple(int, int)
Port range for the comms between client and interchange
"""
self.context = zmq.Context()
self.results_receiver = self.context.socket(zmq.DEALER)
self.results_receiver.set_hwm(0)
self.port = self.results_receiver.bind_to_random_port("tcp://{}".format(ip_address),
min_port=port_range[0],
max_port=port_range[1])
def get(self, block=True, timeout=None):
return self.results_receiver.recv_multipart()
def request_close(self):
status = self.results_receiver.send(pickle.dumps(None))
time.sleep(0.1)
return status
def close(self):
self.results_receiver.close()
self.context.term()
| apache-2.0 |
ryannjohnson/popular-python | popular/users.py | 1 | 1254 |
class User(object):
"""Information about an authenticated user.
Providers return different forms of information. This container is
meant to provide a common interface with that information across all
vendors.
"""
ATTRIBUTES = [
'id',
'name',
'nickname',
'email',
'avatar',
]
def __getattr__(self, key):
if key in self.ATTRIBUTES:
return None
raise AttributeError("The attribute \"%s\" does not exist." % key)
def map(self, **kwargs):
"""Turns a dictionary into the properties of this user.
Args:
**kwargs: a dictionary of keys and values to merge into this
instance.
Raises:
KeyError: Cannot map attribute "%s" to user.
"""
for name in kwargs:
if name not in self.ATTRIBUTES:
raise KeyError("Cannot map attribute \"%s\" to user." % name)
setattr(self, name, kwargs[name])
def set_raw(self, user):
"""Saves the original data here."""
self.user = user
def to_dict(self):
output = dict()
for a in self.ATTRIBUTES:
output[a] = getattr(self, a, None)
return output
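# Illustrative usage sketch (not part of the original file):
#
#   user = User()
#   user.map(id='42', name='Ada Lovelace', email='ada@example.com')
#   user.to_dict()
#   # -> {'id': '42', 'name': 'Ada Lovelace', 'nickname': None,
#   #     'email': 'ada@example.com', 'avatar': None}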
| mit |
rtucker-mozilla/inventory | api_v2/relengdistro_handler.py | 6 | 1741 | from piston.handler import BaseHandler, rc
from systems.models import System, RelengDistro, SystemRack, SystemStatus, NetworkAdapter, KeyValue
from truth.models import Truth, KeyValue as TruthKeyValue
from dhcp.DHCP import DHCP as DHCPInterface
from dhcp.models import DHCP
from MacroExpansion import MacroExpansion
from KeyValueTree import KeyValueTree
import re
try:
import json
except:
from django.utils import simplejson as json
from django.test.client import Client
from settings import API_ACCESS
class RelengDistroHandler(BaseHandler):
allowed_methods = API_ACCESS
model = RelengDistro
fields = ('id','distro_name')
def create(self, request, releng_distro_id=None):
rd = RelengDistro()
rd.save()
resp = rc.CREATED
resp.write('Record Created')
return resp
def read(self, request, releng_distro_id=None):
base = RelengDistro.objects
if releng_distro_id:
return base.get(pk=releng_distro_id)
else:
return base.all()
def update(self, request, releng_distro_id=None):
model = RelengDistro
if request.method == 'PUT':
try:
rd = model.objects.get(pk=releng_distro_id)
rd.distro_name = request.POST['releng_distro_name']
rd.save()
resp = rc.ALL_OK
except:
resp = rc.NOT_FOUND
return resp
def delete(self, request, releng_distro_id=None):
try:
rd = RelengDistro.objects.get(pk=releng_distro_id)
rd.delete()
resp = rc.DELETED
resp.write('Record Deleted')
except:
resp = rc.NOT_FOUND
return resp
| bsd-3-clause |
unixweb/rpi-cpu-alarm | alarm25.py | 1 | 1103 | #!/usr/bin/python
import os
import smtplib
import time
from email.mime.text import MIMEText
mailserver = "XXXX.de"
smtpport = 25
smtpuser = "XXXX"
smtpasswd = "XXXXX"
recipient = "XXXX@XXXX.de"
smtpsender = "alarm@XXXX.de"
alarmtemp = 25
day = repr(time.localtime()[2]) + "."+repr(time.localtime()[1])+"."+repr(time.localtime()[0])
zeit = repr(time.localtime()[3]) + ":" + repr(time.localtime()[4]) + ":"+ repr(time.localtime()[5])
def getCPUtemperature():
res = os.popen('vcgencmd measure_temp').readline()
return(res.replace("temp=","").replace("'C\n",""))
temp_float = float(getCPUtemperature())
if (temp_float > alarmtemp):
server = smtplib.SMTP(mailserver, smtpport)
server.login(smtpuser, smtpasswd)
value = "Die aktuelle Temperatur des Raspberry Pi liegt bei "+ getCPUtemperature()+" Grad Celsius.\n Zeit: "+zeit+ " am " + day
msg = MIMEText(value)
msg['Subject'] = "[Warnung] Temperatur "+ getCPUtemperature()+" Grad!"
msg['From'] = "RPi Temperature"
msg['To'] = recipient
server.sendmail(smtpsender, recipient, msg.as_string())
server.quit()
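# Illustrative scheduling sketch (not part of the original script; the
# script path is a placeholder): run the check every five minutes via cron:
#
#   */5 * * * * /usr/bin/python /home/pi/alarm25.py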
| gpl-3.0 |
angelapper/edx-platform | lms/djangoapps/courseware/model_data.py | 5 | 36505 | """
Classes to provide the LMS runtime data storage to XBlocks.
:class:`DjangoKeyValueStore`: An XBlock :class:`~KeyValueStore` which
stores a subset of xblocks scopes as Django ORM objects. It wraps
:class:`~FieldDataCache` to provide an XBlock-friendly interface.
:class:`FieldDataCache`: A object which provides a read-through prefetch cache
of data to support XBlock fields within a limited set of scopes.
The remaining classes in this module provide read-through prefetch cache implementations
for specific scopes. Each class knows which pieces of information are essential
for its scope, and thus how to cache, prefetch, and create new field data
entries.
UserStateCache: A cache for Scope.user_state
UserStateSummaryCache: A cache for Scope.user_state_summary
PreferencesCache: A cache for Scope.preferences
UserInfoCache: A cache for Scope.user_info
DjangoOrmFieldCache: A base-class for single-row-per-field caches.
"""
import json
import logging
from abc import ABCMeta, abstractmethod
from collections import defaultdict, namedtuple
from contracts import contract, new_contract
from django.db import DatabaseError
from opaque_keys.edx.asides import AsideUsageKeyV1, AsideUsageKeyV2
from opaque_keys.edx.block_types import BlockTypeKeyV1
from opaque_keys.edx.keys import CourseKey, UsageKey
from xblock.core import XBlockAside
from xblock.exceptions import InvalidScopeError, KeyValueMultiSaveError
from xblock.fields import Scope, UserScope
from xblock.runtime import KeyValueStore
from courseware.user_state_client import DjangoXBlockUserStateClient
from xmodule.modulestore.django import modulestore
from .models import StudentModule, XModuleStudentInfoField, XModuleStudentPrefsField, XModuleUserStateSummaryField
log = logging.getLogger(__name__)
class InvalidWriteError(Exception):
"""
Raised to indicate that writing to a particular key
in the KeyValueStore is disabled
"""
def _all_usage_keys(descriptors, aside_types):
"""
Return a set of all usage_ids for the `descriptors` and for
as all asides in `aside_types` for those descriptors.
"""
usage_ids = set()
for descriptor in descriptors:
usage_ids.add(descriptor.scope_ids.usage_id)
for aside_type in aside_types:
usage_ids.add(AsideUsageKeyV1(descriptor.scope_ids.usage_id, aside_type))
usage_ids.add(AsideUsageKeyV2(descriptor.scope_ids.usage_id, aside_type))
return usage_ids
def _all_block_types(descriptors, aside_types):
"""
Return a set of all block_types for the supplied `descriptors` and for
the asides types in `aside_types` associated with those descriptors.
"""
block_types = set()
for descriptor in descriptors:
block_types.add(BlockTypeKeyV1(descriptor.entry_point, descriptor.scope_ids.block_type))
for aside_type in aside_types:
block_types.add(BlockTypeKeyV1(XBlockAside.entry_point, aside_type))
return block_types
class DjangoKeyValueStore(KeyValueStore):
"""
This KeyValueStore will read and write data in the following scopes to django models
Scope.user_state_summary
Scope.user_state
Scope.preferences
Scope.user_info
Access to any other scopes will raise an InvalidScopeError
Data for Scope.user_state is stored as StudentModule objects via the django orm.
Data for the other scopes is stored in individual objects that are named for the
scope involved and have the field name as a key
If the key isn't found in the expected table during a read or a delete, then a KeyError will be raised
"""
_allowed_scopes = (
Scope.user_state_summary,
Scope.user_state,
Scope.preferences,
Scope.user_info,
)
def __init__(self, field_data_cache):
self._field_data_cache = field_data_cache
def get(self, key):
self._raise_unless_scope_is_allowed(key)
return self._field_data_cache.get(key)
def set(self, key, value):
"""
Set a single value in the KeyValueStore
"""
self.set_many({key: value})
def set_many(self, kv_dict):
"""
Provide a bulk save mechanism.
`kv_dict`: A dictionary of dirty fields that maps
xblock.KvsFieldData._key : value
"""
for key in kv_dict:
# Check key for validity
self._raise_unless_scope_is_allowed(key)
self._field_data_cache.set_many(kv_dict)
def delete(self, key):
self._raise_unless_scope_is_allowed(key)
self._field_data_cache.delete(key)
def has(self, key):
self._raise_unless_scope_is_allowed(key)
return self._field_data_cache.has(key)
def _raise_unless_scope_is_allowed(self, key):
"""Raise an InvalidScopeError if key.scope is not in self._allowed_scopes."""
if key.scope not in self._allowed_scopes:
raise InvalidScopeError(key, self._allowed_scopes)
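# Illustrative usage sketch (not part of the original module; assumes an
# already-populated FieldDataCache for the current user and course, that
# `block` is an XBlock with a user_state field named 'attempts', and that
# KeyValueStore.Key accepts these four positional fields):
#
#   kvs = DjangoKeyValueStore(field_data_cache)
#   key = DjangoKeyValueStore.Key(Scope.user_state, user.id,
#                                 block.scope_ids.usage_id, 'attempts')
#   kvs.set(key, 3)
#   kvs.get(key)  # -> 3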
new_contract("DjangoKeyValueStore", DjangoKeyValueStore)
new_contract("DjangoKeyValueStore_Key", DjangoKeyValueStore.Key)
class DjangoOrmFieldCache(object):
"""
Baseclass for Scope-specific field cache objects that are based on
single-row-per-field Django ORM objects.
"""
__metaclass__ = ABCMeta
def __init__(self):
self._cache = {}
def cache_fields(self, fields, xblocks, aside_types):
"""
Load all fields specified by ``fields`` for the supplied ``xblocks``
and ``aside_types`` into this cache.
Arguments:
fields (list of str): Field names to cache.
xblocks (list of :class:`XBlock`): XBlocks to cache fields for.
aside_types (list of str): Aside types to cache fields for.
"""
for field_object in self._read_objects(fields, xblocks, aside_types):
self._cache[self._cache_key_for_field_object(field_object)] = field_object
@contract(kvs_key=DjangoKeyValueStore.Key)
def get(self, kvs_key):
"""
Return the django model object specified by `kvs_key` from
the cache.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to look up
Returns: A django orm object from the cache
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
if cache_key not in self._cache:
raise KeyError(kvs_key.field_name)
field_object = self._cache[cache_key]
return json.loads(field_object.value)
@contract(kvs_key=DjangoKeyValueStore.Key)
def set(self, kvs_key, value):
"""
Set the specified `kvs_key` to the field value `value`.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to set
value: The field value to store
"""
self.set_many({kvs_key: value})
@contract(kv_dict="dict(DjangoKeyValueStore_Key: *)")
def set_many(self, kv_dict):
"""
Set the specified fields to the supplied values.
Arguments:
kv_dict (dict): A dictionary mapping :class:`~DjangoKeyValueStore.Key`
objects to values to set.
"""
saved_fields = []
for kvs_key, value in sorted(kv_dict.items()):
cache_key = self._cache_key_for_kvs_key(kvs_key)
field_object = self._cache.get(cache_key)
try:
serialized_value = json.dumps(value)
# It is safe to force an insert or an update, because
# a) we should have retrieved the object as part of the
# prefetch step, so if it isn't in our cache, it doesn't exist yet.
# b) no other code should be modifying these models out of band of
# this cache.
if field_object is None:
field_object = self._create_object(kvs_key, serialized_value)
field_object.save(force_insert=True)
self._cache[cache_key] = field_object
else:
field_object.value = serialized_value
field_object.save(force_update=True)
except DatabaseError:
log.exception("Saving field %r failed", kvs_key.field_name)
raise KeyValueMultiSaveError(saved_fields)
finally:
saved_fields.append(kvs_key.field_name)
@contract(kvs_key=DjangoKeyValueStore.Key)
def delete(self, kvs_key):
"""
Delete the value specified by `kvs_key`.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
Raises: KeyError if key isn't found in the cache
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
field_object = self._cache.get(cache_key)
if field_object is None:
raise KeyError(kvs_key.field_name)
field_object.delete()
del self._cache[cache_key]
@contract(kvs_key=DjangoKeyValueStore.Key, returns=bool)
def has(self, kvs_key):
"""
Return whether the specified `kvs_key` is set.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to check
Returns: bool
"""
return self._cache_key_for_kvs_key(kvs_key) in self._cache
@contract(kvs_key=DjangoKeyValueStore.Key, returns="datetime|None")
def last_modified(self, kvs_key):
"""
Return when the supplied field was changed.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to check
Returns: datetime if there was a modified date, or None otherwise
"""
field_object = self._cache.get(self._cache_key_for_kvs_key(kvs_key))
if field_object is None:
return None
else:
return field_object.modified
def __len__(self):
return len(self._cache)
@abstractmethod
def _create_object(self, kvs_key, value):
"""
Create a new object to add to the cache (which should record
the specified field ``value`` for the field identified by
``kvs_key``).
Arguments:
kvs_key (:class:`DjangoKeyValueStore.Key`): Which field to create an entry for
value: What value to record in the field
"""
raise NotImplementedError()
@abstractmethod
def _read_objects(self, fields, xblocks, aside_types):
"""
Return an iterator for all objects stored in the underlying datastore
for the ``fields`` on the ``xblocks`` and the ``aside_types`` associated
with them.
Arguments:
fields (list of str): Field names to return values for
xblocks (list of :class:`~XBlock`): XBlocks to load fields for
aside_types (list of str): Asides to load field for (which annotate the supplied
xblocks).
"""
raise NotImplementedError()
@abstractmethod
def _cache_key_for_field_object(self, field_object):
"""
Return the key used in this DjangoOrmFieldCache to store the specified field_object.
Arguments:
field_object: A Django model instance that stores the data for fields in this cache
"""
raise NotImplementedError()
@abstractmethod
def _cache_key_for_kvs_key(self, key):
"""
Return the key used in this DjangoOrmFieldCache for the specified KeyValueStore key.
Arguments:
key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
"""
raise NotImplementedError()
class UserStateCache(object):
"""
Cache for Scope.user_state xblock field data.
"""
def __init__(self, user, course_id):
self._cache = defaultdict(dict)
self.course_id = course_id
self.user = user
self._client = DjangoXBlockUserStateClient(self.user)
def cache_fields(self, fields, xblocks, aside_types): # pylint: disable=unused-argument
"""
Load all fields specified by ``fields`` for the supplied ``xblocks``
and ``aside_types`` into this cache.
Arguments:
fields (list of str): Field names to cache.
xblocks (list of :class:`XBlock`): XBlocks to cache fields for.
aside_types (list of str): Aside types to cache fields for.
"""
block_field_state = self._client.get_many(
self.user.username,
_all_usage_keys(xblocks, aside_types),
)
for user_state in block_field_state:
self._cache[user_state.block_key] = user_state.state
@contract(kvs_key=DjangoKeyValueStore.Key)
def set(self, kvs_key, value):
"""
Set the specified `kvs_key` to the field value `value`.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to set
value: The field value to store
"""
self.set_many({kvs_key: value})
@contract(kvs_key=DjangoKeyValueStore.Key, returns="datetime|None")
def last_modified(self, kvs_key):
"""
Return when the supplied field was changed.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The key representing the cached field
Returns: datetime if there was a modified date, or None otherwise
"""
try:
return self._client.get(
self.user.username,
kvs_key.block_scope_id,
fields=[kvs_key.field_name],
).updated
except self._client.DoesNotExist:
return None
@contract(kv_dict="dict(DjangoKeyValueStore_Key: *)")
def set_many(self, kv_dict):
"""
Set the specified fields to the supplied values.
Arguments:
kv_dict (dict): A dictionary mapping :class:`~DjangoKeyValueStore.Key`
objects to values to set.
"""
pending_updates = defaultdict(dict)
for kvs_key, value in kv_dict.items():
cache_key = self._cache_key_for_kvs_key(kvs_key)
pending_updates[cache_key][kvs_key.field_name] = value
try:
self._client.set_many(
self.user.username,
pending_updates
)
except DatabaseError:
log.exception("Saving user state failed for %s", self.user.username)
raise KeyValueMultiSaveError([])
finally:
self._cache.update(pending_updates)
@contract(kvs_key=DjangoKeyValueStore.Key)
def get(self, kvs_key):
"""
Return the django model object specified by `kvs_key` from
the cache.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to look up
Returns: A django orm object from the cache
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
if cache_key not in self._cache:
raise KeyError(kvs_key.field_name)
return self._cache[cache_key][kvs_key.field_name]
@contract(kvs_key=DjangoKeyValueStore.Key)
def delete(self, kvs_key):
"""
Delete the value specified by `kvs_key`.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field value to delete
Raises: KeyError if key isn't found in the cache
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
if cache_key not in self._cache:
raise KeyError(kvs_key.field_name)
field_state = self._cache[cache_key]
if kvs_key.field_name not in field_state:
raise KeyError(kvs_key.field_name)
self._client.delete(self.user.username, cache_key, fields=[kvs_key.field_name])
del field_state[kvs_key.field_name]
@contract(kvs_key=DjangoKeyValueStore.Key, returns=bool)
def has(self, kvs_key):
"""
Return whether the specified `kvs_key` is set.
Arguments:
kvs_key (`DjangoKeyValueStore.Key`): The field to check
Returns: bool
"""
cache_key = self._cache_key_for_kvs_key(kvs_key)
return (
cache_key in self._cache and
kvs_key.field_name in self._cache[cache_key]
)
def __len__(self):
return len(self._cache)
def _cache_key_for_kvs_key(self, key):
"""
Return the key used in this DjangoOrmFieldCache for the specified KeyValueStore key.
Arguments:
key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
"""
return key.block_scope_id
class UserStateSummaryCache(DjangoOrmFieldCache):
"""
Cache for Scope.user_state_summary xblock field data.
"""
def __init__(self, course_id):
super(UserStateSummaryCache, self).__init__()
self.course_id = course_id
def _create_object(self, kvs_key, value):
"""
Create a new object to add to the cache (which should record
the specified field ``value`` for the field identified by
``kvs_key``).
Arguments:
kvs_key (:class:`DjangoKeyValueStore.Key`): Which field to create an entry for
value: The value to assign to the new field object
"""
return XModuleUserStateSummaryField(
field_name=kvs_key.field_name,
usage_id=kvs_key.block_scope_id,
value=value,
)
def _read_objects(self, fields, xblocks, aside_types):
"""
Return an iterator for all objects stored in the underlying datastore
for the ``fields`` on the ``xblocks`` and the ``aside_types`` associated
with them.
Arguments:
fields (list of :class:`~Field`): Fields to return values for
xblocks (list of :class:`~XBlock`): XBlocks to load fields for
aside_types (list of str): Asides to load field for (which annotate the supplied
xblocks).
"""
return XModuleUserStateSummaryField.objects.chunked_filter(
'usage_id__in',
_all_usage_keys(xblocks, aside_types),
field_name__in=set(field.name for field in fields),
)
def _cache_key_for_field_object(self, field_object):
"""
Return the key used in this DjangoOrmFieldCache to store the specified field_object.
Arguments:
field_object: A Django model instance that stores the data for fields in this cache
"""
return (field_object.usage_id.map_into_course(self.course_id), field_object.field_name)
def _cache_key_for_kvs_key(self, key):
"""
Return the key used in this DjangoOrmFieldCache for the specified KeyValueStore key.
Arguments:
key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
"""
return (key.block_scope_id, key.field_name)
class PreferencesCache(DjangoOrmFieldCache):
"""
Cache for Scope.preferences xblock field data.
"""
def __init__(self, user):
super(PreferencesCache, self).__init__()
self.user = user
def _create_object(self, kvs_key, value):
"""
Create a new object to add to the cache (which should record
the specified field ``value`` for the field identified by
``kvs_key``).
Arguments:
kvs_key (:class:`DjangoKeyValueStore.Key`): Which field to create an entry for
value: The value to assign to the new field object
"""
return XModuleStudentPrefsField(
field_name=kvs_key.field_name,
module_type=BlockTypeKeyV1(kvs_key.block_family, kvs_key.block_scope_id),
student_id=kvs_key.user_id,
value=value,
)
def _read_objects(self, fields, xblocks, aside_types):
"""
Return an iterator for all objects stored in the underlying datastore
for the ``fields`` on the ``xblocks`` and the ``aside_types`` associated
with them.
Arguments:
fields (list of str): Field names to return values for
xblocks (list of :class:`~XBlock`): XBlocks to load fields for
aside_types (list of str): Asides to load field for (which annotate the supplied
xblocks).
"""
return XModuleStudentPrefsField.objects.chunked_filter(
'module_type__in',
_all_block_types(xblocks, aside_types),
student=self.user.pk,
field_name__in=set(field.name for field in fields),
)
def _cache_key_for_field_object(self, field_object):
"""
Return the key used in this DjangoOrmFieldCache to store the specified field_object.
Arguments:
field_object: A Django model instance that stores the data for fields in this cache
"""
return (field_object.module_type, field_object.field_name)
def _cache_key_for_kvs_key(self, key):
"""
Return the key used in this DjangoOrmFieldCache for the specified KeyValueStore key.
Arguments:
key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
"""
return (BlockTypeKeyV1(key.block_family, key.block_scope_id), key.field_name)
class UserInfoCache(DjangoOrmFieldCache):
"""
Cache for Scope.user_info xblock field data
"""
def __init__(self, user):
super(UserInfoCache, self).__init__()
self.user = user
def _create_object(self, kvs_key, value):
"""
Create a new object to add to the cache (which should record
the specified field ``value`` for the field identified by
``kvs_key``).
Arguments:
kvs_key (:class:`DjangoKeyValueStore.Key`): Which field to create an entry for
value: The value to assign to the new field object
"""
return XModuleStudentInfoField(
field_name=kvs_key.field_name,
student_id=kvs_key.user_id,
value=value,
)
def _read_objects(self, fields, xblocks, aside_types):
"""
Return an iterator for all objects stored in the underlying datastore
for the ``fields`` on the ``xblocks`` and the ``aside_types`` associated
with them.
Arguments:
fields (list of str): Field names to return values for
xblocks (list of :class:`~XBlock`): XBlocks to load fields for
aside_types (list of str): Asides to load field for (which annotate the supplied
xblocks).
"""
return XModuleStudentInfoField.objects.filter(
student=self.user.pk,
field_name__in=set(field.name for field in fields),
)
def _cache_key_for_field_object(self, field_object):
"""
Return the key used in this DjangoOrmFieldCache to store the specified field_object.
Arguments:
field_object: A Django model instance that stores the data for fields in this cache
"""
return field_object.field_name
def _cache_key_for_kvs_key(self, key):
"""
Return the key used in this DjangoOrmFieldCache for the specified KeyValueStore key.
Arguments:
key (:class:`~DjangoKeyValueStore.Key`): The key representing the cached field
"""
return key.field_name
class FieldDataCache(object):
"""
A cache of django model objects needed to supply the data
for a module and its descendants
"""
def __init__(self, descriptors, course_id, user, asides=None, read_only=False):
"""
Find any courseware.models objects that are needed by any descriptor
in descriptors. Attempts to minimize the number of queries to the database.
Note: Only modules that have store_state = True or have shared
state will have a StudentModule.
Arguments
descriptors: A list of XModuleDescriptors.
course_id: The id of the current course
user: The user for which to cache data
asides: The list of aside types to load, or None to prefetch no asides.
read_only: We should not perform writes (they become a no-op).
"""
if asides is None:
self.asides = []
else:
self.asides = asides
assert isinstance(course_id, CourseKey)
self.course_id = course_id
self.user = user
self.read_only = read_only
self.cache = {
Scope.user_state: UserStateCache(
self.user,
self.course_id,
),
Scope.user_info: UserInfoCache(
self.user,
),
Scope.preferences: PreferencesCache(
self.user,
),
Scope.user_state_summary: UserStateSummaryCache(
self.course_id,
),
}
self.scorable_locations = set()
self.add_descriptors_to_cache(descriptors)
def add_descriptors_to_cache(self, descriptors):
"""
Add all `descriptors` to this FieldDataCache.
"""
if self.user.is_authenticated():
self.scorable_locations.update(desc.location for desc in descriptors if desc.has_score)
for scope, fields in self._fields_to_cache(descriptors).items():
if scope not in self.cache:
continue
self.cache[scope].cache_fields(fields, descriptors, self.asides)
def add_descriptor_descendents(self, descriptor, depth=None, descriptor_filter=lambda descriptor: True):
"""
Add all descendants of `descriptor` to this FieldDataCache.
Arguments:
descriptor: An XModuleDescriptor
depth is the number of levels of descendant modules to load StudentModules for, in addition to
the supplied descriptor. If depth is None, load all descendant StudentModules
descriptor_filter is a function that accepts a descriptor and returns whether the field data
should be cached
"""
def get_child_descriptors(descriptor, depth, descriptor_filter):
"""
Return a list of all child descriptors down to the specified depth
that match the descriptor filter. Includes `descriptor`
descriptor: The parent to search inside
depth: The number of levels to descend, or None for infinite depth
descriptor_filter(descriptor): A function that returns True
if descriptor should be included in the results
"""
if descriptor_filter(descriptor):
descriptors = [descriptor]
else:
descriptors = []
if depth is None or depth > 0:
new_depth = depth - 1 if depth is not None else depth
for child in descriptor.get_children() + descriptor.get_required_module_descriptors():
descriptors.extend(get_child_descriptors(child, new_depth, descriptor_filter))
return descriptors
with modulestore().bulk_operations(descriptor.location.course_key):
descriptors = get_child_descriptors(descriptor, depth, descriptor_filter)
self.add_descriptors_to_cache(descriptors)
@classmethod
def cache_for_descriptor_descendents(cls, course_id, user, descriptor, depth=None,
descriptor_filter=lambda descriptor: True,
asides=None, read_only=False):
"""
course_id: the course in the context of which we want StudentModules.
user: the django user for whom to load modules.
descriptor: An XModuleDescriptor
depth is the number of levels of descendant modules to load StudentModules for, in addition to
the supplied descriptor. If depth is None, load all descendant StudentModules
descriptor_filter is a function that accepts a descriptor and returns whether the field data
should be cached
"""
cache = FieldDataCache([], course_id, user, asides=asides, read_only=read_only)
cache.add_descriptor_descendents(descriptor, depth, descriptor_filter)
return cache
def _fields_to_cache(self, descriptors):
"""
Returns a map of scopes to fields in that scope that should be cached
"""
scope_map = defaultdict(set)
for descriptor in descriptors:
for field in descriptor.fields.values():
scope_map[field.scope].add(field)
return scope_map
@contract(key=DjangoKeyValueStore.Key)
def get(self, key):
"""
Load the field value specified by `key`.
Arguments:
key (`DjangoKeyValueStore.Key`): The field value to load
Returns: The found value
Raises: KeyError if key isn't found in the cache
"""
if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
# If we're getting user data, we expect that the key matches the
# user we were constructed for.
assert key.user_id == self.user.id
if key.scope not in self.cache:
raise KeyError(key.field_name)
return self.cache[key.scope].get(key)
@contract(kv_dict="dict(DjangoKeyValueStore_Key: *)")
def set_many(self, kv_dict):
"""
Set all of the fields specified by the keys of `kv_dict` to the values
in that dict.
Arguments:
kv_dict (dict): dict mapping from `DjangoKeyValueStore.Key`s to field values
Raises: DatabaseError if any fields fail to save
"""
if self.read_only:
return
saved_fields = []
by_scope = defaultdict(dict)
for key, value in kv_dict.iteritems():
if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
# If we're getting user data, we expect that the key matches the
# user we were constructed for.
assert key.user_id == self.user.id
if key.scope not in self.cache:
continue
by_scope[key.scope][key] = value
for scope, set_many_data in by_scope.iteritems():
try:
self.cache[scope].set_many(set_many_data)
# If save is successful on these fields, add it to
# the list of successful saves
saved_fields.extend(key.field_name for key in set_many_data)
except KeyValueMultiSaveError as exc:
log.exception('Error saving fields %r', [key.field_name for key in set_many_data])
raise KeyValueMultiSaveError(saved_fields + exc.saved_field_names)
@contract(key=DjangoKeyValueStore.Key)
def delete(self, key):
"""
Delete the value specified by `key`.
Arguments:
key (`DjangoKeyValueStore.Key`): The field value to delete
Raises: KeyError if key isn't found in the cache
"""
if self.read_only:
return
if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
# If we're getting user data, we expect that the key matches the
# user we were constructed for.
assert key.user_id == self.user.id
if key.scope not in self.cache:
raise KeyError(key.field_name)
self.cache[key.scope].delete(key)
@contract(key=DjangoKeyValueStore.Key, returns=bool)
def has(self, key):
"""
Return whether the specified `key` is set.
Arguments:
key (`DjangoKeyValueStore.Key`): The field to check
Returns: bool
"""
if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
# If we're getting user data, we expect that the key matches the
# user we were constructed for.
assert key.user_id == self.user.id
if key.scope not in self.cache:
return False
return self.cache[key.scope].has(key)
@contract(key=DjangoKeyValueStore.Key, returns="datetime|None")
def last_modified(self, key):
"""
Return when the supplied field was changed.
Arguments:
key (`DjangoKeyValueStore.Key`): The field to check
Returns: datetime if there was a modified date, or None otherwise
"""
if key.scope.user == UserScope.ONE and not self.user.is_anonymous():
# If we're getting user data, we expect that the key matches the
# user we were constructed for.
assert key.user_id == self.user.id
if key.scope not in self.cache:
return None
return self.cache[key.scope].last_modified(key)
def __len__(self):
return sum(len(cache) for cache in self.cache.values())
class ScoresClient(object):
"""
Basic client interface for retrieving Score information.
Eventually, this should read and write scores, but at the moment it only
handles the read side of things.
"""
Score = namedtuple('Score', 'correct total created')
def __init__(self, course_key, user_id):
self.course_key = course_key
self.user_id = user_id
self._locations_to_scores = {}
self._has_fetched = False
def __contains__(self, location):
"""Return True if we have a score for this location."""
return location in self._locations_to_scores
def fetch_scores(self, locations):
"""Grab score information."""
scores_qset = StudentModule.objects.filter(
student_id=self.user_id,
course_id=self.course_key,
module_state_key__in=set(locations),
)
# Locations in StudentModule don't necessarily have course key info
# attached to them (since old mongo identifiers don't include runs).
# So we have to add that info back in before we put it into our lookup.
self._locations_to_scores.update({
UsageKey.from_string(location).map_into_course(self.course_key): self.Score(correct, total, created)
for location, correct, total, created
in scores_qset.values_list('module_state_key', 'grade', 'max_grade', 'created')
})
self._has_fetched = True
def get(self, location):
"""
Get the score for a given location, if it exists.
If we don't have a score for that location, return `None`. Note that as
convention, you should be passing in a location with full course run
information.
"""
if not self._has_fetched:
raise ValueError(
"Tried to fetch location {} from ScoresClient before fetch_scores() has run."
.format(location)
)
return self._locations_to_scores.get(location.replace(version=None, branch=None))
@classmethod
def create_for_locations(cls, course_id, user_id, scorable_locations):
"""Create a ScoresClient with pre-fetched data for the given locations."""
client = cls(course_id, user_id)
client.fetch_scores(scorable_locations)
return client
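# Illustrative usage sketch (not part of the original module; assumes real
# course/user identifiers and block locations):
#
#   client = ScoresClient.create_for_locations(course_key, user.id,
#                                              scorable_locations)
#   score = client.get(block.location)  # Score(correct, total, created) or None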
# @contract(user_id=int, usage_key=UsageKey, score="number|None", max_score="number|None")
def set_score(user_id, usage_key, score, max_score):
"""
Set the score and max_score for the specified user and xblock usage.
"""
student_module, created = StudentModule.objects.get_or_create(
student_id=user_id,
module_state_key=usage_key,
course_id=usage_key.course_key,
defaults={
'grade': score,
'max_grade': max_score,
}
)
if not created:
student_module.grade = score
student_module.max_grade = max_score
student_module.save()
return student_module.modified
def get_score(user_id, usage_key):
"""
Get the score and max_score for the specified user and xblock usage.
Returns None if not found.
"""
try:
student_module = StudentModule.objects.get(
student_id=user_id,
module_state_key=usage_key,
course_id=usage_key.course_key,
)
except StudentModule.DoesNotExist:
return None
else:
return student_module
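# Illustrative usage sketch (not part of the original module; assumes a real
# user id and usage key):
#
#   modified = set_score(user.id, usage_key, score=3, max_score=5)
#   student_module = get_score(user.id, usage_key)  # StudentModule or None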
| agpl-3.0 |
rizar/fuel | tests/test_caltech101_silhouettes.py | 25 | 1642 | import numpy
from numpy.testing import assert_raises
from fuel.datasets import CalTech101Silhouettes
from tests import skip_if_not_available
def test_caltech101_silhouettes16():
skip_if_not_available(datasets=['caltech101_silhouettes16.hdf5'])
for which_set, size, num_examples in (
('train', 16, 4082), ('valid', 16, 2257), ('test', 16, 2302)):
ds = CalTech101Silhouettes(which_sets=[which_set], size=size,
load_in_memory=False)
assert ds.num_examples == num_examples
handle = ds.open()
features, targets = ds.get_data(handle, slice(0, 10))
assert features.shape == (10, 1, size, size)
assert targets.shape == (10, 1)
assert features.dtype == numpy.uint8
assert targets.dtype == numpy.uint8
def test_caltech101_silhouettes_unkn_size():
assert_raises(ValueError, CalTech101Silhouettes,
which_sets=['test'], size=10)
def test_caltech101_silhouettes28():
skip_if_not_available(datasets=['caltech101_silhouettes28.hdf5'])
for which_set, size, num_examples in (
('train', 28, 4100), ('valid', 28, 2264), ('test', 28, 2307)):
ds = CalTech101Silhouettes(which_sets=[which_set], size=size,
load_in_memory=False)
assert ds.num_examples == num_examples
handle = ds.open()
features, targets = ds.get_data(handle, slice(0, 10))
assert features.shape == (10, 1, size, size)
assert targets.shape == (10, 1)
assert features.dtype == numpy.uint8
assert targets.dtype == numpy.uint8
| mit |
benjaminabel/pelican-plugins | goodreads_activity/goodreads_activity.py | 76 | 1871 | # -*- coding: utf-8 -*-
"""
Goodreads Activity
==================
A Pelican plugin to lists books from your Goodreads shelves.
Copyright (c) Talha Mansoor
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from pelican import signals
class GoodreadsActivity():
def __init__(self, generator):
import feedparser
self.activities = feedparser.parse(
generator.settings['GOODREADS_ACTIVITY_FEED'])
def fetch(self):
goodreads_activity = {
'shelf_title': self.activities.feed.title,
'books': []
}
for entry in self.activities['entries']:
book = {
'title': entry.title,
'author': entry.author_name,
'link': entry.link,
'l_cover': entry.book_large_image_url,
'm_cover': entry.book_medium_image_url,
's_cover': entry.book_small_image_url,
'description': entry.book_description,
'rating': entry.user_rating,
'review': entry.user_review,
'tags': entry.user_shelves
}
goodreads_activity['books'].append(book)
return goodreads_activity
def fetch_goodreads_activity(gen, metadata):
if 'GOODREADS_ACTIVITY_FEED' in gen.settings:
gen.context['goodreads_activity'] = gen.goodreads.fetch()
def initialize_feedparser(generator):
generator.goodreads = GoodreadsActivity(generator)
def register():
try:
signals.article_generator_init.connect(initialize_feedparser)
signals.article_generator_context.connect(fetch_goodreads_activity)
except ImportError:
logger.warning('`goodreads_activity` failed to load dependency `feedparser`.'
'`goodreads_activity` plugin not loaded.')
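# Illustrative configuration sketch (not part of the original plugin; the
# feed URL is a placeholder): point the plugin at a Goodreads RSS feed in
# pelicanconf.py, and the parsed books become available to templates as
# `goodreads_activity`:
#
#   GOODREADS_ACTIVITY_FEED = 'https://www.goodreads.com/review/list_rss/USER_ID?shelf=read'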
| agpl-3.0 |
Benowit/android_kernel_motorola_msm8974 | tools/perf/scripts/python/net_dropmonitor.py | 1258 | 1562 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms[::-1]:
if loc >= i['loc']:
return (i['name'], loc - i['loc'])
return (None, 0)
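# Note: the reverse scan above returns the symbol with the greatest address
# that is still <= loc, i.e. the symbol containing the drop location.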
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/python/kernel_tests/lrn_op_test.py | 4 | 5376 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
class LRNOpTest(tf.test.TestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0,
alpha=1.0, beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
def _RunAndVerify(self, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
p = tf.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
lrn_t = tf.nn.local_response_normalization(
p, name="lrn", depth_radius=lrn_depth_radius, bias=bias,
alpha=alpha, beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p], lrn_depth_radius=lrn_depth_radius, bias=bias, alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ", err)
if dtype == tf.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
self.assertShapeEqual(expected, lrn_t)
def testCompute(self):
for use_gpu in (True, False):
for _ in range(2):
self._RunAndVerify(tf.float32, use_gpu)
# Enable when LRN supports tf.float16 on GPU.
if not use_gpu:
self._RunAndVerify(tf.float16, use_gpu)
def testGradientsZeroInput(self):
for use_gpu in (True, False):
with self.test_session(use_gpu=use_gpu):
shape = [4, 4, 4, 4]
p = tf.placeholder(tf.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
lrn_op = tf.nn.local_response_normalization(p, 2, 1.0, 0.0,
1.0, name="lrn")
grad = tf.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
self.assertAllClose(r, expected)
self.assertShapeEqual(expected, grad)
def _RunAndVerifyGradients(self, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
# random shape
shape = np.random.randint(1, 5, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
if dtype == tf.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
inp = tf.constant(
list(inp_array.ravel(order="C")),
shape=shape,
dtype=dtype)
lrn_op = tf.nn.local_response_normalization(
inp, name="lrn", depth_radius=lrn_depth_radius, bias=bias,
alpha=alpha, beta=beta)
err = tf.test.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
if dtype == tf.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
def testGradients(self):
for use_gpu in (True, False):
for _ in range(2):
self._RunAndVerifyGradients(tf.float32, use_gpu)
# Enable when LRN supports tf.float16 on GPU.
if not use_gpu:
self._RunAndVerifyGradients(tf.float16, use_gpu)
if __name__ == "__main__":
tf.test.main()
| mit |
rosscdh/silk | django_silky/silk/profiling/dynamic.py | 8 | 6869 | from functools import partial
import inspect
import logging
import sys
import re
import six
from silk.profiling.profiler import silk_profile
Logger = logging.getLogger('silk')
def _get_module(module_name):
"""
Given a module name of the form 'path.to.module', return the module object for 'module'.
"""
if '.' in module_name:
splt = module_name.split('.')
imp = '.'.join(splt[:-1])
frm = splt[-1]
module = __import__(imp, globals(), locals(), [frm], 0)
module = getattr(module, frm)
else:
module = __import__(module_name, globals(), locals(), [], 0)
return module
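# Illustrative examples (assumed behavior, not part of the original module):
# _get_module('silk') -> the top-level silk package
# _get_module('silk.profiling.profiler') -> the profiler submodule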
def _get_func(module, func_name):
"""
Given a module and a function name, return the function.
func_name can be of the forms:
- 'foo': return a function
- 'Class.foo': return a method
"""
cls_name = None
cls = None
if '.' in func_name:
cls_name, func_name = func_name.split('.')
if cls_name:
cls = getattr(module, cls_name)
func = getattr(cls, func_name)
else:
func = getattr(module, func_name)
return cls, func
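# Illustrative example (assumed behavior): for a module containing a function
# foo() and a class Bar with method baz:
# _get_func(module, 'foo') -> (None, foo)
# _get_func(module, 'Bar.baz') -> (Bar, Bar.baz)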
def profile_function_or_method(module, func, name=None):
"""
Programmatically apply a decorator to a function in a given module [+ class]
@param module: module object or module name in form 'path.to.module'
@param func: function object or function name in form 'foo' or 'Class.method'
"""
if is_str_typ(module):  # accept a module given by name (str/unicode)
module = _get_module(module)
decorator = silk_profile(name, _dynamic=True)
func_name = func
cls, func = _get_func(module, func_name)
wrapped_target = decorator(func)
if cls:
setattr(cls, func_name.split('.')[-1], wrapped_target)
else:
setattr(module, func_name, wrapped_target)
def _get_parent_module(module):
parent = sys.modules
splt = module.__name__.split('.')
if len(splt) > 1:
for module_name in splt[:-1]:
try:
parent = getattr(parent, module_name)
except AttributeError:
parent = parent[module_name]
return parent
def _get_context_manager_source(end_line, file_path, name, start_line):
inject_code = "with silk_profile('%s', _dynamic=True):\n" % name
code = 'from silk.profiling.profiler import silk_profile\n'
with open(file_path, 'r') as f:
ws = ''
for i, line in enumerate(f):
if i == start_line:
# Use the same amount of whitespace as the line currently occupying that position
x = re.search(r"^(\s+).*$", line)
try:
ws = x.groups()[0]
except IndexError:
ws = ''
code += ws + inject_code
code += ws + ' ' + line
elif start_line < i <= end_line:
code += ws + ' ' + line
else:
code += line
return code
def _get_ws(txt):
"""
Return whitespace at the beginning of a string
"""
m = re.search(r"^(\s+).*$", txt)
try:
fws = m.groups()[0]
except AttributeError:
fws = ''
return fws
def _get_source_lines(func):
source = inspect.getsourcelines(func)[0]
fws = _get_ws(source[0])
for i in range(0, len(source)):
source[i] = source[i].replace(fws, '', 1)
return source
def _new_func_from_source(source, func):
"""
Create new function defined in source but maintain context from func
@param func: The function whose global + local context we will use
@param source: Python source code containing def statement
"""
src_str = ''.join(source)
frames = inspect.getouterframes(inspect.currentframe())
calling_frame = frames[2][0]
context = {}
# My initial instinct was: exec src_str in func.func_globals.items(), calling_frame.f_locals
# however this seems to break the function closure, so the caveat here is that we create a new
# function with the locals merged into the globals.
#
# Possible consequences I can think of:
# - If a global exists that already has the same name as a local, it will be overwritten
#   in the context of this function. This shouldn't matter though, as the global should
#   already have been hidden by the new name.
#
# This functionality should be considered experimental, as it is unclear what other
# consequences there could be.
#
# relevant: http://stackoverflow.com/questions/2749655/why-are-closures-broken-within-exec
globals = six.get_function_globals(func)
locals = calling_frame.f_locals
combined = globals.copy()
combined.update(locals)
Logger.debug('New src_str:\n %s' % src_str)
six.exec_(src_str, combined, context)
new_func = context[func.__name__]
return new_func
def _inject_context_manager_func(func, start_line, end_line, name):
"""
injects a context manager into the given function
e.g given:
x = 5
def foo():
print x
print '1'
print '2'
print '3'
inject_context_manager_func(foo, 0, 2, 'cm')
foo will now have the definition:
def foo():
with silk_profile('cm'):
print x
print '1'
print '2'
print '3'
closures, globals & locals are honoured
@param func: object of type<function> or type<instancemethod>
@param start_line: line at which to inject 'with' statement. line num. is relative to the func, not the module.
@param end_line: line at which to exit the context
@param name: name of the profiler
"""
source = _get_source_lines(func)
start_line += 1
end_line += 1
ws = _get_ws(source[start_line])
for i in range(start_line, end_line):
try:
source[i] = ' ' + source[i]
except IndexError:
raise IndexError('Function %s does not have line %d' % (func.__name__, i))
source.insert(start_line, ws + "from silk.profiling.profiler import silk_profile\n")
source.insert(start_line + 1, ws + "with silk_profile('%s', _dynamic=True):\n" % name)
return _new_func_from_source(source, func)
def is_str_typ(o):
return any(map(partial(isinstance, o), six.string_types)) \
or isinstance(o, six.text_type)
def inject_context_manager_func(module, func, start_line, end_line, name):
if is_str_typ(module):
module = _get_module(module)
cls = None
if is_str_typ(func):
func_name = func
cls, func = _get_func(module, func_name)
else:
func_name = func.__name__
new_func = _inject_context_manager_func(func, start_line, end_line, name)
if cls:
setattr(cls, func_name, new_func)
else:
setattr(module, func_name, new_func)
| mit |
fargalaxy1/geonode-leaflet | geonode/groups/tests.py | 11 | 17197 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.conf import settings
from guardian.shortcuts import get_anonymous_user
from geonode.groups.models import GroupProfile, GroupInvitation
from geonode.documents.models import Document
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.base.populate_test_data import create_models
from geonode.security.views import _perms_info_json
class SmokeTest(TestCase):
"""
Basic checks to make sure pages load, etc.
"""
fixtures = ['initial_data.json', "group_test_data"]
def setUp(self):
create_models(type='layer')
create_models(type='map')
create_models(type='document')
self.norman = get_user_model().objects.get(username="norman")
self.norman.groups.add(Group.objects.get(name='anonymous'))
self.test_user = get_user_model().objects.get(username='test_user')
self.test_user.groups.add(Group.objects.get(name='anonymous'))
self.bar = GroupProfile.objects.get(slug='bar')
self.anonymous_user = get_anonymous_user()
def test_group_permissions_extend_to_user(self):
"""
Ensures that when a user is in a group, the group permissions
extend to the user.
"""
layer = Layer.objects.all()[0]
# Set the default permissions
layer.set_default_permissions()
# Test that the anonymous user can read
self.assertTrue(self.anonymous_user.has_perm('view_resourcebase', layer.get_self_resource()))
# Test that the default perms give Norman view permissions but not
# write permissions
self.assertTrue(self.norman.has_perm('view_resourcebase', layer.get_self_resource()))
self.assertFalse(self.norman.has_perm('change_resourcebase', layer.get_self_resource()))
# Make sure Norman is not in the bar group.
self.assertFalse(self.bar.user_is_member(self.norman))
# Add norman to the bar group.
self.bar.join(self.norman)
# Ensure Norman is in the bar group.
self.assertTrue(self.bar.user_is_member(self.norman))
# Give the bar group permissions to change the layer.
permissions = {'groups': {'bar': ['view_resourcebase', 'change_resourcebase']}}
layer.set_permissions(permissions)
self.assertTrue(self.norman.has_perm('view_resourcebase', layer.get_self_resource()))
# check that now norman can change the layer
self.assertTrue(self.norman.has_perm('change_resourcebase', layer.get_self_resource()))
# Test adding a new user to the group after setting permissions on the layer.
# Make sure Test User is not in the bar group.
self.assertFalse(self.bar.user_is_member(self.test_user))
self.assertFalse(self.test_user.has_perm('change_resourcebase', layer.get_self_resource()))
self.bar.join(self.test_user)
self.assertTrue(self.test_user.has_perm('change_resourcebase', layer.get_self_resource()))
def test_group_resource(self):
"""
Tests the resources method on a Group object.
"""
layer = Layer.objects.all()[0]
map = Map.objects.all()[0]
perm_spec = {'groups': {'bar': ['change_resourcebase']}}
# Give the self.bar group write perms on the layer
layer.set_permissions(perm_spec)
map.set_permissions(perm_spec)
# Ensure the layer is returned in the group's resources
self.assertTrue(layer.get_self_resource() in self.bar.resources())
self.assertTrue(map.get_self_resource() in self.bar.resources())
# Test the resource filter
self.assertTrue(layer.get_self_resource() in self.bar.resources(resource_type='layer'))
self.assertTrue(map.get_self_resource() not in self.bar.resources(resource_type='layer'))
# Revoke permissions on the layer from the self.bar group
layer.set_permissions("{}")
# Ensure the layer is no longer returned in the groups resources
self.assertFalse(layer.get_self_resource() in self.bar.resources())
def test_perms_info(self):
"""
Tests the perms_info function (which passes permissions to the response context).
"""
# Add test to test perms being sent to the front end.
layer = Layer.objects.all()[0]
layer.set_default_permissions()
perms_info = layer.get_all_level_info()
# Ensure there is only one group 'anonymous' by default
self.assertEqual(len(perms_info['groups'].keys()), 1)
# Add the foo group to the layer object groups
layer.set_permissions({'groups': {'bar': ['view_resourcebase']}})
perms_info = _perms_info_json(layer)
# Ensure foo is in the perms_info output
self.assertDictEqual(json.loads(perms_info)['groups'], {'bar': ['view_resourcebase']})
def test_resource_permissions(self):
"""
Tests that the client can get and set group permissions through the resource_permissions view.
"""
self.assertTrue(self.client.login(username="admin", password="admin"))
layer = Layer.objects.all()[0]
document = Document.objects.all()[0]
map_obj = Map.objects.all()[0]
layer.set_default_permissions()
document.set_default_permissions()
map_obj.set_default_permissions()
objects = layer, document, map_obj
for obj in objects:
response = self.client.get(reverse('resource_permissions', kwargs=dict(resource_id=obj.id)))
self.assertEqual(response.status_code, 200)
js = json.loads(response.content)
permissions = js.get('permissions', dict())
if isinstance(permissions, unicode) or isinstance(permissions, str):
permissions = json.loads(permissions)
# Ensure the groups value is empty by default
expected_permissions = {}
if settings.DEFAULT_ANONYMOUS_DOWNLOAD_PERMISSION:
expected_permissions.setdefault(u'anonymous', []).append(u'download_resourcebase')
if settings.DEFAULT_ANONYMOUS_VIEW_PERMISSION:
expected_permissions.setdefault(u'anonymous', []).append(u'view_resourcebase')
self.assertDictEqual(permissions.get('groups'), expected_permissions)
permissions = {
'groups': {
'bar': ['change_resourcebase']
},
'users': {
'admin': ['change_resourcebase']
}
}
# Give the bar group permissions
response = self.client.post(
reverse(
'resource_permissions',
kwargs=dict(resource_id=obj.id)),
data=json.dumps(permissions),
content_type="application/json")
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('resource_permissions', kwargs=dict(resource_id=obj.id)))
js = json.loads(response.content)
permissions = js.get('permissions', dict())
if isinstance(permissions, unicode) or isinstance(permissions, str):
permissions = json.loads(permissions)
# Make sure the bar group now has write permissions
self.assertDictEqual(permissions['groups'], {'bar': ['change_resourcebase']})
# Remove group permissions
permissions = {"users": {"admin": ['change_resourcebase']}}
# Update the object's permissions to remove the bar group
response = self.client.post(
reverse(
'resource_permissions',
kwargs=dict(resource_id=obj.id)),
data=json.dumps(permissions),
content_type="application/json")
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('resource_permissions', kwargs=dict(resource_id=obj.id)))
js = json.loads(response.content)
permissions = js.get('permissions', dict())
if isinstance(permissions, unicode) or isinstance(permissions, str):
permissions = json.loads(permissions)
# Assert the bar group no longer has permissions
self.assertDictEqual(permissions['groups'], {})
def test_create_new_group(self):
"""
Tests creating a group through the group_create route.
"""
d = dict(title='TestGroup',
description='This is a test group.',
access='public',
keywords='testing, groups')
self.client.login(username="admin", password="admin")
response = self.client.post(reverse('group_create'), data=d)
# successful POSTS will redirect to the group's detail view.
self.assertEqual(response.status_code, 302)
self.assertTrue(GroupProfile.objects.get(title='TestGroup'))
def test_delete_group_view(self):
"""
Tests deleting a group through the group_delete route.
"""
# Ensure the group exists
self.assertTrue(GroupProfile.objects.get(id=self.bar.id))
self.client.login(username="admin", password="admin")
# Delete the group
response = self.client.post(reverse('group_remove', args=[self.bar.slug]))
# successful POSTS will redirect to the group list view.
self.assertEqual(response.status_code, 302)
self.assertFalse(GroupProfile.objects.filter(id=self.bar.id).count() > 0)
def test_delete_group_view_no_perms(self):
"""
Tests deleting a group through the group_delete with a non-manager.
"""
# Ensure the group exists
self.assertTrue(GroupProfile.objects.get(id=self.bar.id))
self.client.login(username="norman", password="norman")
# Delete the group
response = self.client.post(reverse('group_remove', args=[self.bar.slug]))
self.assertEqual(response.status_code, 403)
# Ensure the group still exists
self.assertTrue(GroupProfile.objects.get(id=self.bar.id))
def test_groupmember_manager(self):
"""
Tests the get_managers method.
"""
norman = get_user_model().objects.get(username="norman")
admin = get_user_model().objects.get(username='admin')
# Make sure norman is not a user
self.assertFalse(self.bar.user_is_member(norman))
# Add norman to the self.bar group
self.bar.join(norman)
# Ensure norman is now a member
self.assertTrue(self.bar.user_is_member(norman))
# Ensure norman is not in the managers queryset
self.assertTrue(norman not in self.bar.get_managers())
# Ensure admin is in the managers queryset
self.assertTrue(admin in self.bar.get_managers())
def test_public_pages_render(self):
"""
Verify pages that do not require login load without internal error
"""
response = self.client.get("/groups/")
self.assertEqual(200, response.status_code)
response = self.client.get("/groups/group/bar/")
self.assertEqual(200, response.status_code)
response = self.client.get("/groups/group/bar/members/")
self.assertEqual(200, response.status_code)
# 302 for auth failure since we redirect to login page
response = self.client.get("/groups/create/")
self.assertEqual(302, response.status_code)
response = self.client.get("/groups/group/bar/update/")
self.assertEqual(302, response.status_code)
# 405 - json endpoint, doesn't support GET
response = self.client.get("/groups/group/bar/invite/")
self.assertEqual(405, response.status_code)
def test_protected_pages_render(self):
"""
Verify pages that require login load without internal error
"""
self.assertTrue(self.client.login(username="admin", password="admin"))
response = self.client.get("/groups/")
self.assertEqual(200, response.status_code)
response = self.client.get("/groups/group/bar/")
self.assertEqual(200, response.status_code)
response = self.client.get("/groups/group/bar/members/")
self.assertEqual(200, response.status_code)
response = self.client.get("/groups/create/")
self.assertEqual(200, response.status_code)
response = self.client.get("/groups/group/bar/update/")
self.assertEqual(200, response.status_code)
# 405 - json endpoint, doesn't support GET
response = self.client.get("/groups/group/bar/invite/")
self.assertEqual(405, response.status_code)
class MembershipTest(TestCase):
"""
Tests membership logic in the geonode.groups models
"""
fixtures = ["group_test_data"]
def test_group_is_member(self):
"""
Tests checking group membership
"""
anon = get_anonymous_user()
normal = get_user_model().objects.get(username="norman")
group = GroupProfile.objects.get(slug="bar")
self.assert_(not group.user_is_member(anon))
self.assert_(not group.user_is_member(normal))
def test_group_add_member(self):
"""
Tests adding a user to a group
"""
anon = get_anonymous_user()
normal = get_user_model().objects.get(username="norman")
group = GroupProfile.objects.get(slug="bar")
group.join(normal)
self.assert_(group.user_is_member(normal))
self.assertRaises(ValueError, lambda: group.join(anon))
class InvitationTest(TestCase):
"""
Tests invitation logic in geonode.groups models
"""
fixtures = ["group_test_data"]
def test_invite_user(self):
"""
Tests inviting a registered user
"""
normal = get_user_model().objects.get(username="norman")
admin = get_user_model().objects.get(username="admin")
group = GroupProfile.objects.get(slug="bar")
group.invite(normal, admin, role="member", send=False)
self.assert_(GroupInvitation.objects.filter(user=normal, from_user=admin, group=group).exists())
invite = GroupInvitation.objects.get(user=normal, from_user=admin, group=group)
# Test that the user can access the token url.
self.client.login(username="norman", password="norman")
response = self.client.get("/groups/group/{group}/invite/{token}/".format(group=group, token=invite.token))
self.assertEqual(200, response.status_code)
def test_accept_invitation(self):
"""
Tests accepting an invitation
"""
anon = get_anonymous_user()
normal = get_user_model().objects.get(username="norman")
admin = get_user_model().objects.get(username="admin")
group = GroupProfile.objects.get(slug="bar")
group.invite(normal, admin, role="member", send=False)
invitation = GroupInvitation.objects.get(user=normal, from_user=admin, group=group)
self.assertRaises(ValueError, lambda: invitation.accept(anon))
self.assertRaises(ValueError, lambda: invitation.accept(admin))
invitation.accept(normal)
self.assert_(group.user_is_member(normal))
self.assert_(invitation.state == "accepted")
def test_decline_invitation(self):
"""
Tests declining an invitation
"""
anon = get_anonymous_user()
normal = get_user_model().objects.get(username="norman")
admin = get_user_model().objects.get(username="admin")
group = GroupProfile.objects.get(slug="bar")
group.invite(normal, admin, role="member", send=False)
invitation = GroupInvitation.objects.get(user=normal, from_user=admin, group=group)
self.assertRaises(ValueError, lambda: invitation.decline(anon))
self.assertRaises(ValueError, lambda: invitation.decline(admin))
invitation.decline(normal)
self.assert_(not group.user_is_member(normal))
self.assert_(invitation.state == "declined")
| gpl-3.0 |
ShaheenHossain/eagle-medical | medical_his/tests/test_medical_hospital_or.py | 1 | 1700 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of medical_his,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# medical_his is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# medical_his is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with medical_his.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from anybox.testing.openerp import SharedSetupTransactionCase
from openerp.exceptions import ValidationError
class TestMedicalHospitalOr(SharedSetupTransactionCase):
_data_files = (
'data/medical_his_data.xml',
)
_module_ns = 'medical_his'
def setUp(self):
SharedSetupTransactionCase.setUp(self)
def test_unicity(self):
medical_hospital_or = self.env['medical.hospital.or']
op_room = self.env.ref('%s.or_1' % self._module_ns)
vals = {
'name': op_room.name,
'zone_id': op_room.zone_id.id,
}
with self.assertRaises(ValidationError):
medical_hospital_or.create(vals)
| agpl-3.0 |
Socim/Alucard-Kernel-jflte | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# trace drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
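# Note (added illustration): us_to_px and px_to_us convert between trace time
# and screen space; at the default zoom of 0.5, us_to_px(1000) == 0.5, i.e.
# 1000 us of trace time maps to half a pixel.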
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
MetrodataTeam/incubator-airflow | airflow/contrib/sensors/jira_sensor.py | 30 | 5905 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from jira.resources import Resource
from airflow.contrib.operators.jira_operator import JIRAError
from airflow.contrib.operators.jira_operator import JiraOperator
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class JiraSensor(BaseSensorOperator):
"""
Monitors a jira ticket for any change.
:param jira_conn_id: reference to a pre-defined Jira Connection
:type jira_conn_id: str
:param method_name: method name from jira-python-sdk to be executed
:type method_name: str
:param method_params: parameters for the method method_name
:type method_params: dict
:param result_processor: function that returns a boolean and acts as the sensor response
:type result_processor: function
"""
@apply_defaults
def __init__(self,
jira_conn_id='jira_default',
method_name=None,
method_params=None,
result_processor=None,
*args,
**kwargs):
super(JiraSensor, self).__init__(*args, **kwargs)
self.jira_conn_id = jira_conn_id
self.result_processor = None
if result_processor is not None:
self.result_processor = result_processor
self.method_name = method_name
self.method_params = method_params
self.jira_operator = JiraOperator(task_id=self.task_id,
jira_conn_id=self.jira_conn_id,
jira_method=self.method_name,
jira_method_args=self.method_params,
result_processor=self.result_processor)
def poke(self, context):
return self.jira_operator.execute(context=context)
class JiraTicketSensor(JiraSensor):
"""
Monitors a jira ticket for a given change, expressed as a checker function.
:param jira_conn_id: reference to a pre-defined Jira Connection
:type jira_conn_id: str
:param ticket_id: id of the ticket to be monitored
:type ticket_id: str
:param field: field of the ticket to be monitored
:type field: str
:param expected_value: expected value of the field
:type expected_value: str
:param result_processor: function that returns a boolean and acts as the sensor response
:type result_processor: function
"""
template_fields = ("ticket_id",)
@apply_defaults
def __init__(self,
jira_conn_id='jira_default',
ticket_id=None,
field=None,
expected_value=None,
field_checker_func=None,
*args, **kwargs):
self.jira_conn_id = jira_conn_id
self.ticket_id = ticket_id
self.field = field
self.expected_value = expected_value
if field_checker_func is None:
field_checker_func = self.issue_field_checker
super(JiraTicketSensor, self).__init__(jira_conn_id=jira_conn_id,
result_processor=field_checker_func,
*args, **kwargs)
def poke(self, context):
logging.info('Jira Sensor checking for change in ticket: {0}'
.format(self.ticket_id))
self.jira_operator.method_name = "issue"
self.jira_operator.jira_method_args = {
'id': self.ticket_id,
'fields': self.field
}
return JiraSensor.poke(self, context=context)
def issue_field_checker(self, context, issue):
result = None
try:
if issue is not None \
and self.field is not None \
and self.expected_value is not None:
field_value = getattr(issue.fields, self.field)
if field_value is not None:
if isinstance(field_value, list):
result = self.expected_value in field_value
elif isinstance(field_value, str):
result = self.expected_value.lower() == field_value.lower()
elif isinstance(field_value, Resource) \
and getattr(field_value, 'name'):
result = self.expected_value.lower() == field_value.name.lower()
else:
logging.warning("not implemented checker for issue field {0} "
"which is neither string nor list nor "
"jira Resource".format(self.field))
except JIRAError as jira_error:
logging.error("jira error while checking with expected value: {0}"
.format(jira_error))
except Exception as e:
logging.error("error while checking with expected value {0}, error: {1}"
.format(self.expected_value, e))
if result is True:
logging.info("issue field {0} has expected value {1}, returning success"
.format(self.field, self.expected_value))
else:
logging.info("issue field {0} dont have expected value {1} yet."
.format(self.field, self.expected_value))
return result
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.4/django/contrib/gis/tests/distapp/models.py | 406 | 1832 | from django.contrib.gis.db import models
class SouthTexasCity(models.Model):
"City model on projected coordinate system for South Texas."
name = models.CharField(max_length=30)
point = models.PointField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasCityFt(models.Model):
"Same City model as above, but U.S. survey feet are the units."
name = models.CharField(max_length=30)
point = models.PointField(srid=2278)
objects = models.GeoManager()
def __unicode__(self): return self.name
class AustraliaCity(models.Model):
"City model for Australia, using WGS84."
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class CensusZipcode(models.Model):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasZipcode(models.Model):
"Model for a few South Texas ZIP codes."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=32140, null=True)
objects = models.GeoManager()
def __unicode__(self): return self.name
class Interstate(models.Model):
"Geodetic model for U.S. Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasInterstate(models.Model):
"Projected model for South Texas Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
| bsd-3-clause |
bwrsandman/OpenUpgrade | addons/crm_profiling/__init__.py | 438 | 1089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_profiling
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
midma101/m0du1ar | .venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.py | 439 | 11949 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
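# Illustrative behavior (derived from the repr definitions below):
# parse("1.0.dev1") -> <Version('1.0.dev1')>
# parse("hello-world") -> <LegacyVersion('hello-world')>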
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version of setuptools, prior
# to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
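# Illustrative example (derived from the code above): the trailing zero part
# is dropped before "*final" is appended, so:
# _legacy_cmpkey("1.0") -> (-1, ('00000001', '*final'))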
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll reverse the list, drop all the now-leading
# zeros until we come to something non-zero, then re-reverse the rest back
# into the correct order, make it a tuple and use
# that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
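# e.g. a release of (1, 0, 0) becomes (1,), so Version("1.0.0") and
# Version("1") compare equal.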
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
| mit |
weiawe/django | tests/migrations/test_loader.py | 28 | 11971 | from __future__ import unicode_literals
from unittest import skipIf
from django.db import connection, connections
from django.db.migrations.graph import NodeNotFoundError
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
set((x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
MigrationLoader(connection)
@skipIf(six.PY2, "PY2 doesn't load empty dirs.")
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
MigrationLoader(connection)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
recorder.flush()
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 to 5 cannot use the squashed migration
recorder.record_applied("migrations", "3_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "4_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
recorder.flush()
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = set([
('app1', '4_auto'),
('app1', '2_squashed_3'),
('app2', '1_squashed_2'),
('app1', '1_auto')
])
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4 we'd need to use non-existing migrations
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
recorder.record_applied("migrations", "3_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
recorder.record_applied("migrations", "4_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
        # Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
recorder.flush()
| bsd-3-clause |
mojaves/convirt | tests/monkey.py | 1 | 3383 | from __future__ import absolute_import
#
# Copyright 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from contextlib import contextmanager
from functools import wraps
import inspect
class Patch(object):
def __init__(self, what):
self.what = what
self.old = []
def apply(self):
for module, name, that in self.what:
old = getattr(module, name)
self.old.append((module, name, old))
            # The following block ensures that if we are patching in a
            # method, it will have the same type as the method it
            # replaced.
if inspect.isclass(module):
if inspect.isfunction(old):
that = staticmethod(that)
elif (inspect.ismethod(old) and
getattr(old, 'im_self', None) is not None):
that = classmethod(that)
setattr(module, name, that)
def revert(self):
assert self.old != []
while self.old:
module, name, that = self.old.pop()
            # The following block is only necessary for Python 2, which would
            # otherwise wrongly set the function as an instancemethod instead
            # of keeping it as a staticmethod.
if inspect.isclass(module):
if inspect.isfunction(that):
that = staticmethod(that)
setattr(module, name, that)
@contextmanager
def patch_scope(what):
patch = Patch(what)
patch.apply()
try:
yield
finally:
patch.revert()
def patch_function(module, name, that):
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
with patch_scope([(module, name, that)]):
return f(*args, **kw)
return wrapper
return decorator
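# --- Illustrative usage (editor's sketch, not part of the original module) ---
# patch_function swaps the target in only while the decorated function runs
# (patch_scope gives the same guarantee as a plain context manager).
# `time.sleep` is just a stand-in patch target chosen for this sketch.
import time

@patch_function(time, 'sleep', lambda seconds: None)
def _example_no_sleep():
    time.sleep(60)  # returns immediately while the patch is active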
def patch_class(module, name, that):
def setup_decorator(func):
@wraps(func)
def setup(self, *a, **kw):
if not hasattr(self, '__monkeystack__'):
self.__monkeystack__ = []
patch = Patch([(module, name, that)])
self.__monkeystack__.append(patch)
patch.apply()
return func(self, *a, **kw)
return setup
def teardown_decorator(func):
@wraps(func)
def teardown(self, *a, **kw):
patch = self.__monkeystack__.pop()
patch.revert()
return func(self, *a, **kw)
return teardown
def wrapper(cls):
cls.setUp = setup_decorator(cls.setUp)
cls.tearDown = teardown_decorator(cls.tearDown)
return cls
return wrapper
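# --- Illustrative usage (editor's sketch, not part of the original module) ---
# patch_class applies the patch in setUp and reverts it in tearDown of each
# test; `os.path.exists` is just a stand-in patch target for this sketch.
import os
import unittest

@patch_class(os.path, 'exists', lambda path: True)
class _PatchClassExample(unittest.TestCase):
    def test_patched(self):
        self.assertTrue(os.path.exists('/definitely/not/there'))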
| lgpl-2.1 |
sv-dev1/odoo | openerp/cli/server.py | 187 | 5869 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.
The whole source code is distributed under the terms of the
GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""
import atexit
import csv
import logging
import os
import signal
import sys
import threading
import traceback
import time
import openerp
from . import Command
__author__ = openerp.release.author
__version__ = openerp.release.version
# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')
def check_root_user():
""" Exit if the process's user is 'root' (on POSIX system)."""
if os.name == 'posix':
import pwd
        if pwd.getpwuid(os.getuid())[0] == 'root':
sys.stderr.write("Running as user 'root' is a security risk, aborting.\n")
sys.exit(1)
def check_postgres_user():
""" Exit if the configured database user is 'postgres'.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if config['db_user'] == 'postgres':
sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.")
sys.exit(1)
def report_configuration():
""" Log the server version and some configuration values.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
_logger.info("OpenERP version %s", __version__)
for name, value in [('addons paths', openerp.modules.module.ad_paths),
('database hostname', config['db_host'] or 'localhost'),
('database port', config['db_port'] or '5432'),
('database user', config['db_user'])]:
_logger.info("%s: %s", name, value)
def rm_pid_file():
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
try:
os.unlink(config['pidfile'])
except OSError:
pass
def setup_pid_file():
""" Create a file with the process id written in it.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
with open(config['pidfile'], 'w') as fd:
pidtext = "%d" % (os.getpid())
fd.write(pidtext)
atexit.register(rm_pid_file)
def export_translation():
config = openerp.tools.config
dbname = config['db_name']
if config["language"]:
msg = "language %s" % (config["language"],)
else:
msg = "new language"
_logger.info('writing translation file for %s to %s', msg,
config["translate_out"])
fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
with open(config["translate_out"], "w") as buf:
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_export(config["language"],
config["translate_modules"] or ["all"], buf, fileformat, cr)
_logger.info('translation file written successfully')
def import_translation():
config = openerp.tools.config
context = {'overwrite': config["overwrite_existing_translations"]}
dbname = config['db_name']
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_load(
cr, config["translate_in"], config["language"], context=context,
)
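# Editor's note (illustrative, not from the original file): both paths above
# are normally driven from the command line; the flag names below are assumed
# from OpenERP 7/8-era openerp.tools.config and may differ per release.
#   openerp-server -d mydb -l fr_FR --i18n-export=/tmp/fr.po --modules=sale
#   openerp-server -d mydb -l fr_FR --i18n-import=/tmp/fr.po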
def main(args):
check_root_user()
openerp.tools.config.parse_config(args)
check_postgres_user()
report_configuration()
config = openerp.tools.config
    # the default limit for CSV fields in the module is 128KiB, which is not
    # quite sufficient to import images to be stored as attachments. 500MiB is
    # a bit overkill, but better safe than sorry I guess
csv.field_size_limit(500 * 1024 * 1024)
if config["test_file"]:
config["test_enable"] = True
if config["translate_out"]:
export_translation()
sys.exit(0)
if config["translate_in"]:
import_translation()
sys.exit(0)
# This needs to be done now to ensure the use of the multiprocessing
    # signaling mechanism for registries loaded with -d
if config['workers']:
openerp.multi_process = True
preload = []
if config['db_name']:
preload = config['db_name'].split(',')
stop = config["stop_after_init"]
setup_pid_file()
rc = openerp.service.server.start(preload=preload, stop=stop)
sys.exit(rc)
class Server(Command):
"""Start the odoo server (default command)"""
def run(self, args):
main(args)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jeffmahoney/supybot | plugins/Reply/test.py | 18 | 2633 | ###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.ircutils as ircutils
class ReplyTestCase(ChannelPluginTestCase):
plugins = ('Reply',)
def testPrivate(self):
m = self.getMsg('private [list]')
self.failIf(ircutils.isChannel(m.args[0]))
def testNotice(self):
m = self.getMsg('notice [list]')
self.assertEqual(m.command, 'NOTICE')
def testNoticePrivate(self):
m = self.assertNotError('notice [private [list]]')
self.assertEqual(m.command, 'NOTICE')
self.assertEqual(m.args[0], self.nick)
m = self.assertNotError('private [notice [list]]')
self.assertEqual(m.command, 'NOTICE')
self.assertEqual(m.args[0], self.nick)
class ReplyNonChannelTestCase(PluginTestCase):
plugins = ('Reply',)
def testAction(self):
self.prefix = 'something!else@somewhere.else'
self.nick = 'something'
m = self.assertAction('action foo', 'foo')
self.failIf(m.args[0] == self.irc.nick)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
yfried/ansible | lib/ansible/modules/monitoring/grafana_dashboard.py | 6 | 14980 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_dashboard
author:
- Thierry Sallé (@seuf)
version_added: "2.5"
short_description: Manage Grafana dashboards
description:
- Create, update, delete, export Grafana dashboards via API.
options:
url:
description:
- The Grafana URL.
required: true
aliases: [ grafana_url ]
version_added: 2.7
url_username:
description:
- The Grafana API user.
default: admin
aliases: [ grafana_user ]
version_added: 2.7
url_password:
description:
- The Grafana API password.
default: admin
aliases: [ grafana_password ]
version_added: 2.7
grafana_api_key:
description:
- The Grafana API key.
- If set, I(grafana_user) and I(grafana_password) will be ignored.
org_id:
description:
- The Grafana Organisation ID where the dashboard will be imported / exported.
      - Not used when I(grafana_api_key) is set, because the grafana_api_key only belongs to one organisation.
default: 1
state:
description:
- State of the dashboard.
required: true
choices: [ absent, export, present ]
default: present
slug:
description:
- Deprecated since Grafana 5. Use grafana dashboard uid instead.
- slug of the dashboard. It's the friendly url name of the dashboard.
- When C(state) is C(present), this parameter can override the slug in the meta section of the json file.
- If you want to import a json dashboard exported directly from the interface (not from the api),
you have to specify the slug parameter because there is no meta section in the exported json.
uid:
version_added: 2.7
description:
      - uid of the dashboard to export when C(state) is C(export) or C(absent).
path:
description:
- The path to the json file containing the Grafana dashboard to import or export.
overwrite:
description:
- Override existing dashboard when state is present.
type: bool
default: 'no'
message:
description:
- Set a commit message for the version history.
- Only used when C(state) is C(present).
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, client_key is not required
version_added: 2.7
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client
- authentication. If client_cert contains both the certificate and key, this option is not required
version_added: 2.7
use_proxy:
description:
- Boolean of whether or not to use proxy.
default: 'yes'
type: bool
version_added: 2.7
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: Import Grafana dashboard foo
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_api_key: "{{ grafana_api_key }}"
state: present
message: Updated by ansible
overwrite: yes
path: /path/to/dashboards/foo.json
- name: Export dashboard
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_user: "admin"
grafana_password: "{{ grafana_password }}"
org_id: 1
state: export
uid: "000000653"
path: "/path/to/dashboards/000000653.json"
'''
RETURN = '''
---
uid:
description: uid or slug of the created / deleted / exported dashboard.
returned: success
type: string
sample: 000000063
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils._text import to_native
__metaclass__ = type
class GrafanaAPIException(Exception):
pass
class GrafanaMalformedJson(Exception):
pass
class GrafanaExportException(Exception):
pass
class GrafanaDeleteException(Exception):
pass
def grafana_switch_organisation(module, grafana_url, org_id, headers):
r, info = fetch_url(module, '%s/api/user/using/%s' % (grafana_url, org_id), headers=headers, method='POST')
if info['status'] != 200:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (org_id, info))
def grafana_headers(module, data):
headers = {'content-type': 'application/json; charset=utf8'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
module.params['force_basic_auth'] = True
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
return headers
def get_grafana_version(module, grafana_url, headers):
grafana_version = None
r, info = fetch_url(module, '%s/api/frontend/settings' % grafana_url, headers=headers, method='GET')
if info['status'] == 200:
try:
settings = json.loads(r.read())
            grafana_version = settings['buildInfo']['version'].split('.')[0]
except Exception as e:
raise GrafanaAPIException(e)
else:
raise GrafanaAPIException('Unable to get grafana version : %s' % info)
return int(grafana_version)
def grafana_dashboard_exists(module, grafana_url, uid, headers):
dashboard_exists = False
dashboard = {}
grafana_version = get_grafana_version(module, grafana_url, headers)
if grafana_version >= 5:
r, info = fetch_url(module, '%s/api/dashboards/uid/%s' % (grafana_url, uid), headers=headers, method='GET')
else:
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (grafana_url, uid), headers=headers, method='GET')
if info['status'] == 200:
dashboard_exists = True
try:
dashboard = json.loads(r.read())
except Exception as e:
raise GrafanaAPIException(e)
elif info['status'] == 404:
dashboard_exists = False
else:
raise GrafanaAPIException('Unable to get dashboard %s : %s' % (uid, info))
return dashboard_exists, dashboard
def grafana_create_dashboard(module, data):
# define data payload for grafana API
try:
with open(data['path'], 'r') as json_file:
payload = json.load(json_file)
except Exception as e:
raise GrafanaAPIException("Can't load json file %s" % to_native(e))
# Check that the dashboard JSON is nested under the 'dashboard' key
if 'dashboard' not in payload:
payload = {'dashboard': payload}
# define http header
headers = grafana_headers(module, data)
grafana_version = get_grafana_version(module, data['grafana_url'], headers)
if grafana_version < 5:
if data.get('slug'):
uid = data['slug']
elif 'meta' in payload and 'slug' in payload['meta']:
uid = payload['meta']['slug']
else:
raise GrafanaMalformedJson('No slug found in json. Needed with grafana < 5')
else:
if data.get('uid'):
uid = data['uid']
elif 'uid' in payload['dashboard']:
uid = payload['dashboard']['uid']
else:
uid = None
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
result = {}
if dashboard_exists is True:
if dashboard == payload:
# unchanged
result['uid'] = uid
result['msg'] = "Dashboard %s unchanged." % uid
result['changed'] = False
else:
# update
if 'overwrite' in data and data['overwrite']:
payload['overwrite'] = True
if 'message' in data and data['message']:
payload['message'] = data['message']
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
if grafana_version >= 5:
try:
dashboard = json.loads(r.read())
uid = dashboard['uid']
except Exception as e:
raise GrafanaAPIException(e)
result['uid'] = uid
result['msg'] = "Dashboard %s updated" % uid
result['changed'] = True
else:
body = json.loads(info['body'])
raise GrafanaAPIException('Unable to update the dashboard %s : %s' % (uid, body['message']))
else:
# create
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
result['msg'] = "Dashboard %s created" % uid
result['changed'] = True
if grafana_version >= 5:
try:
dashboard = json.loads(r.read())
uid = dashboard['uid']
except Exception as e:
raise GrafanaAPIException(e)
result['uid'] = uid
else:
raise GrafanaAPIException('Unable to create the new dashboard %s : %s - %s.' % (uid, info['status'], info))
return result
def grafana_delete_dashboard(module, data):
# define http headers
headers = grafana_headers(module, data)
grafana_version = get_grafana_version(module, data['grafana_url'], headers)
if grafana_version < 5:
if data.get('slug'):
uid = data['slug']
else:
raise GrafanaMalformedJson('No slug parameter. Needed with grafana < 5')
else:
if data.get('uid'):
uid = data['uid']
else:
            raise GrafanaDeleteException('No uid specified')
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
result = {}
if dashboard_exists is True:
# delete
if grafana_version < 5:
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (data['grafana_url'], uid), headers=headers, method='DELETE')
else:
r, info = fetch_url(module, '%s/api/dashboards/uid/%s' % (data['grafana_url'], uid), headers=headers, method='DELETE')
if info['status'] == 200:
result['msg'] = "Dashboard %s deleted" % uid
result['changed'] = True
result['uid'] = uid
else:
            raise GrafanaAPIException('Unable to delete the dashboard %s : %s' % (uid, info))
else:
# dashboard does not exist, do nothing
result = {'msg': "Dashboard %s does not exist." % uid,
'changed': False,
'uid': uid}
return result
def grafana_export_dashboard(module, data):
# define http headers
headers = grafana_headers(module, data)
grafana_version = get_grafana_version(module, data['grafana_url'], headers)
if grafana_version < 5:
if data.get('slug'):
uid = data['slug']
else:
raise GrafanaMalformedJson('No slug parameter. Needed with grafana < 5')
else:
if data.get('uid'):
uid = data['uid']
else:
raise GrafanaExportException('No uid specified')
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
if dashboard_exists is True:
try:
with open(data['path'], 'w') as f:
f.write(json.dumps(dashboard))
except Exception as e:
raise GrafanaExportException("Can't write json file : %s" % to_native(e))
result = {'msg': "Dashboard %s exported to %s" % (uid, data['path']),
'uid': uid,
'changed': True}
else:
result = {'msg': "Dashboard %s does not exist." % uid,
'uid': uid,
'changed': False}
return result
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# remove unnecessary arguments
del argument_spec['force']
del argument_spec['force_basic_auth']
del argument_spec['http_agent']
argument_spec.update(
state=dict(choices=['present', 'absent', 'export'], default='present'),
url=dict(aliases=['grafana_url'], required=True),
url_username=dict(aliases=['grafana_user'], default='admin'),
url_password=dict(aliases=['grafana_password'], default='admin', no_log=True),
grafana_api_key=dict(type='str', no_log=True),
org_id=dict(default=1, type='int'),
uid=dict(type='str'),
slug=dict(type='str'),
path=dict(type='str'),
overwrite=dict(type='bool', default=False),
message=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_together=[['url_username', 'url_password', 'org_id']],
mutually_exclusive=[['grafana_user', 'grafana_api_key'], ['uid', 'slug']],
)
try:
if module.params['state'] == 'present':
result = grafana_create_dashboard(module, module.params)
elif module.params['state'] == 'absent':
result = grafana_delete_dashboard(module, module.params)
else:
result = grafana_export_dashboard(module, module.params)
except GrafanaAPIException as e:
module.fail_json(
failed=True,
msg="error : %s" % to_native(e)
)
return
except GrafanaMalformedJson as e:
module.fail_json(
failed=True,
msg="error : json file does not contain a meta section with a slug parameter, or you did'nt specify the slug parameter"
)
return
except GrafanaDeleteException as e:
module.fail_json(
failed=True,
msg="error : Can't delete dashboard : %s" % to_native(e)
)
return
except GrafanaExportException as e:
module.fail_json(
failed=True,
msg="error : Can't export dashboard : %s" % to_native(e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()
| gpl-3.0 |
2014c2g2/teamwork | wsgi/static/reeborg/src/libraries/brython/Lib/posix.py | 11 | 20408 | """This module provides access to operating system functionality that is
standardized by the C Standard and the POSIX standard (a thinly
disguised Unix interface). Refer to the library manual and
corresponding Unix manual entries for more information on calls."""
import datetime
F_OK = 0
O_APPEND = 8
O_BINARY = 32768
O_CREAT = 256
O_EXCL = 1024
O_NOINHERIT = 128
O_RANDOM = 16
O_RDONLY = 0
O_RDWR = 2
O_SEQUENTIAL = 32
O_SHORT_LIVED = 4096
O_TEMPORARY = 64
O_TEXT = 16384
O_TRUNC = 512
O_WRONLY = 1
P_DETACH = 4
P_NOWAIT = 1
P_NOWAITO = 3
P_OVERLAY = 2
P_WAIT = 0
R_OK = 4
TMP_MAX = 32767
W_OK = 2
X_OK = 1
class __loader__:
pass
def _exit(*args,**kw):
"""_exit(status)
Exit to the system with specified status, without normal exit processing."""
pass
def _getdiskusage(*args,**kw):
"""_getdiskusage(path) -> (total, free)
Return disk usage statistics about the given path as (total, free) tuple."""
pass
def _getfileinformation(*args,**kw):
pass
def _getfinalpathname(*args,**kw):
pass
def _getfullpathname(*args,**kw):
pass
_have_functions = ['MS_WINDOWS']
def _isdir(*args,**kw):
"""Return true if the pathname refers to an existing directory."""
pass
def abort(*args,**kw):
"""abort() -> does not return!
Abort the interpreter immediately. This 'dumps core' or otherwise fails
in the hardest way possible on the hosting operating system."""
pass
def access(*args,**kw):
"""access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True)
Use the real uid/gid to test for access to a path. Returns True if granted,
False otherwise.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
If effective_ids is True, access will use the effective uid/gid instead of
the real uid/gid.
If follow_symlinks is False, and the last element of the path is a symbolic
link, access will examine the symbolic link itself instead of the file the
link points to.
dir_fd, effective_ids, and follow_symlinks may not be implemented
on your platform. If they are unavailable, using them will raise a
NotImplementedError.
Note that most operations will use the effective uid/gid, therefore this
routine can be used in a suid/sgid environment to test if the invoking user
has the specified access to the path.
The mode argument can be F_OK to test existence, or the inclusive-OR
of R_OK, W_OK, and X_OK."""
pass
def chdir(*args,**kw):
"""chdir(path)
Change the current working directory to the specified path.
path may always be specified as a string.
On some platforms, path may also be specified as an open file descriptor.
If this functionality is unavailable, using it raises an exception."""
pass
def chmod(*args,**kw):
"""chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
Change the access permissions of a file.
path may always be specified as a string.
On some platforms, path may also be specified as an open file descriptor.
If this functionality is unavailable, using it raises an exception.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
If follow_symlinks is False, and the last element of the path is a symbolic
link, chmod will modify the symbolic link itself instead of the file the
link points to.
It is an error to use dir_fd or follow_symlinks when specifying path as
an open file descriptor.
dir_fd and follow_symlinks may not be implemented on your platform.
If they are unavailable, using them will raise a NotImplementedError."""
pass
def close(*args,**kw):
"""close(fd)
Close a file descriptor (for low level IO)."""
pass
def closerange(*args,**kw):
"""closerange(fd_low, fd_high)
Closes all file descriptors in [fd_low, fd_high), ignoring errors."""
pass
def device_encoding(*args,**kw):
"""device_encoding(fd) -> str
Return a string describing the encoding of the device
if the output is a terminal; else return None."""
pass
def dup(*args,**kw):
"""dup(fd) -> fd2
Return a duplicate of a file descriptor."""
pass
def dup2(*args,**kw):
"""dup2(old_fd, new_fd)
Duplicate file descriptor."""
pass
environ = {}
error = OSError
def execv(*args,**kw):
"""execv(path, args)
Execute an executable path with arguments, replacing current process.
path: path of executable file
args: tuple or list of strings"""
pass
def execve(*args,**kw):
"""execve(path, args, env)
Execute a path with arguments and environment, replacing current process.
path: path of executable file
args: tuple or list of arguments
env: dictionary of strings mapping to strings
On some platforms, you may specify an open file descriptor for path;
execve will execute the program the file descriptor is open to.
If this functionality is unavailable, using it raises NotImplementedError."""
pass
def fstat(*args,**kw):
"""fstat(fd) -> stat result
Like stat(), but for an open file descriptor.
Equivalent to stat(fd=fd)."""
pass
def fsync(*args,**kw):
"""fsync(fildes)
    force write of file with file descriptor to disk."""
pass
def get_terminal_size(*args,**kw):
"""Return the size of the terminal window as (columns, lines).
The optional argument fd (default standard output) specifies
which file descriptor should be queried.
If the file descriptor is not connected to a terminal, an OSError
is thrown.
This function will only be defined if an implementation is
available for this system.
shutil.get_terminal_size is the high-level function which should
normally be used, os.get_terminal_size is the low-level implementation."""
pass
def getcwd(*args,**kw):
"""getcwd() -> path
Return a unicode string representing the current working directory."""
return __BRYTHON__.brython_path # XXX fix me
def getcwdb(*args,**kw):
"""getcwdb() -> path
Return a bytes string representing the current working directory."""
pass
def getlogin(*args,**kw):
"""getlogin() -> string
Return the actual login name."""
pass
def getpid(*args,**kw):
"""getpid() -> pid
Return the current process id"""
return 0
def getppid(*args,**kw):
"""getppid() -> ppid
Return the parent's process id. If the parent process has already exited,
Windows machines will still return its id; others systems will return the id
of the 'init' process (1)."""
pass
def isatty(*args,**kw):
"""isatty(fd) -> bool
Return True if the file descriptor 'fd' is an open file descriptor
connected to the slave end of a terminal."""
pass
def kill(*args,**kw):
"""kill(pid, sig)
Kill a process with a signal."""
pass
def link(*args,**kw):
"""link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True)
Create a hard link to a file.
If either src_dir_fd or dst_dir_fd is not None, it should be a file
descriptor open to a directory, and the respective path string (src or dst)
should be relative; the path will then be relative to that directory.
If follow_symlinks is False, and the last element of src is a symbolic
link, link will create a link to the symbolic link itself instead of the
file the link points to.
src_dir_fd, dst_dir_fd, and follow_symlinks may not be implemented on your
platform. If they are unavailable, using them will raise a
NotImplementedError."""
pass
def listdir(*args,**kw):
"""listdir(path='.') -> list_of_filenames
Return a list containing the names of the files in the directory.
The list is in arbitrary order. It does not include the special
entries '.' and '..' even if they are present in the directory.
path can be specified as either str or bytes. If path is bytes,
the filenames returned will also be bytes; in all other circumstances
the filenames returned will be str.
On some platforms, path may also be specified as an open file descriptor;
the file descriptor must refer to a directory.
If this functionality is unavailable, using it raises NotImplementedError."""
pass
def lseek(*args,**kw):
"""lseek(fd, pos, how) -> newpos
Set the current position of a file descriptor.
Return the new cursor position in bytes, starting from the beginning."""
pass
def lstat(*args,**kw):
"""lstat(path, *, dir_fd=None) -> stat result
Like stat(), but do not follow symbolic links.
Equivalent to stat(path, follow_symlinks=False)."""
return stat_result()
def mkdir(*args,**kw):
"""mkdir(path, mode=0o777, *, dir_fd=None)
Create a directory.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError.
The mode argument is ignored on Windows."""
pass
def open(*args,**kw):
"""open(path, flags, mode=0o777, *, dir_fd=None)
Open a file for low level IO. Returns a file handle (integer).
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError."""
pass
def pipe(*args,**kw):
"""pipe() -> (read_end, write_end)
Create a pipe."""
pass
def putenv(*args,**kw):
"""putenv(key, value)
Change or add an environment variable."""
pass
def read(*args,**kw):
"""read(fd, buffersize) -> string
Read a file descriptor."""
pass
def readlink(*args,**kw):
"""readlink(path, *, dir_fd=None) -> path
Return a string representing the path to which the symbolic link points.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError."""
pass
def remove(*args,**kw):
"""remove(path, *, dir_fd=None)
Remove a file (same as unlink()).
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError."""
pass
def rename(*args,**kw):
"""rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
Rename a file or directory.
If either src_dir_fd or dst_dir_fd is not None, it should be a file
descriptor open to a directory, and the respective path string (src or dst)
should be relative; the path will then be relative to that directory.
src_dir_fd and dst_dir_fd, may not be implemented on your platform.
If they are unavailable, using them will raise a NotImplementedError."""
pass
def replace(*args,**kw):
"""replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
Rename a file or directory, overwriting the destination.
If either src_dir_fd or dst_dir_fd is not None, it should be a file
descriptor open to a directory, and the respective path string (src or dst)
should be relative; the path will then be relative to that directory.
src_dir_fd and dst_dir_fd, may not be implemented on your platform.
If they are unavailable, using them will raise a NotImplementedError."""
pass
def rmdir(*args,**kw):
"""rmdir(path, *, dir_fd=None)
Remove a directory.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError."""
pass
def spawnv(*args,**kw):
"""spawnv(mode, path, args)
Execute the program 'path' in a new process.
mode: mode of process creation
path: path of executable file
args: tuple or list of strings"""
pass
def spawnve(*args,**kw):
"""spawnve(mode, path, args, env)
Execute the program 'path' in a new process.
mode: mode of process creation
path: path of executable file
args: tuple or list of arguments
env: dictionary of strings mapping to strings"""
pass
def startfile(*args,**kw):
"""startfile(filepath [, operation]) - Start a file with its associated application.
When "operation" is not specified or "open", this acts like
double-clicking the file in Explorer, or giving the file name as an
argument to the DOS "start" command: the file is opened with whatever
application (if any) its extension is associated.
When another "operation" is given, it specifies what should be done with
the file. A typical operation is "print".
startfile returns as soon as the associated application is launched.
There is no option to wait for the application to close, and no way
to retrieve the application's exit status.
The filepath is relative to the current directory. If you want to use
an absolute path, make sure the first character is not a slash ("/");
the underlying Win32 ShellExecute function doesn't work if it is."""
pass
def stat(*args,**kw):
"""stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result
Perform a stat system call on the given path.
path may be specified as either a string or as an open file descriptor.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be supported on your platform; if it is unavailable, using
it will raise a NotImplementedError.
If follow_symlinks is False, and the last element of the path is a symbolic
link, stat will examine the symbolic link itself instead of the file the
link points to.
It is an error to use dir_fd or follow_symlinks when specifying path as
an open file descriptor."""
return stat_result()
def stat_float_times(*args,**kw):
"""stat_float_times([newval]) -> oldval
Determine whether os.[lf]stat represents time stamps as float objects.
If newval is True, future calls to stat() return floats, if it is False,
future calls return ints.
If newval is omitted, return the current setting.
"""
pass
class stat_result:
def __init__(self):
"""st_mode - protection bits,
st_ino - inode number,
st_dev - device,
st_nlink - number of hard links,
st_uid - user id of owner,
st_gid - group id of owner,
st_size - size of file, in bytes,
st_atime - time of most recent access expressed in seconds,
st_mtime - time of most recent content modification expressed in
seconds,
st_ctime - platform dependent; time of most recent metadata change on
Unix, or the time of creation on Windows, expressed in seconds
st_atime_ns - time of most recent access expressed in nanoseconds as an
integer,
st_mtime_ns - time of most recent content modification expressed in
nanoseconds as an integer,
st_ctime_ns - platform dependent; time of most recent metadata change
on Unix, or the time of creation on Windows, expressed in
nanoseconds as an integer """
# Brython : fake values
self.st_atime = datetime.datetime.now()
self.st_mtime = self.st_ctime = self.st_atime_ns = \
self.st_mtime_ns = self.st_ctime_ns = self.st_atime
self.st_uid = self.st_gid = self.st_ino = -1
self.st_mode = 0
self.st_size = 1
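# --- Illustrative check (editor's sketch, not part of the shim API) ---
# stat()/lstat() in this shim always return placeholder metadata instead of
# touching a real filesystem.
def _example_fake_stat():
    st = stat('any/path')
    return st.st_size == 1 and st.st_uid == -1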
class statvfs_result:
pass
def strerror(*args,**kw):
"""strerror(code) -> string
Translate an error code to a message string."""
pass
def symlink(*args,**kw):
"""symlink(src, dst, target_is_directory=False, *, dir_fd=None)
Create a symbolic link pointing to src named dst.
target_is_directory is required on Windows if the target is to be
interpreted as a directory. (On Windows, symlink requires
Windows 6.0 or greater, and raises a NotImplementedError otherwise.)
target_is_directory is ignored on non-Windows platforms.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError."""
pass
def system(*args,**kw):
"""system(command) -> exit_status
Execute the command (a string) in a subshell."""
pass
class terminal_size:
pass
def times(*args,**kw):
"""times() -> times_result
Return an object containing floating point numbers indicating process
times. The object behaves like a named tuple with these fields:
(utime, stime, cutime, cstime, elapsed_time)"""
pass
class times_result:
pass
def umask(*args,**kw):
"""umask(new_mask) -> old_mask
Set the current numeric umask and return the previous umask."""
pass
class uname_result:
pass
def unlink(*args,**kw):
"""unlink(path, *, dir_fd=None)
Remove a file (same as remove()).
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
dir_fd may not be implemented on your platform.
If it is unavailable, using it will raise a NotImplementedError."""
pass
def urandom(n):
"""urandom(n) -> str
Return n random bytes suitable for cryptographic use."""
import __random
    randbytes = [chr(__random.randint(0, 255)) for i in range(n)]
return ''.join(randbytes)
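# --- Illustrative usage (editor's sketch, not part of the shim API) ---
# Note: despite the docstring, a plain PRNG backs this shim, and it returns
# a str of length n rather than bytes; do not rely on it for cryptography.
def _example_urandom():
    token = urandom(8)
    return isinstance(token, str) and len(token) == 8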
def utime(*args,**kw):
"""utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path.
path may always be specified as a string.
On some platforms, path may also be specified as an open file descriptor.
If this functionality is unavailable, using it raises an exception.
If times is not None, it must be a tuple (atime, mtime);
atime and mtime should be expressed as float seconds since the epoch.
If ns is not None, it must be a tuple (atime_ns, mtime_ns);
atime_ns and mtime_ns should be expressed as integer nanoseconds
since the epoch.
If both times and ns are None, utime uses the current time.
Specifying tuples for both times and ns is an error.
If dir_fd is not None, it should be a file descriptor open to a directory,
and path should be relative; path will then be relative to that directory.
If follow_symlinks is False, and the last element of the path is a symbolic
link, utime will modify the symbolic link itself instead of the file the
link points to.
It is an error to use dir_fd or follow_symlinks when specifying path
as an open file descriptor.
dir_fd and follow_symlinks may not be available on your platform.
If they are unavailable, using them will raise a NotImplementedError."""
pass
def waitpid(*args,**kw):
"""waitpid(pid, options) -> (pid, status << 8)
Wait for completion of a given process. options is ignored on Windows."""
pass
def write(*args,**kw):
"""write(fd, string) -> byteswritten
Write a string to a file descriptor."""
pass
| gpl-2.0 |
switchboardOp/ansible | contrib/inventory/cloudstack.py | 119 | 9137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Ansible CloudStack external inventory script.
=============================================
Generates Ansible inventory from CloudStack. Configuration is read from
'cloudstack.ini'. If you need to pass the project, write a simple wrapper
script, e.g. project_cloudstack.sh:
#!/bin/bash
cloudstack.py --project <your_project> $@
When run against a specific host, this script returns the following attributes
based on the data obtained from CloudStack API:
"web01": {
"cpu_number": 2,
"nic": [
{
"ip": "10.102.76.98",
"mac": "02:00:50:99:00:01",
"type": "Isolated",
"netmask": "255.255.255.0",
"gateway": "10.102.76.1"
},
{
"ip": "10.102.138.63",
"mac": "06:b7:5a:00:14:84",
"type": "Shared",
"netmask": "255.255.255.0",
"gateway": "10.102.138.1"
}
],
"default_ip": "10.102.76.98",
"zone": "ZUERICH",
"created": "2014-07-02T07:53:50+0200",
"hypervisor": "VMware",
"memory": 2048,
"state": "Running",
"tags": [],
"cpu_speed": 1800,
"affinity_group": [],
"service_offering": "Small",
"cpu_used": "62%"
}
usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN]
"""
from __future__ import print_function
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
try:
from cs import CloudStack, CloudStackException, read_config
except ImportError:
print("Error: CloudStack library must be installed: pip install cs.",
file=sys.stderr)
sys.exit(1)
class CloudStackInventory(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
parser.add_argument('--project')
parser.add_argument('--domain')
options = parser.parse_args()
try:
self.cs = CloudStack(**read_config())
except CloudStackException:
print("Error: Could not connect to CloudStack API", file=sys.stderr)
domain_id = None
if options.domain:
domain_id = self.get_domain_id(options.domain)
project_id = None
if options.project:
project_id = self.get_project_id(options.project, domain_id)
if options.host:
data = self.get_host(options.host, project_id, domain_id)
print(json.dumps(data, indent=2))
elif options.list:
data = self.get_list(project_id, domain_id)
print(json.dumps(data, indent=2))
else:
print("usage: --list | --host <hostname> [--project <project>] [--domain <domain_path>]", file=sys.stderr)
sys.exit(1)
def get_domain_id(self, domain):
domains = self.cs.listDomains(listall=True)
if domains:
for d in domains['domain']:
if d['path'].lower() == domain.lower():
return d['id']
print("Error: Domain %s not found." % domain, file=sys.stderr)
sys.exit(1)
def get_project_id(self, project, domain_id=None):
projects = self.cs.listProjects(domainid=domain_id)
if projects:
for p in projects['project']:
if p['name'] == project or p['id'] == project:
return p['id']
print("Error: Project %s not found." % project, file=sys.stderr)
sys.exit(1)
def get_host(self, name, project_id=None, domain_id=None):
hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id)
data = {}
if not hosts:
return data
for host in hosts['virtualmachine']:
host_name = host['displayname']
if name == host_name:
data['zone'] = host['zonename']
if 'group' in host:
data['group'] = host['group']
data['state'] = host['state']
data['service_offering'] = host['serviceofferingname']
data['affinity_group'] = host['affinitygroup']
data['security_group'] = host['securitygroup']
data['cpu_number'] = host['cpunumber']
data['cpu_speed'] = host['cpuspeed']
if 'cpuused' in host:
data['cpu_used'] = host['cpuused']
data['memory'] = host['memory']
data['tags'] = host['tags']
data['hypervisor'] = host['hypervisor']
data['created'] = host['created']
data['nic'] = []
for nic in host['nic']:
data['nic'].append({
'ip': nic['ipaddress'],
'mac': nic['macaddress'],
'netmask': nic['netmask'],
'gateway': nic['gateway'],
'type': nic['type'],
})
if nic['isdefault']:
data['default_ip'] = nic['ipaddress']
break
return data
def get_list(self, project_id=None, domain_id=None):
data = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
},
}
groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id)
if groups:
for group in groups['instancegroup']:
group_name = group['name']
if group_name and group_name not in data:
data[group_name] = {
'hosts': []
}
hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id)
if not hosts:
return data
for host in hosts['virtualmachine']:
host_name = host['displayname']
data['all']['hosts'].append(host_name)
data['_meta']['hostvars'][host_name] = {}
# Make a group per zone
data['_meta']['hostvars'][host_name]['zone'] = host['zonename']
group_name = host['zonename']
if group_name not in data:
data[group_name] = {
'hosts': []
}
data[group_name]['hosts'].append(host_name)
if 'group' in host:
data['_meta']['hostvars'][host_name]['group'] = host['group']
data['_meta']['hostvars'][host_name]['state'] = host['state']
data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname']
data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup']
data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup']
data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber']
data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed']
if 'cpuused' in host:
data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused']
data['_meta']['hostvars'][host_name]['created'] = host['created']
data['_meta']['hostvars'][host_name]['memory'] = host['memory']
data['_meta']['hostvars'][host_name]['tags'] = host['tags']
data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor']
data['_meta']['hostvars'][host_name]['created'] = host['created']
data['_meta']['hostvars'][host_name]['nic'] = []
for nic in host['nic']:
data['_meta']['hostvars'][host_name]['nic'].append({
'ip': nic['ipaddress'],
'mac': nic['macaddress'],
'netmask': nic['netmask'],
'gateway': nic['gateway'],
'type': nic['type'],
})
if nic['isdefault']:
data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress']
group_name = ''
if 'group' in host:
group_name = host['group']
if group_name and group_name in data:
data[group_name]['hosts'].append(host_name)
return data
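# Editor's note (illustrative): approximate shape of the JSON printed by
# --list; "_meta.hostvars" is the standard Ansible dynamic-inventory layout.
#
#   {
#     "all": {"hosts": ["web01"]},
#     "ZUERICH": {"hosts": ["web01"]},
#     "_meta": {"hostvars": {"web01": {"zone": "ZUERICH", "state": "Running"}}}
#   }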
if __name__ == '__main__':
CloudStackInventory()
| gpl-3.0 |
goldsborough/.emacs | .emacs.d/.python-environments/default/lib/python3.5/encodings/cp869.py | 272 | 32965 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP869.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp869',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
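# --- Illustrative round-trip (editor's sketch; relies on this codec being
# registered under the name 'cp869', as the stdlib encodings package does) ---
def _example_roundtrip():
    raw = '\u03b1\u03b2\u03b3'.encode('cp869')  # Greek alpha, beta, gamma
    assert raw == b'\xd6\xd7\xd8'               # per the decoding table below
    assert raw.decode('cp869') == '\u03b1\u03b2\u03b3'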
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: None, # UNDEFINED
0x0081: None, # UNDEFINED
0x0082: None, # UNDEFINED
0x0083: None, # UNDEFINED
0x0084: None, # UNDEFINED
0x0085: None, # UNDEFINED
0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0087: None, # UNDEFINED
0x0088: 0x00b7, # MIDDLE DOT
0x0089: 0x00ac, # NOT SIGN
0x008a: 0x00a6, # BROKEN BAR
0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK
0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x008e: 0x2015, # HORIZONTAL BAR
0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x0093: None, # UNDEFINED
0x0094: None, # UNDEFINED
0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x0097: 0x00a9, # COPYRIGHT SIGN
0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0099: 0x00b2, # SUPERSCRIPT TWO
0x009a: 0x00b3, # SUPERSCRIPT THREE
0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA
0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00b7: 0x039c, # GREEK CAPITAL LETTER MU
0x00b8: 0x039d, # GREEK CAPITAL LETTER NU
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x039e, # GREEK CAPITAL LETTER XI
0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI
0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00d7: 0x03b2, # GREEK SMALL LETTER BETA
0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA
0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e1: 0x03b7, # GREEK SMALL LETTER ETA
0x00e2: 0x03b8, # GREEK SMALL LETTER THETA
0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA
0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00e6: 0x03bc, # GREEK SMALL LETTER MU
0x00e7: 0x03bd, # GREEK SMALL LETTER NU
0x00e8: 0x03be, # GREEK SMALL LETTER XI
0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00ea: 0x03c0, # GREEK SMALL LETTER PI
0x00eb: 0x03c1, # GREEK SMALL LETTER RHO
0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ee: 0x03c4, # GREEK SMALL LETTER TAU
0x00ef: 0x0384, # GREEK TONOS
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f3: 0x03c6, # GREEK SMALL LETTER PHI
0x00f4: 0x03c7, # GREEK SMALL LETTER CHI
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x03c8, # GREEK SMALL LETTER PSI
0x00f7: 0x0385, # GREEK DIALYTIKA TONOS
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\ufffe' # 0x0080 -> UNDEFINED
'\ufffe' # 0x0081 -> UNDEFINED
'\ufffe' # 0x0082 -> UNDEFINED
'\ufffe' # 0x0083 -> UNDEFINED
'\ufffe' # 0x0084 -> UNDEFINED
'\ufffe' # 0x0085 -> UNDEFINED
'\u0386' # 0x0086 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\ufffe' # 0x0087 -> UNDEFINED
'\xb7' # 0x0088 -> MIDDLE DOT
'\xac' # 0x0089 -> NOT SIGN
'\xa6' # 0x008a -> BROKEN BAR
'\u2018' # 0x008b -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x008c -> RIGHT SINGLE QUOTATION MARK
'\u0388' # 0x008d -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u2015' # 0x008e -> HORIZONTAL BAR
'\u0389' # 0x008f -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0x0090 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u03aa' # 0x0091 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u038c' # 0x0092 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\ufffe' # 0x0093 -> UNDEFINED
'\ufffe' # 0x0094 -> UNDEFINED
'\u038e' # 0x0095 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u03ab' # 0x0096 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\xa9' # 0x0097 -> COPYRIGHT SIGN
'\u038f' # 0x0098 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\xb2' # 0x0099 -> SUPERSCRIPT TWO
'\xb3' # 0x009a -> SUPERSCRIPT THREE
'\u03ac' # 0x009b -> GREEK SMALL LETTER ALPHA WITH TONOS
'\xa3' # 0x009c -> POUND SIGN
'\u03ad' # 0x009d -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0x009e -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0x009f -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03ca' # 0x00a0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u0390' # 0x00a1 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03cc' # 0x00a2 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0x00a3 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u0391' # 0x00a4 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x00a5 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x00a6 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x00a7 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x00a8 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x00a9 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x00aa -> GREEK CAPITAL LETTER ETA
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\u0398' # 0x00ac -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x00ad -> GREEK CAPITAL LETTER IOTA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u039e' # 0x00bd -> GREEK CAPITAL LETTER XI
'\u039f' # 0x00be -> GREEK CAPITAL LETTER OMICRON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u03a0' # 0x00c6 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x00c7 -> GREEK CAPITAL LETTER RHO
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u03a3' # 0x00cf -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0x00d0 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x00d1 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x00d2 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x00d3 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x00d4 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x00d5 -> GREEK CAPITAL LETTER OMEGA
'\u03b1' # 0x00d6 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x00d7 -> GREEK SMALL LETTER BETA
'\u03b3' # 0x00d8 -> GREEK SMALL LETTER GAMMA
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u03b4' # 0x00dd -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x00de -> GREEK SMALL LETTER EPSILON
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b6' # 0x00e0 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0x00e1 -> GREEK SMALL LETTER ETA
'\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA
'\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU
'\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU
'\u03be' # 0x00e8 -> GREEK SMALL LETTER XI
'\u03bf' # 0x00e9 -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0x00ea -> GREEK SMALL LETTER PI
'\u03c1' # 0x00eb -> GREEK SMALL LETTER RHO
'\u03c3' # 0x00ec -> GREEK SMALL LETTER SIGMA
'\u03c2' # 0x00ed -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0x00ee -> GREEK SMALL LETTER TAU
'\u0384' # 0x00ef -> GREEK TONOS
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u03c5' # 0x00f2 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0x00f3 -> GREEK SMALL LETTER PHI
'\u03c7' # 0x00f4 -> GREEK SMALL LETTER CHI
'\xa7' # 0x00f5 -> SECTION SIGN
'\u03c8' # 0x00f6 -> GREEK SMALL LETTER PSI
'\u0385' # 0x00f7 -> GREEK DIALYTIKA TONOS
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u03c9' # 0x00fa -> GREEK SMALL LETTER OMEGA
'\u03cb' # 0x00fb -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03b0' # 0x00fc -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03ce' # 0x00fd -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a3: 0x009c, # POUND SIGN
0x00a6: 0x008a, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x0097, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x0089, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x0099, # SUPERSCRIPT TWO
0x00b3: 0x009a, # SUPERSCRIPT THREE
0x00b7: 0x0088, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x0384: 0x00ef, # GREEK TONOS
0x0385: 0x00f7, # GREEK DIALYTIKA TONOS
0x0386: 0x0086, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x008d, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x008f, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x0090, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x0092, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x0095, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x0098, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0390: 0x00a1, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x0391: 0x00a4, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x00a5, # GREEK CAPITAL LETTER BETA
0x0393: 0x00a6, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x00a7, # GREEK CAPITAL LETTER DELTA
0x0395: 0x00a8, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x00a9, # GREEK CAPITAL LETTER ZETA
0x0397: 0x00aa, # GREEK CAPITAL LETTER ETA
0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA
0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA
0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x00b7, # GREEK CAPITAL LETTER MU
0x039d: 0x00b8, # GREEK CAPITAL LETTER NU
0x039e: 0x00bd, # GREEK CAPITAL LETTER XI
0x039f: 0x00be, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x00c6, # GREEK CAPITAL LETTER PI
0x03a1: 0x00c7, # GREEK CAPITAL LETTER RHO
0x03a3: 0x00cf, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x00d0, # GREEK CAPITAL LETTER TAU
0x03a5: 0x00d1, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x00d2, # GREEK CAPITAL LETTER PHI
0x03a7: 0x00d3, # GREEK CAPITAL LETTER CHI
0x03a8: 0x00d4, # GREEK CAPITAL LETTER PSI
0x03a9: 0x00d5, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x0091, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x0096, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x009b, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x009d, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x009e, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x009f, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b0: 0x00fc, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x03b1: 0x00d6, # GREEK SMALL LETTER ALPHA
0x03b2: 0x00d7, # GREEK SMALL LETTER BETA
0x03b3: 0x00d8, # GREEK SMALL LETTER GAMMA
0x03b4: 0x00dd, # GREEK SMALL LETTER DELTA
0x03b5: 0x00de, # GREEK SMALL LETTER EPSILON
0x03b6: 0x00e0, # GREEK SMALL LETTER ZETA
0x03b7: 0x00e1, # GREEK SMALL LETTER ETA
0x03b8: 0x00e2, # GREEK SMALL LETTER THETA
0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA
0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00e6, # GREEK SMALL LETTER MU
0x03bd: 0x00e7, # GREEK SMALL LETTER NU
0x03be: 0x00e8, # GREEK SMALL LETTER XI
0x03bf: 0x00e9, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00ea, # GREEK SMALL LETTER PI
0x03c1: 0x00eb, # GREEK SMALL LETTER RHO
0x03c2: 0x00ed, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00ec, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ee, # GREEK SMALL LETTER TAU
0x03c5: 0x00f2, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00f3, # GREEK SMALL LETTER PHI
0x03c7: 0x00f4, # GREEK SMALL LETTER CHI
0x03c8: 0x00f6, # GREEK SMALL LETTER PSI
0x03c9: 0x00fa, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00a0, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00fb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00a2, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00a3, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00fd, # GREEK SMALL LETTER OMEGA WITH TONOS
0x2015: 0x008e, # HORIZONTAL BAR
0x2018: 0x008b, # LEFT SINGLE QUOTATION MARK
0x2019: 0x008c, # RIGHT SINGLE QUOTATION MARK
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
Tiglas/pickup-planner | request_handler/flask/lib/python2.7/site-packages/wheel/signatures/__init__.py | 565 | 3779 | """
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
"""Lazy import-and-test of ed25519 module"""
global ed25519ll
if not ed25519ll:
try:
import ed25519ll # fast (thousands / s)
except (ImportError, OSError): # pragma nocover
from . import ed25519py as ed25519ll # pure Python (hundreds / s)
test()
return ed25519ll
def sign(payload, keypair):
"""Return a JWS-JS format signature given a JSON-serializable payload and
an Ed25519 keypair."""
get_ed25519ll()
#
header = {
"alg": ALG,
"jwk": {
"kty": ALG, # alg -> kty in jwk-08.
"vk": native(urlsafe_b64encode(keypair.vk))
}
}
encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
secured_input = b".".join((encoded_header, encoded_payload))
sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
encoded_signature = urlsafe_b64encode(signature)
return {"recipients":
[{"header":native(encoded_header),
"signature":native(encoded_signature)}],
"payload": native(encoded_payload)}
def assertTrue(condition, message=""):
if not condition:
raise ValueError(message)
def verify(jwsjs):
"""Return (decoded headers, payload) if all signatures in jwsjs are
consistent, else raise ValueError.
Caller must decide whether the keys are actually trusted."""
get_ed25519ll()
# XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
recipients = jwsjs["recipients"]
encoded_payload = binary(jwsjs["payload"])
headers = []
for recipient in recipients:
assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
h = binary(recipient["header"])
s = binary(recipient["signature"])
header = json.loads(native(urlsafe_b64decode(h)))
assertTrue(header["alg"] == ALG,
"Unexpected algorithm {0}".format(header["alg"]))
if "alg" in header["jwk"] and not "kty" in header["jwk"]:
header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
"Unexpected key type {0}".format(header["jwk"]["kty"]))
vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
secured_input = b".".join((h, encoded_payload))
sig = urlsafe_b64decode(s)
sig_msg = sig+secured_input
verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
verified_header, verified_payload = verified_input.split('.')
verified_header = binary(verified_header)
decoded_header = native(urlsafe_b64decode(verified_header))
headers.append(json.loads(decoded_header))
verified_payload = binary(verified_payload)
# only return header, payload that have passed through the crypto library.
payload = json.loads(native(urlsafe_b64decode(verified_payload)))
return headers, payload
def test():
kp = ed25519ll.crypto_sign_keypair()
payload = {'test': 'onstartup'}
jwsjs = json.loads(json.dumps(sign(payload, kp)))
verify(jwsjs)
jwsjs['payload'] += 'x'
try:
verify(jwsjs)
except ValueError:
pass
else: # pragma no cover
raise RuntimeError("No error from bad wheel.signatures payload.")
| mit |
abought/osf.io | website/addons/forward/views/config.py | 3 | 1679 | """Views for the node settings page."""
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from modularodm.exceptions import ValidationError
from framework.exceptions import HTTPError
from website.project.decorators import (
must_have_addon,
must_have_permission,
must_not_be_registration,
must_be_valid_project)
from website.addons.forward.utils import serialize_settings
@must_be_valid_project
@must_have_addon('forward', 'node')
def forward_config_get(node_addon, **kwargs):
return serialize_settings(node_addon)
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('forward', 'node')
def forward_config_put(auth, node_addon, **kwargs):
"""Set configuration for forward node settings, adding a log if URL has
changed.
:param-json str url: Forward URL
:raises: HTTPError(400) if values missing or invalid
"""
try:
node_addon.url = request.json['url']
node_addon.label = request.json.get('label')
except (KeyError, TypeError, ValueError):
raise HTTPError(http.BAD_REQUEST)
# Save settings and get changed fields; crash if validation fails
try:
saved_fields = node_addon.save()
except ValidationError:
raise HTTPError(http.BAD_REQUEST)
# Log change if URL updated
if 'url' in saved_fields:
node_addon.owner.add_log(
action='forward_url_changed',
params=dict(
node=node_addon.owner._id,
project=node_addon.owner.parent_id,
forward_url=node_addon.url,
),
auth=auth,
save=True,
)
return {}
| apache-2.0 |
dnlm92/chokoretto | main/lib/unidecode/x026.py | 165 | 4020 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'#', # 0x6f
'', # 0x70
'', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| mit |
terbolous/CouchPotatoServer | libs/subliminal/api.py | 106 | 5646 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE,
MATCHING_CONFIDENCE, create_list_tasks, consume_task, create_download_tasks,
group_by_video, key_subtitles)
from .language import language_set, language_list, LANGUAGES
import logging
__all__ = ['list_subtitles', 'download_subtitles']
logger = logging.getLogger(__name__)
def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
"""List subtitles in given paths according to the criteria
:param paths: path(s) to video file or folder
:type paths: string or list
:param languages: languages to search for, in preferred order
:type languages: list of :class:`~subliminal.language.Language` or string
:param list services: services to use for the search, in preferred order
:param bool force: force searching for subtitles even if some are detected
:param bool multi: search multiple languages for the same video
:param string cache_dir: path to the cache directory to use
:param int max_depth: maximum depth for scanning entries
:param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
:return: found subtitles
:rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]
"""
services = services or SERVICES
languages = language_set(languages) if languages is not None else language_set(LANGUAGES)
if isinstance(paths, basestring):
paths = [paths]
if any([not isinstance(p, unicode) for p in paths]):
logger.warning(u'Not all entries are unicode')
results = []
service_instances = {}
tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
for task in tasks:
try:
result = consume_task(task, service_instances)
results.append((task.video, result))
except:
logger.error(u'Error consuming task %r' % task, exc_info=True)
for service_instance in service_instances.itervalues():
service_instance.terminate()
return group_by_video(results)
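# Illustrative usage (a sketch; the path and language code are hypothetical):
#
#     for video, subs in list_subtitles([u'/videos/show.s01e01.mkv'],
#                                       languages=['en']).iteritems():
#         print video.path, len(subs)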
def download_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None):
"""Download subtitles in given paths according to the criteria
:param paths: path(s) to video file or folder
:type paths: string or list
:param languages: languages to search for, in preferred order
:type languages: list of :class:`~subliminal.language.Language` or string
:param list services: services to use for the search, in preferred order
:param bool force: force searching for subtitles even if some are detected
:param bool multi: search multiple languages for the same video
:param string cache_dir: path to the cache directory to use
:param int max_depth: maximum depth for scanning entries
:param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
:param order: preferred order for subtitles sorting
:type order: list of :data:`~subliminal.core.LANGUAGE_INDEX`, :data:`~subliminal.core.SERVICE_INDEX`, :data:`~subliminal.core.SERVICE_CONFIDENCE`, :data:`~subliminal.core.MATCHING_CONFIDENCE`
:return: downloaded subtitles
:rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]
.. note::
If you use ``multi=True``, :data:`~subliminal.core.LANGUAGE_INDEX` has to be the first item of the ``order`` list
or you might get unexpected results.
"""
services = services or SERVICES
languages = language_list(languages) if languages is not None else language_list(LANGUAGES)
if isinstance(paths, basestring):
paths = [paths]
order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
for video, subtitles in subtitles_by_video.iteritems():
subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
results = []
service_instances = {}
tasks = create_download_tasks(subtitles_by_video, languages, multi)
for task in tasks:
try:
result = consume_task(task, service_instances)
results.append((task.video, result))
except:
logger.error(u'Error consuming task %r' % task, exc_info=True)
for service_instance in service_instances.itervalues():
service_instance.terminate()
return group_by_video(results)
| gpl-3.0 |
nricklin/PyGithub | github/GistHistoryState.py | 74 | 10159 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
import github.CommitStats
import github.Gist
import github.GistFile
class GistHistoryState(github.GithubObject.CompletableGithubObject):
"""
This class represents GistHistoryStates as returned for example by http://developer.github.com/v3/todo
"""
@property
def change_status(self):
"""
:type: :class:`github.CommitStats.CommitStats`
"""
self._completeIfNotSet(self._change_status)
return self._change_status.value
@property
def comments(self):
"""
:type: integer
"""
self._completeIfNotSet(self._comments)
return self._comments.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def committed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._committed_at)
return self._committed_at.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def files(self):
"""
:type: dict of string to :class:`github.GistFile.GistFile`
"""
self._completeIfNotSet(self._files)
return self._files.value
@property
def forks(self):
"""
:type: list of :class:`github.Gist.Gist`
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def git_pull_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_pull_url)
return self._git_pull_url.value
@property
def git_push_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_push_url)
return self._git_push_url.value
@property
def history(self):
"""
:type: list of :class:`GistHistoryState`
"""
self._completeIfNotSet(self._history)
return self._history.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: string
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def public(self):
"""
:type: bool
"""
self._completeIfNotSet(self._public)
return self._public.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
@property
def version(self):
"""
:type: string
"""
self._completeIfNotSet(self._version)
return self._version.value
def _initAttributes(self):
self._change_status = github.GithubObject.NotSet
self._comments = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._committed_at = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._files = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._git_pull_url = github.GithubObject.NotSet
self._git_push_url = github.GithubObject.NotSet
self._history = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._public = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
self._version = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "change_status" in attributes: # pragma no branch
self._change_status = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["change_status"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "committed_at" in attributes: # pragma no branch
self._committed_at = self._makeDatetimeAttribute(attributes["committed_at"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "files" in attributes: # pragma no branch
self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes["files"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeListOfClassesAttribute(github.Gist.Gist, attributes["forks"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "git_pull_url" in attributes: # pragma no branch
self._git_pull_url = self._makeStringAttribute(attributes["git_pull_url"])
if "git_push_url" in attributes: # pragma no branch
self._git_push_url = self._makeStringAttribute(attributes["git_push_url"])
if "history" in attributes: # pragma no branch
self._history = self._makeListOfClassesAttribute(GistHistoryState, attributes["history"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "public" in attributes: # pragma no branch
self._public = self._makeBoolAttribute(attributes["public"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "version" in attributes: # pragma no branch
self._version = self._makeStringAttribute(attributes["version"])
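# Illustrative access pattern (a sketch; the gist object is assumed to come
# from elsewhere in PyGithub, e.g. a Gist whose history has been fetched):
#
#     for state in gist.history:              # list of GistHistoryState
#         print state.version, state.committed_at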
| gpl-3.0 |
drewp/tahoe-lafs | misc/simulators/simulate_load.py | 10 | 4916 | #!/usr/bin/env python
# WARNING. There is a bug in this script so that it does not simulate the actual Tahoe Two server selection algorithm that it was intended to simulate. See http://allmydata.org/trac/tahoe-lafs/ticket/302 (stop permuting peerlist, use SI as offset into ring instead?)
import random
SERVER_CAPACITY = 10**12
class Server:
def __init__(self):
self.si = random.randrange(0, 2**31)
self.used = 0
self.max = SERVER_CAPACITY
self.full_at_tick = None
def __repr__(self):
if self.full_at_tick is not None:
return "<%s %s full at %d>" % (self.__class__.__name__, self.si, self.full_at_tick)
else:
return "<%s %s>" % (self.__class__.__name__, self.si)
SERVERS = 4
K = 3
N = 10
def make_up_a_file_size():
return (2 ** random.randrange(8, 31))
def go(permutedpeerlist):
servers = [ Server() for x in range(SERVERS) ]
servers.sort(cmp=lambda x,y: cmp(x.si, y.si))
doubled_up_shares = 0
tick = 0
fullservers = 0
while True:
nextsharesize = make_up_a_file_size() / K
if permutedpeerlist:
random.shuffle(servers)
else:
# rotate a random number
rot = random.randrange(0, len(servers))
servers = servers[rot:] + servers[:rot]
i = 0
wrapped = False
sharestoput = N
while sharestoput:
server = servers[i]
if server.used + nextsharesize < server.max:
server.used += nextsharesize
sharestoput -= 1
if wrapped:
doubled_up_shares += 1
else:
if server.full_at_tick is None:
server.full_at_tick = tick
fullservers += 1
if fullservers == len(servers):
# print "Couldn't place share -- all servers full. Stopping."
return (servers, doubled_up_shares)
i += 1
if i == len(servers):
wrapped = True
i = 0
tick += 1
def div_ceil(n, d):
"""
The smallest integer k such that k*d >= n.
"""
return (n/d) + (n%d != 0)
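# Worked example: div_ceil(7, 3) == 3, div_ceil(9, 3) == 3, div_ceil(10, 3) == 4.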
DESIRED_COLUMNS = 70
START_FILES = 137000
STOP_FILES = 144000
def test(permutedpeerlist, iters):
# The i'th element of the filledat list is how many servers got full when the i'th file was uploaded.
filledat = []
for test in range(iters):
(servers, doubled_up_shares) = go(permutedpeerlist)
print "doubled_up_shares: ", doubled_up_shares
for server in servers:
fidx = server.full_at_tick
filledat.extend([0]*(fidx-len(filledat)+1))
filledat[fidx] += 1
startfiles = 0
while filledat[startfiles] == 0:
startfiles += 1
filespercolumn = div_ceil(len(filledat) - startfiles, (DESIRED_COLUMNS - 3))
# to make comparisons between runs line up:
# startfiles = START_FILES
# filespercolumn = div_ceil(STOP_FILES - startfiles, (DESIRED_COLUMNS - 3))
# The i'th element of the compressedfilledat list is how many servers got full when the filespercolumn files starting at startfiles + i were uploaded.
compressedfilledat = []
idx = startfiles
while idx < len(filledat):
compressedfilledat.append(0)
for i in range(filespercolumn):
compressedfilledat[-1] += filledat[idx]
idx += 1
if idx >= len(filledat):
break
# The i'th element of the fullat list is how many servers were full by the tick numbered startfiles + i * filespercolumn (on average).
fullat = [0] * len(compressedfilledat)
for idx, num in enumerate(compressedfilledat):
for fidx in range(idx, len(fullat)):
fullat[fidx] += num
for idx in range(len(fullat)):
fullat[idx] = fullat[idx] / float(iters)
# Now print it out as an ascii art graph.
import sys
for serversfull in range(40, 0, -1):
sys.stdout.write("%2d " % serversfull)
for numfull in fullat:
if int(numfull) == serversfull:
sys.stdout.write("*")
else:
sys.stdout.write(" ")
sys.stdout.write("\n")
sys.stdout.write(" ^-- servers full\n")
idx = 0
while idx < len(fullat):
nextmark = "%d--^ " % (startfiles + idx * filespercolumn)
sys.stdout.write(nextmark)
idx += len(nextmark)
sys.stdout.write("\nfiles uploaded --> \n")
if __name__ == "__main__":
import sys
iters = 16
for arg in sys.argv:
if arg.startswith("--iters="):
iters = int(arg[8:])
if "--permute" in sys.argv:
print "doing permuted peerlist, iterations: %d" % iters
test(True, iters)
else:
print "doing simple ring, iterations: %d" % iters
test(False, iters)
| gpl-2.0 |
seenaburns/lifts | database.py | 1 | 4543 | """
---
database.py
~~~~~~~~~~~
Lifts flatfile database accessing / querying functions.
"""
# For with clause, create new db_manager that connects
def dbm(db_file):
return DB_Manager(db_file)
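# Illustrative usage of the manager defined below (a sketch; the filename and
# entry are hypothetical):
#
#     with dbm('lifts.txt') as manager:
#         manager.add_entry('2014-01-01 squat 225x5 315x3')
#         print manager.search('squat')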
class DB_Manager():
def __init__(self, db_file):
self.db_file = db_file
# __enter__ and __exit set to allow for with clause
# returns an enabled DB_Manager
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.close()
def connect(self):
self.db = open(self.db_file, 'r+')
def close(self):
self.db.close()
def get_notes(self, entry):
# Extract the note portion of an entry (assumes well formed)
notes = None
if '"' in entry:
notes = '"' + entry.split('"')[1] + '"\n'
return notes
def get_date(self, entry):
# Extract date from (assumes well formed)
return entry.split(' ')[0]
def get_liftname(self, entry):
# Extract liftname from entry (assumes well formed)
return entry.split(' ')[1]
def get_unit(self, entry):
# Returns 'kg' if set to kg, 'lbs' if not (assumes well formed)
if entry.split(' ')[2] == 'kg':
return 'kg'
return 'lbs'
def get_sets(self, entry):
# Extract sets (as list) from entry (assumes well formed)
cleaned = entry
# Remove notes
notes = self.get_notes(entry)
if notes != None:
cleaned = cleaned[:-len(notes)]
# Remove date, liftname, kg/lbs (if set)
elems = [x.replace('\n', '') for x in cleaned.split(' ')]
elems = [x for x in elems if x != '']
if elems[2] in ['kg', 'lbs']:
return elems[3:]
else:
return elems[2:]
def convert_entry_to_lbs(self, entry):
kg_sets = self.get_sets(entry)
lbs_sets = []
for s in kg_sets:
# Extract kg, reps (optional), remove failed if present
kg = s.split('x')[0]
reps = ''
if 'x' in s:
reps = 'x' + s.split('x')[1]
failed = ''
if '-' in s:
failed = '-'
kg = kg.split('-')[0]
reps = reps.split('-')[0]
lbs = int(round(float(kg) * 2.20462))
lbs_sets.append('%s%s%s' % (str(lbs), reps, failed))
return_entry = '%s %s %s' % (self.get_date(entry), self.get_liftname(entry), ' '.join(lbs_sets))
if self.get_notes(entry) is not None:
return return_entry + ' ' + self.get_notes(entry)
else:
return return_entry + '\n'
def normalize_liftname(self, name):
# Remove uppercase and spaces from lift name
return name.replace(' ', '').lower()
def process_results(self, result_list, include_comments):
# Processing entires:
# - remove empty lines
# - allow comments (starts with #)
# - convert kg entries to lbs if needed
new_results = []
for result in result_list:
# Remove empty lines
if len(result.replace('\n', '')) == 0:
continue
# Handle comment
if result[0] == '#':
if include_comments:
new_results.append(result)
continue
elif ' kg ' in result:
# Convert to lbs
new_results.append(self.convert_entry_to_lbs(result))
else:
new_results.append(result)
return new_results
def add_entry(self, entry):
# Insert entry to database at start of file, ensure newline at
# end of entry
contents = self.db.readlines()
contents.insert(0, entry.replace('\n', '') + '\n')
self.db.seek(0)
self.db.write(''.join(contents))
def search(self, name):
# Search for entries with lift name
# * grabs all
contents = self.db.readlines()
normal_name = self.normalize_liftname(name)
results = [x for x in contents if normal_name in x]
if name == '*':
results = contents
return self.process_results(results, False)
def logs(self, limit):
# Return most recent n logs by date
# TODO: return by date not by exercise
contents = self.db.readlines()
return self.process_results(contents[:limit], True)
| bsd-3-clause |
eoneil1942/voltdb-4.7fix | tests/ee/indexes/index_scripted_test.py | 2 | 2299 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from subprocess import Popen, PIPE
import os
import sys
print sys.path[0]
script = """
# setup commands:
# begin indexname indextypes schema
# types:
# bint = big integer
# int = integer
# sint = small integer
# tint = tiny integer
# float = double (float)
# dec = decimal
# str = string
# general commands:
# is = insert expecting success
# if = insert expecting failure
# ls = lookup expecting success
# lf = lookup expecting failure
# us = update expecting success
# uf = update expecting failure
# ds = delete expecting success
# df = delete expecting failure
begin TestName MultiIntsTree,MultiGenericTree,MultiIntsHash,MultiGenericHash,UniqueIntsTree,UniqueGenericTree,UniqueIntsHash,UniqueGenericHash bint,bint,bint
is 5,6,7
ls 5,6,7
#us 5,6,7 8,9,10
#uf 5,6,7 8,9,10
#ds 5,6,7
df 8,9,10
exec
begin GenericTest MultiGenericTree,MultiGenericHash,UniqueGenericTree,UniqueGenericHash str4,bint,bint
is foo,6,7
ls foo,6,7
#us foo,6,7 bar,9,10
#uf foo,6,7 bar,9,10
ds foo,6,7
df bar,9,10
exec
done
"""
p = Popen(os.path.join(sys.path[0], "index_scripted_test"),
shell=False, stdin=PIPE, close_fds=True)
def write(x):
p.stdin.write(x)
write(script)
retcode = p.wait()
sys.exit(retcode)
| agpl-3.0 |
nwspeete-ibm/openwhisk | tools/travis/box-upload.py | 1 | 1039 | #!/usr/bin/env python
import os
import subprocess
import sys
import tempfile
import urllib
# Compresses the contents of a folder and uploads the result to Box.
# Run this script as:
#
# $ upload-logs.py LOG_DIR DEST_NAME
#
# e.g.:
#
# $ upload-logs.py /tmp/wsklogs logs-5512.tar.gz
def upload_file(local_file, remote_file):
if remote_file[0] == '/':
remote_file = remote_file[1:]
subprocess.call([ "curl", "-X", "POST", "--data-binary", "@%s" % local_file, "http://wsklogfwd.mybluemix.net/upload?%s" % urllib.urlencode({ "name" : remote_file }) ])
def tar_gz_dir(dir_path):
_, dst = tempfile.mkstemp(suffix = ".tar.gz")
subprocess.call([ "tar", "-cvzf", dst, dir_path ])
return dst
if __name__ == "__main__":
dir_path = sys.argv[1]
dst_path = sys.argv[2]
if not os.path.isdir(dir_path):
print "Directory doesn't exist: %s." % dir_path
sys.exit(0)
print "Compressing logs dir..."
tar = tar_gz_dir(dir_path)
print "Uploading to Box..."
upload_file(tar, dst_path)
| apache-2.0 |
nikhilraog/boto | boto/ec2/elb/attributes.py | 153 | 5103 | # Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Created by Chris Huegle for TellApart, Inc.
class ConnectionSettingAttribute(object):
"""
Represents the ConnectionSetting segment of ELB Attributes.
"""
def __init__(self, connection=None):
self.idle_timeout = None
def __repr__(self):
return 'ConnectionSettingAttribute(%s)' % (
self.idle_timeout)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'IdleTimeout':
self.idle_timeout = int(value)
class CrossZoneLoadBalancingAttribute(object):
"""
Represents the CrossZoneLoadBalancing segment of ELB Attributes.
"""
def __init__(self, connection=None):
self.enabled = None
def __repr__(self):
return 'CrossZoneLoadBalancingAttribute(%s)' % (
self.enabled)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
class AccessLogAttribute(object):
"""
Represents the AccessLog segment of ELB attributes.
"""
def __init__(self, connection=None):
self.enabled = None
self.s3_bucket_name = None
self.s3_bucket_prefix = None
self.emit_interval = None
def __repr__(self):
return 'AccessLog(%s, %s, %s, %s)' % (
self.enabled,
self.s3_bucket_name,
self.s3_bucket_prefix,
self.emit_interval
)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'S3BucketName':
self.s3_bucket_name = value
elif name == 'S3BucketPrefix':
self.s3_bucket_prefix = value
elif name == 'EmitInterval':
self.emit_interval = int(value)
class ConnectionDrainingAttribute(object):
"""
Represents the ConnectionDraining segment of ELB attributes.
"""
def __init__(self, connection=None):
self.enabled = None
self.timeout = None
def __repr__(self):
return 'ConnectionDraining(%s, %s)' % (
self.enabled,
self.timeout
)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'Timeout':
self.timeout = int(value)
class LbAttributes(object):
"""
Represents the Attributes of an Elastic Load Balancer.
"""
def __init__(self, connection=None):
self.connection = connection
self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute(
self.connection)
self.access_log = AccessLogAttribute(self.connection)
self.connection_draining = ConnectionDrainingAttribute(self.connection)
self.connecting_settings = ConnectionSettingAttribute(self.connection)
def __repr__(self):
return 'LbAttributes(%s, %s, %s, %s)' % (
repr(self.cross_zone_load_balancing),
repr(self.access_log),
repr(self.connection_draining),
repr(self.connecting_settings))
def startElement(self, name, attrs, connection):
if name == 'CrossZoneLoadBalancing':
return self.cross_zone_load_balancing
if name == 'AccessLog':
return self.access_log
if name == 'ConnectionDraining':
return self.connection_draining
if name == 'ConnectionSettings':
return self.connecting_settings
def endElement(self, name, value, connection):
pass
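# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original boto module): boto's response
# parser drives these classes through their SAX-style startElement/endElement
# hooks while walking a DescribeLoadBalancerAttributes XML document, so the
# attribute objects can be exercised by hand in the same way.
if __name__ == '__main__':
    attrs = LbAttributes()
    # Descend into the CrossZoneLoadBalancing element and feed it a value.
    czlb = attrs.startElement('CrossZoneLoadBalancing', {}, None)
    czlb.endElement('Enabled', 'true', None)
    # Same for the connection idle-timeout setting.
    conn = attrs.startElement('ConnectionSettings', {}, None)
    conn.endElement('IdleTimeout', '60', None)
    print(repr(attrs))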
| mit |
gwct/core | generators/cds-aln/04_codon_filter.py | 1 | 14930 | #!/usr/bin/python
############################################################
# For Penn genomes, 06.2020
# Takes a log file from a clipkit run on amino acid sequence
# and removes corresponding sites from codon alignment.
############################################################
import sys, os, core, coreseq, argparse
############################################################
# Options
parser = argparse.ArgumentParser(description="Codon alignment check filter");
parser.add_argument("-i", dest="input", help="Directory of CDS alignments.", default=False);
#parser.add_argument("-w", dest="wsize", help="Codon window size. Default: 3", type=int, default=3);
parser.add_argument("-o", dest="output", help="Desired output directory for filtered CDS alignments.", default=False);
parser.add_argument("-n", dest="name", help="A short name for all files associated with this job.", default=False);
parser.add_argument("-e", dest="expected", help="The expected number of species in each alignment file. Check for one-to-one alignments to only align sequences that retained all species after trimming.", default=False);
parser.add_argument("--noncoding", dest="noncoding", help="Set this option to check non-coding data. Will not check for stop codons.", action="store_true", default=False);
parser.add_argument("--protein", dest="protein", help="Set this option to check amino acid data.", action="store_true", default=False);
parser.add_argument("--count", dest="count_only", help="Set this option to just provide the log file with counts/stats. Will not write new sequences", action="store_true", default=False);
parser.add_argument("--overwrite", dest="overwrite", help="If the output directory already exists and you wish to overwrite it, set this option.", action="store_true", default=False);
# IO options
args = parser.parse_args();
if not args.input or not os.path.isdir(args.input):
sys.exit( " * Error 1: An input directory with aligned CDS sequences must be defined with -i.");
args.input = os.path.abspath(args.input);
# if args.wsize < 1:
# sys.exit(" * Error 2: Window size (-w) must be a positive integer.");
if not args.name:
name = core.getRandStr();
else:
name = args.name;
if not args.count_only and not args.output:
sys.exit( " * Error 2: An output directory must be defined with -o.");
if not args.count_only:
args.output = os.path.abspath(args.output);
if os.path.isdir(args.output) and not args.overwrite:
sys.exit( " * Error 3: Output directory (-o) already exists! Explicity specify --overwrite to overwrite it.");
if args.noncoding and args.protein:
sys.exit(" * Error 4: Please specify only one of --noncoding or --protein.");
elif args.noncoding:
mode = "nt";
elif args.protein:
mode = "aa";
else:
mode = "codon";
expected = False;
if args.expected:
eflag = False;
try:
expected = int(args.expected);
except:
eflag = True;
if eflag or expected < 1:
sys.exit(" * Error 4: -e must be a positive integer.");
# IO option error checking
pad = 26
cwd = os.getcwd();
# Job vars
log_file = os.path.join("logs", name + ".log");
# Job files
##########################
# Reporting run-time info for records.
with open(log_file, "w") as logfile:
core.runTime("# CDS alignment filter", logfile);
core.PWS("# IO OPTIONS", logfile);
core.PWS(core.spacedOut("# Input CDS directory:", pad) + args.input, logfile);
core.PWS(core.spacedOut("# Input sequence type:", pad) + mode, logfile);
#core.PWS(core.spacedOut("# Codon window size:", pad) + str(args.wsize), logfile);
if not args.name:
core.PWS("# -n not specified --> Generating random string for job name", logfile);
core.PWS(core.spacedOut("# Job name:", pad) + name, logfile);
if not args.count_only:
core.PWS(core.spacedOut("# Output directory:", pad) + args.output, logfile);
if args.overwrite:
core.PWS(core.spacedOut("# --overwrite set:", pad) + "Overwriting previous files in output directory.", logfile);
if not os.path.isdir(args.output):
core.PWS("# Creating output directory.", logfile);
os.system("mkdir " + args.output);
else:
core.PWS(core.spacedOut("# --count set:", pad) + "Will not output sequences, ignoring -o.", logfile);
core.PWS(core.spacedOut("# Log file:", pad) + log_file, logfile);
core.PWS("# ----------------", logfile);
##########################
# Filtering CDS aligns
core.PWS("# " + core.getDateTime() + " Beginning filter...", logfile);
if mode == "aa":
headers = ["Align", "Seq length", "Short seq", "Num uniq seqs", "Num ident seqs", "Percent ident seqs", "Percent ident seqs high", "No info sites", "Percent no info sites", "Percent no info high"];
if mode == "nt":
headers = ["Align", "Seq length", "Short seq", "Num uniq seqs", "Num ident seqs", "Percent ident seqs", "Percent ident seqs high", "No info sites", "Percent no info sites", "Percent no info high"];
else:
headers = ["Align", "Seq length", "Codon length", "Short seq", "Num uniq seqs", "Num ident seqs", "Percent ident seqs", "Percent ident seqs high", "No info sites", "Percent no info sites", "Percent no info high", "Premature stop codons", "Percent seq premature stop codons", "Premature stop percent high"];
# The global headers
fa_files = [ f for f in os.listdir(args.input) if f.endswith(".fa") ];
num_alns = len(fa_files);
num_alns_str = str(num_alns);
num_alns_len = len(num_alns_str);
# Read align file names from input directory
written, num_short, num_high_ident, num_gappy, aln_prem_stop, num_stoppy = 0.0,0.0,0.0,0.0,0.0,0.0;
# Some count variables for all aligns
spec_high = {};
# The dictionary to keep track of species count variables
first_aln, counter, skipped = True, 0, 0;
# Loop tracking variables
for f in fa_files:
if counter % 500 == 0:
counter_str = str(counter);
while len(counter_str) != num_alns_len:
counter_str = "0" + counter_str;
print ("> " + core.getDateTime() + " " + counter_str + " / " + num_alns_str);
counter += 1;
# Loop progress
cur_infile = os.path.join(args.input, f);
if not args.count_only:
cur_outfile = os.path.join(args.output, f.replace(".fa", ".filter.fa"));
# Get the current in and output files
# Make sure all files exist
#print(f);
#print(cur_outfile);
#sys.exit();
seqs = core.fastaGetDict(cur_infile);
num_seqs = float(len(seqs));
# Read the sequences
if expected and num_seqs != expected:
print("# Expected number of species not found... skipping: " + cur_infile + "\n");
skipped += 1;
continue;
if first_aln:
# Initialize some things on the first alignment
spec_headers_order = [];
for title in seqs:
short_title = title.split(" ")[0];
headers.append(short_title + " gaps/Ns");
headers.append(short_title + " percent gaps/Ns");
headers.append(short_title + " high percent gaps/Ns");
if mode == "codon":
headers.append(short_title + " prem stop");
spec_headers_order.append(short_title);
# For output, the species headers should retain an order set here
spec_high[short_title] = { 'high-gaps' : 0, 'prem-stop' : 0 };
# Initialize the overall counts for each species
logfile.write("\t".join(headers) + "\n");
first_aln = False;
# Write the headers and set the first flag to false
codon_seqs, spec_out = {}, {};
seq_prem_stop = 0;
first_seq, prem_stop_flag, short_seq, high_ident = True, False, "FALSE", "FALSE";
# Variables for the current alignment
for title in seqs:
short_title = title.split(" ")[0];
spec_out[short_title] = { 'gaps' : 0.0, 'perc-gaps' : 0.0, 'high-gaps' : "FALSE", 'prem-stop' : "FALSE" };
# Initialize the species variables for this alignment
if first_seq:
seq_len = float(len(seqs[title]));
if seq_len < 100:
num_short += 1;
short_seq = "TRUE";
# Get the length of the alignment
codon_len = float(len(seqs[title]) / 3);
#last_codon_ind = codon_len - 2;
# Get the total number of codons in the alignment
first_seq = False;
# Get the length of the alignment from the first seq only
#codon_list = [ seqs[title][i:i+3] for i in range(0, len(seqs[title]), 3) ];
#codon_seqs[title] = codon_list;
# Get the codon sequence.
seqs[title] = seqs[title].replace("!", "N");
# Replace MACSE's frameshift ! char with N
            spec_out[short_title]['gaps'] = float(seqs[title].count("-") + seqs[title].count("N"));
            spec_out[short_title]['perc-gaps'] = round((spec_out[short_title]['gaps'] / seq_len) * 100, 2);
            if spec_out[short_title]['perc-gaps'] > 20:
                spec_high[short_title]['high-gaps'] += 1;
                spec_out[short_title]['high-gaps'] = "TRUE";
# Count the number of gappy/uncalled sites for this sequence
if mode == "codon":
stop, seqs[title] = coreseq.premStopCheck(seqs[title], allowlastcodon=True, rmlast=True);
if stop:
spec_high[short_title]['prem-stop'] += 1;
spec_out[short_title]['prem-stop'] = "TRUE";
seq_prem_stop += 1.0;
prem_stop_flag = True;
# Check for premature stop codons and remove last codon if it is a stop
# End sequence loop
ident_seqs = [];
uniq_seqs = [];
for t1 in seqs:
            cur_seq = seqs[t1];
seq_count = seqs.values().count(cur_seq);
if seq_count > 1 and cur_seq not in ident_seqs:
for i in range(seq_count):
ident_seqs.append(cur_seq);
elif seq_count == 1:
uniq_seqs.append(cur_seq);
num_ident = len(ident_seqs);
        num_uniq = len(uniq_seqs) + len(set(ident_seqs));
perc_ident = round((num_ident / num_seqs) * 100, 2);
if perc_ident > 50:
high_ident = "TRUE";
num_high_ident += 1;
# Check for identical sequences. This has to be done after the loop above because it changes sequences.
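        # Worked example (illustrative): for three aligned sequences A, A, B the
        # loop above puts both copies of A in ident_seqs and B in uniq_seqs, so
        # num_ident = 2 and num_uniq = 1 + 1 = 2; with num_seqs = 3.0 the identity
        # percentage is round((2 / 3.0) * 100, 2) = 66.67, which exceeds 50, so the
        # alignment is flagged as highly identical.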
if mode == "codon":
if prem_stop_flag:
aln_prem_stop += 1;
# If any of the sequences in this alignment had a premature stop, add it to the count here
perc_prem_stop = round((seq_prem_stop / num_seqs) * 100, 2);
prem_stop_high = "FALSE";
if perc_prem_stop > 20:
num_stoppy += 1;
prem_stop_high = "TRUE";
# Check if a high percentage of sequences contain premature stop codons
no_info_sites = 0.0;
for i in range(int(seq_len)):
num_gaps = 0.0;
for title in seqs:
if seqs[title][i] in ["-", "N"]:
num_gaps += 1.0;
if num_gaps == num_seqs:
no_info_sites += 1.0;
# Count the number of columns that are all gaps or Ns
perc_no_info_sites = round((no_info_sites / seq_len) * 100, 2);
perc_no_info_high = "FALSE";
if perc_no_info_sites > 20:
num_gappy += 1;
perc_no_info_high = "TRUE";
# Check if the number of columns that are all gaps or Ns is high
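        # Worked example (illustrative): in the three-sequence alignment
        #     A-N
        #     G-N
        #     T-N
        # columns 2 and 3 are '-' or 'N' in every sequence, so no_info_sites = 2.0
        # and perc_no_info_sites = round((2.0 / 3) * 100, 2) = 66.67, which exceeds
        # 20 and flags the alignment as gappy.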
if mode == "aa":
outline = [f, str(seq_len), short_seq, str(num_uniq), str(num_ident), str(perc_ident), high_ident, str(no_info_sites), str(perc_no_info_sites), perc_no_info_high];
elif mode == "nt":
outline = [f, str(seq_len), short_seq, str(num_uniq), str(num_ident), str(perc_ident), high_ident, str(no_info_sites), str(perc_no_info_sites), perc_no_info_high];
else:
outline = [f, str(seq_len), str(codon_len), short_seq, str(num_uniq), str(num_ident), str(perc_ident), high_ident, str(no_info_sites), str(perc_no_info_sites), perc_no_info_high, str(seq_prem_stop), str(perc_prem_stop), prem_stop_high];
for short_title in spec_headers_order:
outline.append(str(spec_out[short_title]['gaps']));
outline.append(str(spec_out[short_title]['perc-gaps']));
outline.append(spec_out[short_title]['high-gaps']);
if mode == "codon":
outline.append(spec_out[short_title]['prem-stop']);
logfile.write("\t".join(outline) + "\n");
# Write the log output line with counts for the current alignment
if not args.count_only and not prem_stop_flag and short_seq == "FALSE" and high_ident == "FALSE" and perc_no_info_high == "FALSE":
with open(cur_outfile, "w") as outfile:
for title in seqs:
outfile.write(title + "\n");
outfile.write(seqs[title] + "\n");
written += 1;
# Write the edited sequence to the output file if there are no premature stop codons
core.PWS("# ----------------", logfile);
core.PWS(core.spacedOut("# Total aligns", 55) + str(num_alns), logfile);
core.PWS(core.spacedOut("# Files skipped: ", pad) + str(skipped), logfile);
core.PWS(core.spacedOut("# Aligns written", 55) + str(written), logfile);
core.PWS(core.spacedOut("# Aligns shorter than 100bp", 55) + str(num_short), logfile);
core.PWS(core.spacedOut("# Aligns with >50% identical sequences", 55) + str(num_high_ident), logfile);
core.PWS(core.spacedOut("# Aligns with >20% of gappy/Ny sites", 55) + str(num_gappy), logfile);
core.PWS(core.spacedOut("# Aligns with at least one premature stop:", 55) + str(aln_prem_stop), logfile);
    if mode == "codon":
core.PWS(core.spacedOut("# Aligns with >20% of seqs with premature stops", 55) + str(num_stoppy), logfile);
core.PWS("# ----------------", logfile);
# Write overall summary data
    if mode == "codon":
        spec_headers = "Spec\tNumber >20% gappy\tNumber premature stop"
    else:
        spec_headers = "Spec\tNumber >20% gappy"
core.PWS(spec_headers, logfile);
for title in spec_high:
if mode == "codon":
outline = [title, str(spec_high[title]['high-gaps']), str(spec_high[title]['prem-stop'])];
else:
outline = [title, str(spec_high[title]['high-gaps'])];
core.PWS("\t".join(outline), logfile);
core.PWS("# ----------------", logfile);
# Write species summary data
| gpl-3.0 |
fmv1992/python-mode | pymode/libs/pkg_resources/_vendor/packaging/__about__.py | 441 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.3"
__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| lgpl-3.0 |
smukoehler/SDB-control | mpcdriver.py | 1 | 3535 | import urlparse
import datetime
import urllib2
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
from sklearn import linear_model
from smap.archiver.client import RepublishClient
from functools import partial
from mpc import *
class SimpleMPC(SmapDriver):
def setup(self, opts):
self.rate = float( opts.get('rate' , 120 ) )
self.archiver_url = opts.get('archiver')
        self.input_variables = opts.get('input_variables', None).split(',')
self.state_variables = opts.get('state_variables', None).split(',')
self.read_stream_data()
self.setup_model()
'''
Create MPC kernel
'''
def setup_model(self):
self.mpc_model = MPC()
'''
Function that runs periodically to update the model
'''
def start(self):
self._loop = periodicSequentialCall(self.predict)
self._loop.start(self.rate)
for clientlist in self.repubclients.itervalues():
for c in clientlist:
c.connect()
def predict(self):
# Input vector at step t-1
input_vector_t_1 = self.construct_input(-1)
# State vector at step t-1
state_vector_t_1 = self.construct_state(-1)
if input_vector_t_1 == None or state_vector_t_1 == None:
return
# Call mpc kernel to add data
self.mpc_model.add_data( input_vector_t_1 , state_vector_t_1 )
# Input vector at time t
input_vector_t = self.construct_input(0)
# predict by calling at mpc kernel
prediction = self.mpc_model.predict( input_vector_t )
# Get model parameters
params = self.mpc_model.get_model()
# Do post processing
        # Post-process the prediction for the current timestep
        self.post_processing(0, prediction, self.construct_state(0), params)
'''
Reads data to be supplied to build the model
'''
def read_stream_data(self):
self.points = {}
self.repubclients = {}
for name in self.input_variables:
point = name.strip()
self.points[point] = []
self.repubclients[point] = [RepublishClient(self.archiver_url, partial(self.cb, point), restrict="Metadata/Name = '" + str(point) + "'")]
for name in self.state_variables:
point = name.strip()
self.points[point] = []
self.repubclients[point] = [RepublishClient(self.archiver_url, partial(self.cb, point), restrict="Metadata/Name = '" + str(point) + "'")]
def cb(self, point, _, data):
value = data[-1][-1][1]
print 'Received',point,'=',value
self.points[point].append(value)
'''
Constructs an input vector at a particular timestep
'''
def construct_input(self, step):
input_vector = []
try:
for point in self.input_variables:
input_vector.append( self.points[point][ step - 1] )
for point in self.state_variables:
input_vector.append( self.points[point][ step - 2] )
except:
return None
return input_vector
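    # Illustrative layout, assuming hypothetical input_variables u1,u2 and a
    # single state_variable x: construct_input(0) returns
    #     [ u1[-1], u2[-1], x[-2] ]
    # i.e. the latest reading of each input followed by the state one sample
    # further back, matching the step-1 / step-2 indices used above.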
'''
Constructs the state vector at a particular timestep
'''
def construct_state(self, step):
state_vector = []
try:
for point in self.state_variables:
state_vector.append( self.points[point][ step - 1 ])
except:
return None
return state_vector
'''
Do post processing
'''
def post_processing(self, step, prediction, state_t, params ):
# Do post processing
for i in range(len(self.state_variables)):
self.add('/' + self.state_variables[i] + "-predicted" , prediction[i] )
for j in range(len(self.input_variables)):
self.add('/' + self.state_variables[i] + "-mpc-param-effect-of-" + self.input_variables[j], params[j])
for j in range(len(self.state_variables)):
self.add('/' + self.state_variables[i] + "-mpc-param-effect-of-" + self.state_variables[j], params[ len(self.input_variables) + j])
| bsd-2-clause |
total-impact/total-impact-core | test/unit_tests/test_provider_batch_data.py | 4 | 49398 | from totalimpact import db, app
from totalimpact import provider_batch_data
from totalimpact.provider_batch_data import ProviderBatchData
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import OperationalError
import os, json, copy
from nose.tools import raises, assert_equals, nottest
import unittest
from test.utils import setup_postgres_for_unittests, teardown_postgres_for_unittests
class TestProviderBatchData():
def setUp(self):
self.db = setup_postgres_for_unittests(db, app)
self.test_data = {
"raw": "<pmc-web-stat><request year=\"2012\" month=\"10\" jrid=\"elife\" eissn=\"2050-084X\"></request><response status=\"0\" collection=\"eLife\"></response><articles><article id=\"PMC3463246\"><meta-data doi=\"10.7554/eLife.00013\" pmcid=\"PMC3463246\" pubmed-id=\"23066504\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00013\"/><usage unique-ip=\"1368\" full-text=\"1464\" pdf=\"722\" abstract=\"119\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"144\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3463247\"><meta-data doi=\"10.7554/eLife.00240\" pmcid=\"PMC3463247\" pubmed-id=\"23066507\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00240\"/><usage unique-ip=\"514\" full-text=\"606\" pdf=\"230\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"9\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3465569\"><meta-data doi=\"10.7554/eLife.00242\" pmcid=\"PMC3465569\" pubmed-id=\"23066508\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00242\"/><usage unique-ip=\"473\" full-text=\"503\" pdf=\"181\" abstract=\"2\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"13\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3465570\"><meta-data doi=\"10.7554/eLife.00243\" pmcid=\"PMC3465570\" pubmed-id=\"23066509\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00243\"/><usage unique-ip=\"547\" full-text=\"636\" pdf=\"227\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"56\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3466591\"><meta-data doi=\"10.7554/eLife.00065\" pmcid=\"PMC3466591\" pubmed-id=\"23066506\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00065\"/><usage unique-ip=\"2516\" full-text=\"2804\" pdf=\"1583\" abstract=\"195\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"405\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3466783\"><meta-data doi=\"10.7554/eLife.00007\" pmcid=\"PMC3466783\" pubmed-id=\"23066503\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00007\"/><usage unique-ip=\"1331\" full-text=\"1412\" pdf=\"898\" abstract=\"224\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"109\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3467772\"><meta-data doi=\"10.7554/eLife.00270\" pmcid=\"PMC3467772\" pubmed-id=\"23066510\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00270\"/><usage unique-ip=\"1396\" full-text=\"1776\" pdf=\"625\" abstract=\"4\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3470722\"><meta-data doi=\"10.7554/eLife.00286\" pmcid=\"PMC3470722\" pubmed-id=\"23071903\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00286\"/><usage unique-ip=\"909\" full-text=\"1030\" pdf=\"376\" abstract=\"6\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3479833\"><meta-data doi=\"10.7554/eLife.00031\" pmcid=\"PMC3479833\" pubmed-id=\"23110253\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00031\"/><usage unique-ip=\"154\" full-text=\"126\" pdf=\"87\" abstract=\"26\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"13\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3470409\"><meta-data doi=\"10.7554/eLife.00048\" pmcid=\"PMC3470409\" pubmed-id=\"23066505\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00048\"/><usage unique-ip=\"1250\" full-text=\"1361\" 
pdf=\"911\" abstract=\"237\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"317\" supp-data=\"4\" cited-by=\"0\"/></article><article id=\"PMC3482692\"><meta-data doi=\"10.7554/eLife.00102\" pmcid=\"PMC3482692\" pubmed-id=\"23110254\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00102\"/><usage unique-ip=\"259\" full-text=\"232\" pdf=\"133\" abstract=\"36\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"3\" cited-by=\"0\"/></article><article id=\"PMC3482687\"><meta-data doi=\"10.7554/eLife.00281\" pmcid=\"PMC3482687\" pubmed-id=\"23110255\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00281\"/><usage unique-ip=\"75\" full-text=\"53\" pdf=\"47\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3482686\"><meta-data doi=\"10.7554/eLife.00005\" pmcid=\"PMC3482686\" pubmed-id=\"23110252\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00005\"/><usage unique-ip=\"324\" full-text=\"249\" pdf=\"263\" abstract=\"71\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"93\" supp-data=\"17\" cited-by=\"0\"/></article></articles></pmc-web-stat>",
"max_event_date": "2012-10-31T07:34:01.126892",
"provider": "pmc",
"aliases": {
"pmid": [
"23066504",
"23066507",
"23066508",
"23066509",
"23066506",
"23066503",
"23066510",
"23071903",
"23110253",
"23066505",
"23110254",
"23110255",
"23110252"
]
},
"provider_raw_version": 1,
"min_event_date": "2012-10-01T07:34:01.126892",
"created": "2012-11-29T07:34:01.126892"
}
self.old_doc = {
"_id": "pmc201304",
"_rev": "1-c9a62778077941736932cfb3510ee382",
"raw": "<pmc-web-stat><request year=\"2013\" month=\"04\" jrid=\"elife\" eissn=\"2050-084X\"></request><response status=\"0\" collection=\"eLife\"></response><articles><article id=\"PMC3463246\"><meta-data doi=\"10.7554/eLife.00013\" pmcid=\"PMC3463246\" pubmed-id=\"23066504\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00013\"/><usage unique-ip=\"112\" full-text=\"152\" pdf=\"22\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"31\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3463247\"><meta-data doi=\"10.7554/eLife.00240\" pmcid=\"PMC3463247\" pubmed-id=\"23066507\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00240\"/><usage unique-ip=\"10\" full-text=\"41\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3465569\"><meta-data doi=\"10.7554/eLife.00242\" pmcid=\"PMC3465569\" pubmed-id=\"23066508\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00242\"/><usage unique-ip=\"27\" full-text=\"26\" pdf=\"3\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3465570\"><meta-data doi=\"10.7554/eLife.00243\" pmcid=\"PMC3465570\" pubmed-id=\"23066509\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00243\"/><usage unique-ip=\"41\" full-text=\"50\" pdf=\"9\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3466591\"><meta-data doi=\"10.7554/eLife.00065\" pmcid=\"PMC3466591\" pubmed-id=\"23066506\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00065\"/><usage unique-ip=\"252\" full-text=\"2522\" pdf=\"41\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"72\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3466783\"><meta-data doi=\"10.7554/eLife.00007\" pmcid=\"PMC3466783\" pubmed-id=\"23066503\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00007\"/><usage unique-ip=\"47\" full-text=\"62\" pdf=\"5\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"10\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3467772\"><meta-data doi=\"10.7554/eLife.00270\" pmcid=\"PMC3467772\" pubmed-id=\"23066510\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00270\"/><usage unique-ip=\"28\" full-text=\"32\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3470409\"><meta-data doi=\"10.7554/eLife.00048\" pmcid=\"PMC3470409\" pubmed-id=\"23066505\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00048\"/><usage unique-ip=\"72\" full-text=\"68\" pdf=\"20\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"26\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3470722\"><meta-data doi=\"10.7554/eLife.00286\" pmcid=\"PMC3470722\" pubmed-id=\"23071903\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00286\"/><usage unique-ip=\"37\" full-text=\"40\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3479833\"><meta-data doi=\"10.7554/eLife.00031\" pmcid=\"PMC3479833\" pubmed-id=\"23110253\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00031\"/><usage unique-ip=\"31\" full-text=\"41\" pdf=\"3\" abstract=\"0\" scanned-summary=\"0\" 
scanned-page-browse=\"0\" figure=\"7\" supp-data=\"0\" cited-by=\"1\"/></article><article id=\"PMC3482686\"><meta-data doi=\"10.7554/eLife.00005\" pmcid=\"PMC3482686\" pubmed-id=\"23110252\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00005\"/><usage unique-ip=\"189\" full-text=\"238\" pdf=\"33\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"110\" supp-data=\"2\" cited-by=\"0\"/></article><article id=\"PMC3482687\"><meta-data doi=\"10.7554/eLife.00281\" pmcid=\"PMC3482687\" pubmed-id=\"23110255\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00281\"/><usage unique-ip=\"17\" full-text=\"20\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3482692\"><meta-data doi=\"10.7554/eLife.00102\" pmcid=\"PMC3482692\" pubmed-id=\"23110254\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00102\"/><usage unique-ip=\"50\" full-text=\"55\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"7\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3485613\"><meta-data doi=\"10.7554/eLife.00301\" pmcid=\"PMC3485613\" pubmed-id=\"23150799\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00301\"/><usage unique-ip=\"41\" full-text=\"43\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"7\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3485615\"><meta-data doi=\"10.7554/eLife.00049\" pmcid=\"PMC3485615\" pubmed-id=\"23150796\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00049\"/><usage unique-ip=\"390\" full-text=\"558\" pdf=\"108\" abstract=\"11\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"132\" supp-data=\"0\" cited-by=\"4\"/></article><article id=\"PMC3490148\"><meta-data doi=\"10.7554/eLife.00302\" pmcid=\"PMC3490148\" pubmed-id=\"23150800\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00302\"/><usage unique-ip=\"47\" full-text=\"54\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3490149\"><meta-data doi=\"10.7554/eLife.00068\" pmcid=\"PMC3490149\" pubmed-id=\"23150797\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00068\"/><usage unique-ip=\"42\" full-text=\"53\" pdf=\"12\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"9\" supp-data=\"1\" cited-by=\"0\"/></article><article id=\"PMC3491588\"><meta-data doi=\"10.7554/eLife.00003\" pmcid=\"PMC3491588\" pubmed-id=\"23150794\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00003\"/><usage unique-ip=\"62\" full-text=\"58\" pdf=\"20\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"13\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3492862\"><meta-data doi=\"10.7554/eLife.00011\" pmcid=\"PMC3492862\" pubmed-id=\"23150795\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00011\"/><usage unique-ip=\"68\" full-text=\"70\" pdf=\"9\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"18\" supp-data=\"6\" cited-by=\"0\"/></article><article id=\"PMC3494066\"><meta-data doi=\"10.7554/eLife.00173\" pmcid=\"PMC3494066\" pubmed-id=\"23150798\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00173\"/><usage unique-ip=\"39\" full-text=\"47\" pdf=\"3\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"12\" supp-data=\"0\" cited-by=\"0\"/></article><article 
id=\"PMC3510452\"><meta-data doi=\"10.7554/eLife.00051\" pmcid=\"PMC3510452\" pubmed-id=\"23240081\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00051\"/><usage unique-ip=\"16\" full-text=\"16\" pdf=\"5\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510453\"><meta-data doi=\"10.7554/eLife.00078\" pmcid=\"PMC3510453\" pubmed-id=\"23240084\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00078\"/><usage unique-ip=\"33\" full-text=\"33\" pdf=\"8\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510454\"><meta-data doi=\"10.7554/eLife.00171\" pmcid=\"PMC3510454\" pubmed-id=\"23240086\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00171\"/><usage unique-ip=\"28\" full-text=\"34\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"14\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510455\"><meta-data doi=\"10.7554/eLife.00067\" pmcid=\"PMC3510455\" pubmed-id=\"23240082\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00067\"/><usage unique-ip=\"31\" full-text=\"34\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"10\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510456\"><meta-data doi=\"10.7554/eLife.00070\" pmcid=\"PMC3510456\" pubmed-id=\"23240083\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00070\"/><usage unique-ip=\"37\" full-text=\"34\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"20\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510471\"><meta-data doi=\"10.7554/eLife.00365\" pmcid=\"PMC3510471\" pubmed-id=\"23240092\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00365\"/><usage unique-ip=\"13\" full-text=\"14\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510472\"><meta-data doi=\"10.7554/eLife.00340\" pmcid=\"PMC3510472\" pubmed-id=\"23240089\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00340\"/><usage unique-ip=\"8\" full-text=\"10\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510473\"><meta-data doi=\"10.7554/eLife.00351\" pmcid=\"PMC3510473\" pubmed-id=\"0\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00351\"/><usage unique-ip=\"8\" full-text=\"9\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510474\"><meta-data doi=\"10.7554/eLife.00184\" pmcid=\"PMC3510474\" pubmed-id=\"23240087\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00184\"/><usage unique-ip=\"38\" full-text=\"41\" pdf=\"8\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"4\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510475\"><meta-data doi=\"10.7554/eLife.00347\" pmcid=\"PMC3510475\" pubmed-id=\"23240090\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00347\"/><usage unique-ip=\"17\" full-text=\"18\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510476\"><meta-data doi=\"10.7554/eLife.00353\" pmcid=\"PMC3510476\" pubmed-id=\"23240091\" pub-year=\"2012\" 
volume=\"1\" issue=\"\" first-page=\"e00353\"/><usage unique-ip=\"22\" full-text=\"36\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"4\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3510477\"><meta-data doi=\"10.7554/eLife.00326\" pmcid=\"PMC3510477\" pubmed-id=\"23240088\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00326\"/><usage unique-ip=\"14\" full-text=\"10\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"6\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3514886\"><meta-data doi=\"10.7554/eLife.00109\" pmcid=\"PMC3514886\" pubmed-id=\"23240085\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00109\"/><usage unique-ip=\"34\" full-text=\"31\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"15\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524649\"><meta-data doi=\"10.7554/eLife.00090\" pmcid=\"PMC3524649\" pubmed-id=\"23256041\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00090\"/><usage unique-ip=\"34\" full-text=\"37\" pdf=\"6\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"9\" supp-data=\"0\" cited-by=\"1\"/></article><article id=\"PMC3524793\"><meta-data doi=\"10.7554/eLife.00352\" pmcid=\"PMC3524793\" pubmed-id=\"23256044\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00352\"/><usage unique-ip=\"15\" full-text=\"18\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524794\"><meta-data doi=\"10.7554/eLife.00093\" pmcid=\"PMC3524794\" pubmed-id=\"23251784\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00093\"/><usage unique-ip=\"19\" full-text=\"26\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524795\"><meta-data doi=\"10.7554/eLife.00311\" pmcid=\"PMC3524795\" pubmed-id=\"23251785\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00311\"/><usage unique-ip=\"33\" full-text=\"38\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"9\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524796\"><meta-data doi=\"10.7554/eLife.00181\" pmcid=\"PMC3524796\" pubmed-id=\"23256042\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00181\"/><usage unique-ip=\"25\" full-text=\"24\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"12\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524800\"><meta-data doi=\"10.7554/eLife.00386\" pmcid=\"PMC3524800\" pubmed-id=\"23256046\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00386\"/><usage unique-ip=\"15\" full-text=\"17\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"4\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524801\"><meta-data doi=\"10.7554/eLife.00047\" pmcid=\"PMC3524801\" pubmed-id=\"23251783\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00047\"/><usage unique-ip=\"47\" full-text=\"51\" pdf=\"16\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"18\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524826\"><meta-data doi=\"10.7554/eLife.00387\" pmcid=\"PMC3524826\" pubmed-id=\"23256047\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00387\"/><usage unique-ip=\"52\" full-text=\"60\" pdf=\"6\" 
abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"12\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524827\"><meta-data doi=\"10.7554/eLife.00385\" pmcid=\"PMC3524827\" pubmed-id=\"23256045\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00385\"/><usage unique-ip=\"13\" full-text=\"16\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3524939\"><meta-data doi=\"10.7554/eLife.00205\" pmcid=\"PMC3524939\" pubmed-id=\"23256043\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00205\"/><usage unique-ip=\"63\" full-text=\"75\" pdf=\"19\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"38\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3525924\"><meta-data doi=\"10.7554/eLife.00117\" pmcid=\"PMC3525924\" pubmed-id=\"23275833\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00117\"/><usage unique-ip=\"44\" full-text=\"46\" pdf=\"5\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"22\" supp-data=\"1\" cited-by=\"0\"/></article><article id=\"PMC3533262\"><meta-data doi=\"10.7554/eLife.00475\" pmcid=\"PMC3533262\" pubmed-id=\"23326638\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00475\"/><usage unique-ip=\"17\" full-text=\"19\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3534202\"><meta-data doi=\"10.7554/eLife.00248\" pmcid=\"PMC3534202\" pubmed-id=\"23330067\" pub-year=\"2012\" volume=\"1\" issue=\"\" first-page=\"e00248\"/><usage unique-ip=\"34\" full-text=\"40\" pdf=\"9\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"12\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3539330\"><meta-data doi=\"10.7554/eLife.00450\" pmcid=\"PMC3539330\" pubmed-id=\"23326643\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00450\"/><usage unique-ip=\"19\" full-text=\"20\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3539331\"><meta-data doi=\"10.7554/eLife.00452\" pmcid=\"PMC3539331\" pubmed-id=\"0\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00452\"/><usage unique-ip=\"11\" full-text=\"14\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3539332\"><meta-data doi=\"10.7554/eLife.00160\" pmcid=\"PMC3539332\" pubmed-id=\"23326640\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00160\"/><usage unique-ip=\"45\" full-text=\"46\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"17\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3539393\"><meta-data doi=\"10.7554/eLife.00170\" pmcid=\"PMC3539393\" pubmed-id=\"23326641\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00170\"/><usage unique-ip=\"67\" full-text=\"80\" pdf=\"12\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"36\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3545443\"><meta-data doi=\"10.7554/eLife.00231\" pmcid=\"PMC3545443\" pubmed-id=\"23326642\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00231\"/><usage unique-ip=\"43\" full-text=\"50\" pdf=\"8\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"22\" supp-data=\"0\" 
cited-by=\"0\"/></article><article id=\"PMC3545444\"><meta-data doi=\"10.7554/eLife.00116\" pmcid=\"PMC3545444\" pubmed-id=\"23326639\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00116\"/><usage unique-ip=\"68\" full-text=\"85\" pdf=\"14\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"16\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3552422\"><meta-data doi=\"10.7554/eLife.00012\" pmcid=\"PMC3552422\" pubmed-id=\"23359858\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00012\"/><usage unique-ip=\"48\" full-text=\"44\" pdf=\"14\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"0\" cited-by=\"1\"/></article><article id=\"PMC3552423\"><meta-data doi=\"10.7554/eLife.00308\" pmcid=\"PMC3552423\" pubmed-id=\"23358411\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00308\"/><usage unique-ip=\"71\" full-text=\"95\" pdf=\"17\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"23\" supp-data=\"3\" cited-by=\"0\"/></article><article id=\"PMC3552424\"><meta-data doi=\"10.7554/eLife.00178\" pmcid=\"PMC3552424\" pubmed-id=\"23359859\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00178\"/><usage unique-ip=\"64\" full-text=\"80\" pdf=\"16\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"51\" supp-data=\"11\" cited-by=\"1\"/></article><article id=\"PMC3552425\"><meta-data doi=\"10.7554/eLife.00476\" pmcid=\"PMC3552425\" pubmed-id=\"23358458\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00476\"/><usage unique-ip=\"24\" full-text=\"25\" pdf=\"1\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3552426\"><meta-data doi=\"10.7554/eLife.00477\" pmcid=\"PMC3552426\" pubmed-id=\"23359861\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00477\"/><usage unique-ip=\"22\" full-text=\"26\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3552427\"><meta-data doi=\"10.7554/eLife.00491\" pmcid=\"PMC3552427\" pubmed-id=\"23359862\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00491\"/><usage unique-ip=\"28\" full-text=\"32\" pdf=\"5\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3552618\"><meta-data doi=\"10.7554/eLife.00183\" pmcid=\"PMC3552618\" pubmed-id=\"23358702\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00183\"/><usage unique-ip=\"34\" full-text=\"39\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"4\" supp-data=\"73\" cited-by=\"0\"/></article><article id=\"PMC3552619\"><meta-data doi=\"10.7554/eLife.00230\" pmcid=\"PMC3552619\" pubmed-id=\"23359860\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00230\"/><usage unique-ip=\"34\" full-text=\"36\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"14\" cited-by=\"0\"/></article><article id=\"PMC3557904\"><meta-data doi=\"10.7554/eLife.00563\" pmcid=\"PMC3557904\" pubmed-id=\"23386979\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00563\"/><usage unique-ip=\"51\" full-text=\"60\" pdf=\"19\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"10\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3557905\"><meta-data doi=\"10.7554/eLife.00471\" 
pmcid=\"PMC3557905\" pubmed-id=\"23386978\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00471\"/><usage unique-ip=\"267\" full-text=\"265\" pdf=\"107\" abstract=\"3\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"111\" supp-data=\"13\" cited-by=\"2\"/></article><article id=\"PMC3564445\"><meta-data doi=\"10.7554/eLife.00515\" pmcid=\"PMC3564445\" pubmed-id=\"23390590\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00515\"/><usage unique-ip=\"22\" full-text=\"25\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"4\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3564446\"><meta-data doi=\"10.7554/eLife.00306\" pmcid=\"PMC3564446\" pubmed-id=\"23390587\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00306\"/><usage unique-ip=\"41\" full-text=\"52\" pdf=\"13\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"11\" supp-data=\"1\" cited-by=\"0\"/></article><article id=\"PMC3564447\"><meta-data doi=\"10.7554/eLife.00329\" pmcid=\"PMC3564447\" pubmed-id=\"23390589\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00329\"/><usage unique-ip=\"57\" full-text=\"83\" pdf=\"13\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"24\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3564448\"><meta-data doi=\"10.7554/eLife.00321\" pmcid=\"PMC3564448\" pubmed-id=\"23390588\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00321\"/><usage unique-ip=\"16\" full-text=\"19\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"1\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3564474\"><meta-data doi=\"10.7554/eLife.00105\" pmcid=\"PMC3564474\" pubmed-id=\"23390586\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00105\"/><usage unique-ip=\"31\" full-text=\"34\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3569938\"><meta-data doi=\"10.7554/eLife.00565\" pmcid=\"PMC3569938\" pubmed-id=\"23408481\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00565\"/><usage unique-ip=\"12\" full-text=\"14\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3576708\"><meta-data doi=\"10.7554/eLife.00573\" pmcid=\"PMC3576708\" pubmed-id=\"23426864\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00573\"/><usage unique-ip=\"31\" full-text=\"33\" pdf=\"8\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"6\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3576709\"><meta-data doi=\"10.7554/eLife.00571\" pmcid=\"PMC3576709\" pubmed-id=\"23426887\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00571\"/><usage unique-ip=\"21\" full-text=\"23\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3576710\"><meta-data doi=\"10.7554/eLife.00572\" pmcid=\"PMC3576710\" pubmed-id=\"23426937\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00572\"/><usage unique-ip=\"39\" full-text=\"35\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"12\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3576711\"><meta-data doi=\"10.7554/eLife.00291\" pmcid=\"PMC3576711\" pubmed-id=\"23426999\" pub-year=\"2013\" volume=\"2\" issue=\"\" 
first-page=\"e00291\"/><usage unique-ip=\"112\" full-text=\"133\" pdf=\"50\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"60\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3576727\"><meta-data doi=\"10.7554/eLife.00461\" pmcid=\"PMC3576727\" pubmed-id=\"23427024\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00461\"/><usage unique-ip=\"76\" full-text=\"79\" pdf=\"19\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"43\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3576809\"><meta-data doi=\"10.7554/eLife.00290\" pmcid=\"PMC3576809\" pubmed-id=\"23425906\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00290\"/><usage unique-ip=\"46\" full-text=\"58\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"27\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3578201\"><meta-data doi=\"10.7554/eLife.00333\" pmcid=\"PMC3578201\" pubmed-id=\"23550179\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00333\"/><usage unique-ip=\"78\" full-text=\"100\" pdf=\"22\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"39\" supp-data=\"1\" cited-by=\"0\"/></article><article id=\"PMC3579228\"><meta-data doi=\"10.7554/eLife.00488\" pmcid=\"PMC3579228\" pubmed-id=\"0\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00488\"/><usage unique-ip=\"12\" full-text=\"15\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3582987\"><meta-data doi=\"10.7554/eLife.00593\" pmcid=\"PMC3582987\" pubmed-id=\"23467495\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00593\"/><usage unique-ip=\"26\" full-text=\"29\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3582988\"><meta-data doi=\"10.7554/eLife.00400\" pmcid=\"PMC3582988\" pubmed-id=\"23467508\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00400\"/><usage unique-ip=\"53\" full-text=\"55\" pdf=\"14\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"23\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3583005\"><meta-data doi=\"10.7554/eLife.00348\" pmcid=\"PMC3583005\" pubmed-id=\"23467541\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00348\"/><usage unique-ip=\"96\" full-text=\"105\" pdf=\"22\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"27\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3589823\"><meta-data doi=\"10.7554/eLife.00615\" pmcid=\"PMC3589823\" pubmed-id=\"23470921\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00615\"/><usage unique-ip=\"22\" full-text=\"26\" pdf=\"2\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3589824\"><meta-data doi=\"10.7554/eLife.00337\" pmcid=\"PMC3589824\" pubmed-id=\"23471010\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00337\"/><usage unique-ip=\"46\" full-text=\"46\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"5\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3589825\"><meta-data doi=\"10.7554/eLife.00327\" pmcid=\"PMC3589825\" pubmed-id=\"23471103\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00327\"/><usage unique-ip=\"59\" full-text=\"61\" pdf=\"15\" abstract=\"0\" 
scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"25\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3591006\"><meta-data doi=\"10.7554/eLife.00133\" pmcid=\"PMC3591006\" pubmed-id=\"23482306\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00133\"/><usage unique-ip=\"68\" full-text=\"72\" pdf=\"22\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"32\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3591093\"><meta-data doi=\"10.7554/eLife.00218\" pmcid=\"PMC3591093\" pubmed-id=\"23483797\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00218\"/><usage unique-ip=\"48\" full-text=\"46\" pdf=\"11\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"23\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3591783\"><meta-data doi=\"10.7554/eLife.00190\" pmcid=\"PMC3591783\" pubmed-id=\"23482940\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00190\"/><usage unique-ip=\"42\" full-text=\"43\" pdf=\"8\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"2\" cited-by=\"0\"/></article><article id=\"PMC3592195\"><meta-data doi=\"10.7554/eLife.00577\" pmcid=\"PMC3592195\" pubmed-id=\"23538671\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00577\"/><usage unique-ip=\"46\" full-text=\"47\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"10\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3594797\"><meta-data doi=\"10.7554/eLife.00646\" pmcid=\"PMC3594797\" pubmed-id=\"23538735\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00646\"/><usage unique-ip=\"18\" full-text=\"21\" pdf=\"0\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3601633\"><meta-data doi=\"10.7554/eLife.00638\" pmcid=\"PMC3601633\" pubmed-id=\"23538852\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00638\"/><usage unique-ip=\"39\" full-text=\"41\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"19\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3601634\"><meta-data doi=\"10.7554/eLife.00641\" pmcid=\"PMC3601634\" pubmed-id=\"23539117\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00641\"/><usage unique-ip=\"62\" full-text=\"65\" pdf=\"14\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"16\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3601810\"><meta-data doi=\"10.7554/eLife.00336\" pmcid=\"PMC3601810\" pubmed-id=\"23539289\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00336\"/><usage unique-ip=\"98\" full-text=\"108\" pdf=\"24\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"56\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3601818\"><meta-data doi=\"10.7554/eLife.00378\" pmcid=\"PMC3601818\" pubmed-id=\"23539368\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00378\"/><usage unique-ip=\"78\" full-text=\"89\" pdf=\"21\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"18\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3601819\"><meta-data doi=\"10.7554/eLife.00354\" pmcid=\"PMC3601819\" pubmed-id=\"23539454\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00354\"/><usage unique-ip=\"103\" full-text=\"140\" pdf=\"27\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"36\" supp-data=\"4\" 
cited-by=\"0\"/></article><article id=\"PMC3602953\"><meta-data doi=\"10.7554/eLife.00605\" pmcid=\"PMC3602953\" pubmed-id=\"23538878\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00605\"/><usage unique-ip=\"65\" full-text=\"76\" pdf=\"17\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"18\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3602954\"><meta-data doi=\"10.7554/eLife.00312\" pmcid=\"PMC3602954\" pubmed-id=\"23538967\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00312\"/><usage unique-ip=\"158\" full-text=\"165\" pdf=\"58\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"70\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3608243\"><meta-data doi=\"10.7554/eLife.00648\" pmcid=\"PMC3608243\" pubmed-id=\"23539513\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00648\"/><usage unique-ip=\"71\" full-text=\"69\" pdf=\"16\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"20\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3608244\"><meta-data doi=\"10.7554/eLife.00642\" pmcid=\"PMC3608244\" pubmed-id=\"23539544\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00642\"/><usage unique-ip=\"40\" full-text=\"49\" pdf=\"7\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"0\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3608245\"><meta-data doi=\"10.7554/eLife.00625\" pmcid=\"PMC3608245\" pubmed-id=\"23539644\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00625\"/><usage unique-ip=\"46\" full-text=\"51\" pdf=\"16\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"7\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3608266\"><meta-data doi=\"10.7554/eLife.00260\" pmcid=\"PMC3608266\" pubmed-id=\"23538384\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00260\"/><usage unique-ip=\"103\" full-text=\"122\" pdf=\"45\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"39\" supp-data=\"6\" cited-by=\"0\"/></article><article id=\"PMC3610343\"><meta-data doi=\"10.7554/eLife.00269\" pmcid=\"PMC3610343\" pubmed-id=\"23543845\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00269\"/><usage unique-ip=\"57\" full-text=\"63\" pdf=\"10\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"9\" supp-data=\"2\" cited-by=\"0\"/></article><article id=\"PMC3614016\"><meta-data doi=\"10.7554/eLife.00278\" pmcid=\"PMC3614016\" pubmed-id=\"23577232\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00278\"/><usage unique-ip=\"39\" full-text=\"42\" pdf=\"7\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"16\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3614024\"><meta-data doi=\"10.7554/eLife.00655\" pmcid=\"PMC3614024\" pubmed-id=\"23577236\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00655\"/><usage unique-ip=\"27\" full-text=\"31\" pdf=\"5\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3614025\"><meta-data doi=\"10.7554/eLife.00435\" pmcid=\"PMC3614025\" pubmed-id=\"23577234\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00435\"/><usage unique-ip=\"100\" full-text=\"104\" pdf=\"32\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"62\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3614033\"><meta-data doi=\"10.7554/eLife.00367\" 
pmcid=\"PMC3614033\" pubmed-id=\"23577233\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00367\"/><usage unique-ip=\"49\" full-text=\"64\" pdf=\"15\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"9\" supp-data=\"8\" cited-by=\"0\"/></article><article id=\"PMC3614058\"><meta-data doi=\"10.7554/eLife.00639\" pmcid=\"PMC3614058\" pubmed-id=\"23577235\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00639\"/><usage unique-ip=\"46\" full-text=\"64\" pdf=\"8\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3622176\"><meta-data doi=\"10.7554/eLife.00692\" pmcid=\"PMC3622176\" pubmed-id=\"23580166\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00692\"/><usage unique-ip=\"27\" full-text=\"33\" pdf=\"5\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"5\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3622177\"><meta-data doi=\"10.7554/eLife.00444\" pmcid=\"PMC3622177\" pubmed-id=\"23580231\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00444\"/><usage unique-ip=\"60\" full-text=\"69\" pdf=\"22\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"26\" supp-data=\"4\" cited-by=\"0\"/></article><article id=\"PMC3622178\"><meta-data doi=\"10.7554/eLife.00426\" pmcid=\"PMC3622178\" pubmed-id=\"23580255\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00426\"/><usage unique-ip=\"63\" full-text=\"88\" pdf=\"18\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"29\" supp-data=\"5\" cited-by=\"0\"/></article><article id=\"PMC3622181\"><meta-data doi=\"10.7554/eLife.00499\" pmcid=\"PMC3622181\" pubmed-id=\"23580326\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00499\"/><usage unique-ip=\"80\" full-text=\"111\" pdf=\"37\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"22\" supp-data=\"0\" cited-by=\"1\"/></article><article id=\"PMC3622228\"><meta-data doi=\"10.7554/eLife.00659\" pmcid=\"PMC3622228\" pubmed-id=\"23580350\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00659\"/><usage unique-ip=\"37\" full-text=\"37\" pdf=\"10\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"11\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3622229\"><meta-data doi=\"10.7554/eLife.00663\" pmcid=\"PMC3622229\" pubmed-id=\"23580362\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00663\"/><usage unique-ip=\"46\" full-text=\"51\" pdf=\"15\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"11\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3628084\"><meta-data doi=\"10.7554/eLife.00415\" pmcid=\"PMC3628084\" pubmed-id=\"23599892\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00415\"/><usage unique-ip=\"40\" full-text=\"40\" pdf=\"10\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"14\" supp-data=\"0\" cited-by=\"1\"/></article><article id=\"PMC3628085\"><meta-data doi=\"10.7554/eLife.00458\" pmcid=\"PMC3628085\" pubmed-id=\"23599893\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00458\"/><usage unique-ip=\"120\" full-text=\"124\" pdf=\"36\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"43\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3628086\"><meta-data doi=\"10.7554/eLife.00362\" pmcid=\"PMC3628086\" pubmed-id=\"23599891\" pub-year=\"2013\" volume=\"2\" issue=\"\" 
first-page=\"e00362\"/><usage unique-ip=\"91\" full-text=\"120\" pdf=\"24\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"64\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3628087\"><meta-data doi=\"10.7554/eLife.00534\" pmcid=\"PMC3628087\" pubmed-id=\"23599896\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00534\"/><usage unique-ip=\"72\" full-text=\"72\" pdf=\"16\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"19\" supp-data=\"1\" cited-by=\"0\"/></article><article id=\"PMC3628110\"><meta-data doi=\"10.7554/eLife.00482\" pmcid=\"PMC3628110\" pubmed-id=\"23599895\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00482\"/><usage unique-ip=\"44\" full-text=\"51\" pdf=\"1\" abstract=\"1\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"21\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3628404\"><meta-data doi=\"10.7554/eLife.00729\" pmcid=\"PMC3628404\" pubmed-id=\"23599898\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00729\"/><usage unique-ip=\"62\" full-text=\"100\" pdf=\"12\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"8\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3628405\"><meta-data doi=\"10.7554/eLife.00459\" pmcid=\"PMC3628405\" pubmed-id=\"23599894\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00459\"/><usage unique-ip=\"39\" full-text=\"42\" pdf=\"9\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"45\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3628440\"><meta-data doi=\"10.7554/eLife.00676\" pmcid=\"PMC3628440\" pubmed-id=\"23599897\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00676\"/><usage unique-ip=\"18\" full-text=\"23\" pdf=\"3\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"2\" supp-data=\"0\" cited-by=\"0\"/></article><article id=\"PMC3629793\"><meta-data doi=\"10.7554/eLife.00288\" pmcid=\"PMC3629793\" pubmed-id=\"23606943\" pub-year=\"2013\" volume=\"2\" issue=\"\" first-page=\"e00288\"/><usage unique-ip=\"35\" full-text=\"41\" pdf=\"4\" abstract=\"0\" scanned-summary=\"0\" scanned-page-browse=\"0\" figure=\"11\" supp-data=\"3\" cited-by=\"0\"/></article></articles></pmc-web-stat>",
"provider_raw_version": 1,
"max_event_date": "2013-04-30T23:59:59.999999",
"provider": "pmc",
"aliases": {
"pmid": [
"23066504",
"23066507",
"23066508",
"23066509",
"23066506",
"23066503"
]
},
"type": "provider_data_dump",
"min_event_date": "2013-04-01T00:00:00",
"created": "2013-05-16T07:38:06.831119"
}
def tearDown(self):
teardown_postgres_for_unittests(self.db)
def test_make_provider_batch_data(self):
#make sure nothing there beforehand
matching = ProviderBatchData.query.filter_by(provider="pmc").first()
assert_equals(matching, None)
new_batch_data = ProviderBatchData(**self.test_data)
print new_batch_data
# still not there
matching = ProviderBatchData.query.filter_by(provider="pmc").first()
assert_equals(matching, None)
self.db.session.add(new_batch_data)
self.db.session.commit()
# and now poof there it is
matching = ProviderBatchData.query.filter_by(provider="pmc").first()
assert_equals(matching.provider, "pmc")
assert_equals(matching.aliases, self.test_data["aliases"])
def test_create_object_from_doc(self):
new_object = provider_batch_data.create_objects_from_doc(self.old_doc)
matching = ProviderBatchData.query.filter_by(provider="pmc").first()
assert_equals(matching.provider, "pmc")
assert_equals(matching.aliases, self.old_doc["aliases"])
| mit |
markusappel/McCode | tools/Python/obsoleted/mcdisplay-x3d/rewrite.py | 3 | 5048 | #!/usr/bin/env python
''' Small script to rewrite McStas trace output to an X3D scene for plotting '''
import argparse
import sys
import numpy as np
import x3d
from util import parse_multiline, rotate, get_line, debug, draw_circle
UC_COMP = 'COMPONENT:'
MC_COMP = 'MCDISPLAY: component'
MC_COMP_SHORT = 'COMP: '
MC_LINE = 'MCDISPLAY: multiline'
MC_CIRCLE = 'MCDISPLAY: circle'
MC_ENTER = 'ENTER:'
MC_LEAVE = 'LEAVE:'
MC_STATE = 'STATE:'
MC_SCATTER = 'SCATTER:'
MC_ABSORB = 'ABSORB:'
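# Input sketch (illustrative, inferred from the parsing below): the trace
# stream interleaves records such as
#   COMPONENT: "slit"
#   POS: x,y,z, r11,r12,r13, r21,r22,r23, r31,r32,r33
# plus MCDISPLAY multiline/circle geometry and ENTER/STATE/SCATTER/LEAVE events.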
colors = ["1.0, 0.0, 0.0","0.0, 1.0, 0.0","0.0, 0.0, 1.0",
"1.0, 1.0, 0.0","1.0, 0.0, 1.0","0.0, 1.0, 1.0",
"1.0, 1.0, 1.0","0.5, 1.0, 1.0","1.0, 0.5, 1.0",
"1.0, 1.0, 0.5","0.5, 0.0, 1.0","0.0, 0.5, 1.0",
"0.0, 1.0, 0.5","0.5, 1.0, 0.0","1.0, 0.5, 0.0",
"1.0, 0.0, 0.5","0.5, 0.0, 0.0","0.0, 0.5, 0.0",
"0.0, 0.0, 0.5","0.5, 0.5, 1.0","0.5, 1.0, 0.5",
"1.0, 0.5, 0.5","0.5, 0.0, 0.5","0.0, 0.5, 0.5",
"0.5, 0.5, 0.0","0.5, 0.5, 0.5"]
def getColor(n):
return colors[n % len(colors)]
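# e.g. getColor(0) returns "1.0, 0.0, 0.0"; with the 26 entries above,
# getColor(26) wraps back around to colors[0].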
def parse_trace(world, fp=sys.stdin, inspectComp=None):
    ''' Parse McStas trace output from stdin and write the result to output '''
color = 0
# def out_point((p_x, p_y, p_z)):
# ''' Write a line to csv_lines '''
# csv_lines.write('%s, %s, %s, %s\n' % (p_x, p_y, p_z, color))
# print headers
# csv_comps.write('name, x, y, z\n')
# csv_lines.write('x, y, z, c\n')
# map from component name to (position, rotation matrix)
comps = {}
# active (position, rotation matrix)
comp = (np.array([0, 0, 0]),
np.array([1, 0, 0,
0, 1, 0,
0, 0, 1]).reshape(3,3))
compName = ""
# we are following a neutron
active = False
# we need to draw the neutron (it passed the "check-point"/inspect component)
inspect = False
# list of observed neutron positions
neutron = []
# skip next neutron position
skip = False
    # total count of drawn neutrons
    neutrons_drawn = 0
while True:
# read line
line = get_line(fp)
if line is None:
break
# register components
if line.startswith(UC_COMP):
# grab info line
info = get_line(fp)
assert info[:4] == 'POS:'
nums = [x.strip() for x in info[4:].split(',')]
# extract fields
name = line[len(UC_COMP):].strip(' "\n')
pos = np.array([float(x) for x in nums[:3]])
# read flat 3x3 rotation matrix
rot = np.array([float(x) for x in nums[3:3+9]]).reshape(3, 3)
comps[name] = (pos, rot)
# csv_comps.write('%s, %s, %s, %s\n' % ((name,) + tuple(pos)))
# switch perspective
elif line.startswith(MC_COMP):
color += 1
name = line[len(MC_COMP) + 1:].strip()
compName = name
comp = comps[name]
elif line.startswith(MC_COMP_SHORT):
name = line[len(MC_COMP_SHORT) + 1:].strip('"')
compName = name
comp = comps[name]
skip = True
# process multiline
elif line.startswith(MC_LINE):
points = parse_multiline(line[len(MC_LINE):].strip('()'))
world.drawLine((rotate(p, comp) for p in points), color=getColor(color))
# process circle
elif line.startswith(MC_CIRCLE):
xyz = 'xyz'
items = line[len(MC_CIRCLE):].strip('()').split(',')
# plane
pla = [xyz.find(a) for a in items[0].strip("''")]
# center and radius
pos = [float(x) for x in items[1:4]]
rad = float(items[4])
points = draw_circle(pla, pos, rad, comp)
world.drawLine(points, color=getColor(color))
# activate neutron when it enters
elif line.startswith(MC_ENTER):
neutron = []
skip = True
active = True
inspect = False
color += 1
# deactivate neutron when it leaves
elif line.startswith(MC_LEAVE) or line.startswith(MC_ABSORB):
active = False
if inspectComp is None or inspect:
world.drawLine(neutron, color="1 0 0")
                neutrons_drawn += 1
# register state and scatter
elif line.startswith(MC_STATE) or line.startswith(MC_SCATTER):
if not active:
continue
if skip:
skip = False
continue
if inspectComp and inspectComp == compName:
# We will draw this neutron!
inspect = True
# keep track of points the neutron passes through
xyz = [float(x) for x in line[line.find(':')+1:].split(',')[:3]]
xyz = rotate(xyz, comp)
neutron.append(xyz)
    print('Neutrons drawn:', neutrons_drawn, (inspectComp and '(reaching %s)' % inspectComp or '(all)'))
return world
| gpl-2.0 |
Juniper/contrail-dev-controller | src/config/vnc_openstack/vnc_openstack/tests/test_basic.py | 5 | 2533 | import sys
import json
sys.path.append('../common/tests')
from testtools.matchers import Equals, Contains
from test_utils import *
import test_common
import test_case
class NBTestExtraFieldsPresenceCodeDefault(test_case.NeutronBackendTestCase):
def test_extra_fields_on_network(self):
test_obj = self._create_test_object()
context = {'operation': 'READ',
'user_id': '',
'roles': ''}
data = {'fields': None,
'id': test_obj.uuid}
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/network', body)
net_dict = json.loads(resp.text)
self.assertIn('contrail:fq_name', net_dict)
# end test_extra_fields_on_network
# end class NBTestExtraFieldsPresenceCodeDefault
class NBTestExtraFieldsPresenceByKnob(test_case.NeutronBackendTestCase):
def __init__(self, *args, **kwargs):
super(NBTestExtraFieldsPresenceByKnob, self).__init__(*args, **kwargs)
self._config_knobs.append(('NEUTRON', 'contrail_extensions_enabled', True))
# end __init__
def test_extra_fields_on_network(self):
test_obj = self._create_test_object()
context = {'operation': 'READ',
'user_id': '',
'roles': ''}
data = {'fields': None,
'id': test_obj.uuid}
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/network', body)
net_dict = json.loads(resp.text)
self.assertIn('contrail:fq_name', net_dict)
# end test_extra_fields_on_network
# end class NBTestExtraFieldsPresenceByKnob
class NBTestExtraFieldsAbsenceByKnob(test_case.NeutronBackendTestCase):
def __init__(self, *args, **kwargs):
super(NBTestExtraFieldsAbsenceByKnob, self).__init__(*args, **kwargs)
self._config_knobs.append(('NEUTRON', 'contrail_extensions_enabled', False))
# end __init__
def test_no_extra_fields_on_network(self):
test_obj = self._create_test_object()
context = {'operation': 'READ',
'user_id': '',
'roles': ''}
data = {'fields': None,
'id': test_obj.uuid}
body = {'context': context, 'data': data}
resp = self._api_svr_app.post_json('/neutron/network', body)
net_dict = json.loads(resp.text)
self.assertNotIn('contrail:fq_name', net_dict)
# end test_extra_fields_on_network
# end class NBTestExtraFieldsAbsenceByKnob
| apache-2.0 |
mattbernst/polyhartree | support/ansible/runner/lookup_plugins/dnstxt.py | 166 | 2221 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
HAVE_DNS=False
try:
import dns.resolver
from dns.exception import DNSException
HAVE_DNS=True
except ImportError:
pass
# ==============================================================
# DNSTXT: DNS TXT records
#
# key=domainname
# TODO: configurable resolver IPs
# --------------------------------------------------------------
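# Example task (illustrative only; the plugin returns one concatenated
# TXT string per term):
#   - debug: msg="TXT for example.com is {{ lookup('dnstxt', 'example.com') }}"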
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if HAVE_DNS == False:
raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
ret = []
for term in terms:
domain = term.split()[0]
string = []
try:
answers = dns.resolver.query(domain, 'TXT')
for rdata in answers:
s = rdata.to_text()
string.append(s[1:-1]) # Strip outside quotes on TXT rdata
except dns.resolver.NXDOMAIN:
string = 'NXDOMAIN'
except dns.resolver.Timeout:
string = ''
except dns.exception.DNSException, e:
raise errors.AnsibleError("dns.resolver unhandled exception", e)
ret.append(''.join(string))
return ret
| gpl-3.0 |
a9261/flexx | flexx/ui/test.py | 20 | 6110 | import time
import logging
import flexx
from flexx import app, ui
import faulthandler
faulthandler.enable()
#logging.log
class MyApp(ui.Widget):
#_config = ui.App.Config(title='Flexx test app', size=(400, 300),
# )#icon='https://assets-cdn.github.com/favicon.ico')
def init(self):
#self.b0 = ui.Button(self, 'This is behind the box layout')
TEST = 3
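        # TEST selects one of the layout demos in the branches below (1-8).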
if TEST == 1:
with ui.VBox(self, flex=1) as self.hbox1:
self.b1 = ui.Button(text='Hola', flex=1)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b3 = ui.Button(text='Foo bar', flex=1)
if TEST == 2:
with self:
with ui.HBox():
ui.Widget(flex=1)
with ui.VBox(flex=0) as self.vbox:
ui.Label(text='Flex 0 0 0', flex=0)
with ui.HBox(flex=0) as self.hbox2:
self.b1 = ui.Button(text='Hola', flex=0)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b3 = ui.Button(text='Foo bar', flex=0)
ui.Label(text='Flex 1 0 3', flex=0)
with ui.HBox(flex=0) as self.hbox1:
self.b1 = ui.Button(text='Hola', flex=1)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b3 = ui.Button(text='Foo bar', flex=3)
ui.Label(text='margin 10 (around layout)', flex=0)
with ui.HBox(flex=0, margin=10) as self.hbox2:
self.b1 = ui.Button(text='Hola', flex=0)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b3 = ui.Button(text='Foo bar', flex=0)
ui.Label(text='spacing 10 (inter-widget)', flex=0)
with ui.HBox(flex=0, spacing=10) as self.hbox2:
self.b1 = ui.Button(text='Hola', flex=0)
self.b2 = ui.Button(text='Hello world', flex=0)
self.b3 = ui.Button(text='Foo bar', flex=0)
ui.Widget(flex=1)
ui.Label(text='Note the spacer Widget above', flex=0)
if TEST == 3:
with ui.HBox(self, spacing=20):
with ui.FormLayout() as self.form:
# todo: can this be written with one line per row?
# e.g. self.b1 = ui.Button(label='Name', text='Hola')
ui.Label(text='Name:')
self.b1 = ui.Button(text='Hola')
ui.Label(text='Age:')
self.b2 = ui.Button(text='Hello world')
ui.Label(text='Favorite color:')
self.b3 = ui.Button(text='Foo bar')
with ui.FormLayout() as self.form:
# e.g. self.b1 = ui.Button(label='Name', text='Hola')
ui.Widget(flex=1) # Add a flexer
ui.Widget()
ui.Label(text='Pet name:')
self.b1 = ui.Button(text='Hola')
ui.Label(text='Pet Age:')
self.b2 = ui.Button(text='Hello world')
ui.Label(text='Pet\'s Favorite color:')
self.b3 = ui.Button(text='Foo bar')
ui.Widget(flex=1); ui.Widget()
if TEST == 4:
with ui.GridLayout(self) as self.grid:
self.b1 = ui.Button(text='No flex', pos=(0, 0))
self.b2 = ui.Button(text='Hola', pos=(1, 1), flex=(1, 1))
self.b3 = ui.Button(text='Hello world', pos=(2, 2), flex=(2, 1))
self.b4 = ui.Button(text='Foo bar', pos=(4, 4), flex=(1, 2))
self.b5 = ui.Button(text='no flex again', pos=(5, 5))
if TEST == 5:
with ui.PinboardLayout(self) as self.grid:
self.b1 = ui.Button(text='Stuck at (20, 20)', pos=(20, 30))
self.b2 = ui.Button(text='Dynamic at (20%, 20%)', pos=(0.2, 0.2))
self.b3 = ui.Button(text='Dynamic at (50%, 70%)', pos=(0.5, 0.7))
if TEST == 6:
with ui.HSplitter(self):
ui.Button(text='Right A', min_size=(120, 0))
ui.Button(text='Right B', min_size=(70, 0))
ui.Button(text='Right C')
# ui.Button(text='Right D', flex=2)
# ui.Button(text='Right E', flex=2)
# ui.Button(text='Right F', flex=2)
if TEST == 7:
with ui.HBox(self):
ui.Button(text='Button in hbox', flex=0)
with ui.HSplit(flex=1):
ui.Button(text='Button in splitter', min_size=(100, 0))
with ui.HBox(min_size=(100, 0)):
ui.Button(text='Right A', flex=0, css='background:#f00; padding:2em;')
ui.Button(text='Right B', flex=1)
ui.Button(text='Right C', flex=2)
if TEST == 8:
with ui.MenuBar(self):
with ui.MenuItem(text='File'):
ui.MenuItem(text='New')
ui.MenuItem(text='Open')
ui.MenuItem(text='Save')
with ui.MenuItem(text='Edit'):
ui.MenuItem(text='Cut')
ui.MenuItem(text='Copy')
ui.MenuItem(text='Paste')
#self.win = ui.Window(self, 'A new window!')
a = app.launch(MyApp, 'firefox')
app.start()
# app.b1.set_text('asdasd')
# MyApp.export('/home/almar/test.html')
| bsd-2-clause |
hoangt/tpzsimul.gem5 | src/dev/arm/Gic.py | 24 | 2982 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from m5.proxy import *
from Device import PioDevice
from Platform import Platform
class BaseGic(PioDevice):
type = 'BaseGic'
abstract = True
cxx_header = "dev/arm/base_gic.hh"
platform = Param.Platform(Parent.any, "Platform this device is part of.")
class Pl390(BaseGic):
type = 'Pl390'
cxx_header = "dev/arm/gic_pl390.hh"
dist_addr = Param.Addr(0x1f001000, "Address for distributor")
cpu_addr = Param.Addr(0x1f000100, "Address for cpu")
msix_addr = Param.Addr(0x0, "Address for MSI-X register")
dist_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to distributor")
cpu_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to cpu interface")
int_latency = Param.Latency('10ns', "Delay for interrupt to get to CPU")
it_lines = Param.UInt32(128, "Number of interrupt lines supported (max = 1020)")
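    # Minimal configuration sketch (hypothetical addresses; any Param above
    # can be overridden as a keyword argument):
    #   system.gic = Pl390(dist_addr=0x2c001000, cpu_addr=0x2c002000)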
| bsd-3-clause |
nwswanson/epulorimiter | lib/wtforms/ext/appengine/ndb.py | 174 | 17124 | """
Form generation utilities for App Engine's new ``ndb.Model`` class.
The goal of ``model_form()`` is to provide a clean, explicit and predictable
way to create forms based on ``ndb.Model`` classes. No juggling or black
magic should be necessary to generate a form for models, and to add custom
non-model related fields: ``model_form()`` simply generates a form class
that can be used as it is, or that can be extended directly or even be used
to create other forms using ``model_form()``.
Example usage:
.. code-block:: python
from google.appengine.ext import ndb
from wtforms.ext.appengine.ndb import model_form
# Define an example model and add a record.
class Contact(ndb.Model):
name = ndb.StringProperty(required=True)
city = ndb.StringProperty()
age = ndb.IntegerProperty(required=True)
is_admin = ndb.BooleanProperty(default=False)
new_entity = Contact(key_name='test', name='Test Name', age=17)
new_entity.put()
# Generate a form based on the model.
ContactForm = model_form(Contact)
# Get a form populated with entity data.
entity = Contact.get_by_key_name('test')
form = ContactForm(obj=entity)
Properties from the model can be excluded from the generated form, or it can
include just a set of properties. For example:
.. code-block:: python
# Generate a form based on the model, excluding 'city' and 'is_admin'.
ContactForm = model_form(Contact, exclude=('city', 'is_admin'))
# or...
# Generate a form based on the model, only including 'name' and 'age'.
ContactForm = model_form(Contact, only=('name', 'age'))
The form can be generated setting field arguments:
.. code-block:: python
ContactForm = model_form(Contact, only=('name', 'age'), field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
}
})
The class returned by ``model_form()`` can be used as a base class for forms
mixing non-model fields and/or other model forms. For example:
.. code-block:: python
# Generate a form based on the model.
BaseContactForm = model_form(Contact)
# Generate a form based on other model.
ExtraContactForm = model_form(MyOtherModel)
class ContactForm(BaseContactForm):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Add the other model form as a subform.
extra = f.FormField(ExtraContactForm)
The class returned by ``model_form()`` can also extend an existing form
class:
.. code-block:: python
class BaseContactForm(Form):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Generate a form based on the model.
ContactForm = model_form(Contact, base_class=BaseContactForm)
"""
from wtforms import Form, validators, fields as f
from wtforms.compat import string_types
from wtforms.ext.appengine.fields import GeoPtPropertyField, KeyPropertyField, StringListPropertyField, IntegerListPropertyField
def get_TextField(kwargs):
"""
Returns a ``TextField``, applying the ``ndb.StringProperty`` length limit
of 500 bytes.
"""
kwargs['validators'].append(validators.length(max=500))
return f.TextField(**kwargs)
def get_IntegerField(kwargs):
"""
Returns an ``IntegerField``, applying the ``ndb.IntegerProperty`` range
limits.
"""
v = validators.NumberRange(min=-0x8000000000000000, max=0x7fffffffffffffff)
kwargs['validators'].append(v)
return f.IntegerField(**kwargs)
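# Note: the bounds used above are the signed 64-bit integer range that
# ndb.IntegerProperty can store.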
class ModelConverterBase(object):
def __init__(self, converters=None):
"""
Constructs the converter, setting the converter callables.
:param converters:
A dictionary of converter callables for each property type. The
callable must accept the arguments (model, prop, kwargs).
"""
self.converters = {}
for name in dir(self):
if not name.startswith('convert_'):
continue
self.converters[name[8:]] = getattr(self, name)
def convert(self, model, prop, field_args):
"""
Returns a form field for a single model property.
:param model:
The ``db.Model`` class that contains the property.
:param prop:
The model property: a ``db.Property`` instance.
:param field_args:
Optional keyword arguments to construct the field.
"""
prop_type_name = type(prop).__name__
# Check for generic property
if(prop_type_name == "GenericProperty"):
# Try to get type from field args
generic_type = field_args.get("type")
if generic_type:
prop_type_name = field_args.get("type")
            # If no type is found, the generic property falls back to the
            # string field set up in convert_GenericProperty
kwargs = {
'label': prop._code_name.replace('_', ' ').title(),
'default': prop._default,
'validators': [],
}
if field_args:
kwargs.update(field_args)
if prop._required and prop_type_name not in self.NO_AUTO_REQUIRED:
kwargs['validators'].append(validators.required())
if kwargs.get('choices', None):
# Use choices in a select field.
kwargs['choices'] = [(v, v) for v in kwargs.get('choices')]
return f.SelectField(**kwargs)
if prop._choices:
# Use choices in a select field.
kwargs['choices'] = [(v, v) for v in prop._choices]
return f.SelectField(**kwargs)
else:
converter = self.converters.get(prop_type_name, None)
if converter is not None:
return converter(model, prop, kwargs)
else:
return self.fallback_converter(model, prop, kwargs)
class ModelConverter(ModelConverterBase):
"""
Converts properties from a ``ndb.Model`` class to form fields.
Default conversions between properties and fields:
    +====================+===================+==============+==================+
    | Property subclass  | Field subclass    | datatype     | notes            |
    +====================+===================+==============+==================+
    | StringProperty     | TextField         | unicode      | TextArea         | repeated support
    |                    |                   |              | if multiline     |
    +--------------------+-------------------+--------------+------------------+
    | BooleanProperty    | BooleanField      | bool         |                  |
    +--------------------+-------------------+--------------+------------------+
    | IntegerProperty    | IntegerField      | int or long  |                  | repeated support
    +--------------------+-------------------+--------------+------------------+
    | FloatProperty      | TextField         | float        |                  |
    +--------------------+-------------------+--------------+------------------+
    | DateTimeProperty   | DateTimeField     | datetime     | skipped if       |
    |                    |                   |              | auto_now[_add]   |
    +--------------------+-------------------+--------------+------------------+
    | DateProperty       | DateField         | date         | skipped if       |
    |                    |                   |              | auto_now[_add]   |
    +--------------------+-------------------+--------------+------------------+
    | TimeProperty       | DateTimeField     | time         | skipped if       |
    |                    |                   |              | auto_now[_add]   |
    +--------------------+-------------------+--------------+------------------+
    | TextProperty       | TextAreaField     | unicode      |                  |
    +--------------------+-------------------+--------------+------------------+
    | GeoPtProperty      | TextField         | db.GeoPt     |                  |
    +--------------------+-------------------+--------------+------------------+
    | KeyProperty        | KeyPropertyField  | ndb.Key      |                  |
    +--------------------+-------------------+--------------+------------------+
    | BlobKeyProperty    | None              | ndb.BlobKey  | always skipped   |
    +--------------------+-------------------+--------------+------------------+
    | UserProperty       | None              | users.User   | always skipped   |
    +--------------------+-------------------+--------------+------------------+
    | StructuredProperty | None              | ndb.Model    | always skipped   |
    +--------------------+-------------------+--------------+------------------+
    | LocalStructuredPro | None              | ndb.Model    | always skipped   |
    +--------------------+-------------------+--------------+------------------+
    | JsonProperty       | TextField         | unicode      |                  |
    +--------------------+-------------------+--------------+------------------+
    | PickleProperty     | None              | bytedata     | always skipped   |
    +--------------------+-------------------+--------------+------------------+
    | GenericProperty    | None              | generic      | always skipped   |
    +--------------------+-------------------+--------------+------------------+
    | ComputedProperty   | None              |              | always skipped   |
    +====================+===================+==============+==================+
"""
# Don't automatically add a required validator for these properties
NO_AUTO_REQUIRED = frozenset(['ListProperty', 'StringListProperty', 'BooleanProperty'])
def convert_StringProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.StringProperty``."""
if prop._repeated:
return StringListPropertyField(**kwargs)
kwargs['validators'].append(validators.length(max=500))
return get_TextField(kwargs)
def convert_BooleanProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.BooleanProperty``."""
return f.BooleanField(**kwargs)
def convert_IntegerProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.IntegerProperty``."""
if prop._repeated:
return IntegerListPropertyField(**kwargs)
return get_IntegerField(kwargs)
def convert_FloatProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.FloatProperty``."""
return f.FloatField(**kwargs)
def convert_DateTimeProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.DateTimeProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
return f.DateTimeField(format='%Y-%m-%d %H:%M:%S', **kwargs)
def convert_DateProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.DateProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
return f.DateField(format='%Y-%m-%d', **kwargs)
def convert_TimeProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.TimeProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
return f.DateTimeField(format='%H:%M:%S', **kwargs)
def convert_RepeatedProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_UserProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.UserProperty``."""
return None
def convert_StructuredProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_LocalStructuredProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_JsonProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_PickleProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
return None
def convert_GenericProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ListProperty``."""
kwargs['validators'].append(validators.length(max=500))
return get_TextField(kwargs)
def convert_BlobKeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.BlobKeyProperty``."""
return f.FileField(**kwargs)
def convert_TextProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.TextProperty``."""
return f.TextAreaField(**kwargs)
def convert_ComputedProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ComputedProperty``."""
return None
def convert_GeoPtProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.GeoPtProperty``."""
return GeoPtPropertyField(**kwargs)
def convert_KeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.KeyProperty``."""
if 'reference_class' not in kwargs:
try:
reference_class = prop._kind
except AttributeError:
reference_class = prop._reference_class
if isinstance(reference_class, string_types):
# reference class is a string, try to retrieve the model object.
mod = __import__(model.__module__, None, None, [reference_class], 0)
reference_class = getattr(mod, reference_class)
kwargs['reference_class'] = reference_class
kwargs.setdefault('allow_blank', not prop._required)
return KeyPropertyField(**kwargs)
def model_fields(model, only=None, exclude=None, field_args=None,
converter=None):
"""
Extracts and returns a dictionary of form fields for a given
``db.Model`` class.
:param model:
The ``db.Model`` class to extract fields from.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to a keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
# Get the field names we want to include or exclude, starting with the
# full list of model properties.
props = model._properties
field_names = list(x[0] for x in sorted(props.items(), key=lambda x: x[1]._creation_counter))
if only:
field_names = list(f for f in only if f in field_names)
elif exclude:
field_names = list(f for f in field_names if f not in exclude)
# Create all fields.
field_dict = {}
for name in field_names:
field = converter.convert(model, props[name], field_args.get(name))
if field is not None:
field_dict[name] = field
return field_dict
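# Illustrative use (Contact as in the module docstring):
#   model_fields(Contact, only=('name', 'age'))
# returns a dict of unbound fields, roughly
#   {'name': <UnboundField(TextField)>, 'age': <UnboundField(IntegerField)>}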
def model_form(model, base_class=Form, only=None, exclude=None, field_args=None,
converter=None):
"""
Creates and returns a dynamic ``wtforms.Form`` class for a given
``ndb.Model`` class. The form class can be used as it is or serve as a base
for extended form classes, which can then mix non-model related fields,
subforms with other model forms, among other possibilities.
:param model:
The ``ndb.Model`` class to generate a form for.
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
# Extract the fields from the model.
field_dict = model_fields(model, only, exclude, field_args, converter)
# Return a dynamically created form class, extending from base_class and
# including the created fields as properties.
return type(model._get_kind() + 'Form', (base_class,), field_dict)
| gpl-2.0 |
mushkevych/scheduler | tests/test_site_monthly_aggregator.py | 1 | 1475 | __author__ = 'Bohdan Mushkevych'
import unittest
from settings import enable_test_mode
enable_test_mode()
from db.model.site_statistics import DOMAIN_NAME, TIMEPERIOD
from constants import PROCESS_SITE_MONTHLY
from tests import daily_fixtures
from tests import monthly_fixtures
from tests.test_abstract_worker import AbstractWorkerUnitTest
from workers.site_monthly_aggregator import SiteMonthlyAggregator
class SiteMonthlyAggregatorUnitTest(AbstractWorkerUnitTest):
def virtual_set_up(self):
super(SiteMonthlyAggregatorUnitTest, self).constructor(baseclass=SiteMonthlyAggregator,
process_name=PROCESS_SITE_MONTHLY,
output_prefix='EXPECTED_SITE_MONTHLY',
output_module=monthly_fixtures,
generate_output=False,
compare_results=True)
daily_fixtures.clean_site_entries()
return daily_fixtures.generated_site_entries()
def virtual_tear_down(self):
daily_fixtures.clean_site_entries()
def _get_key(self, obj):
return obj[DOMAIN_NAME], obj[TIMEPERIOD]
def test_aggregation(self):
super(SiteMonthlyAggregatorUnitTest, self).perform_aggregation()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
gnuhub/intellij-community | python/lib/Lib/decimal.py | 74 | 183326 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module is currently Py2.3 compatible and should be kept that way
# unless a major compelling advantage arises. IOW, 2.3 compatibility is
# strongly preferred, but not guaranteed.
# Also, this module should be kept in sync with the latest updates of
# the IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is a Py2.3 implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
www2.hursley.ibm.com/decimal/decarith.html
and IEEE standard 854-1987:
www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of the expected Decimal("0.00") returned by decimal floating point).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal("0")
>>> Decimal("1")
Decimal("1")
>>> Decimal("-.0123")
Decimal("-0.0123")
>>> Decimal(123456)
Decimal("123456")
>>> Decimal("123.45e12345678901234567890")
Decimal("1.2345E+12345678901234567892")
>>> Decimal("1.33") + Decimal("1.27")
Decimal("2.60")
>>> Decimal("12.34") + Decimal("3.87") - Decimal("18.41")
Decimal("-2.20")
>>> dig = Decimal(1)
>>> print dig / Decimal(3)
0.333333333
>>> getcontext().prec = 18
>>> print dig / Decimal(3)
0.333333333333333333
>>> print dig.sqrt()
1
>>> print Decimal(3).sqrt()
1.73205080756887729
>>> print Decimal(3) ** 123
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print inf
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print neginf
-Infinity
>>> print neginf + inf
NaN
>>> print neginf * inf
-Infinity
>>> print dig / 0
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print dig / 0
Traceback (most recent call last):
...
...
...
DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal("NaN")
>>> c.traps[InvalidOperation] = 1
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> print c.divide(Decimal(0), Decimal(0))
Traceback (most recent call last):
...
...
...
InvalidOperation: 0 / 0
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print c.divide(Decimal(0), Decimal(0))
NaN
>>> print c.flags[InvalidOperation]
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext'
]
import copy as _copy
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
    If an exception derives from another exception besides this (such as
    Underflow, which also derives from Inexact, Rounded and Subnormal), that
    indicates that it is only called if the others are present. This isn't
    actually used for anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return Infsign[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return Infsign[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return Infsign[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return Infsign[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
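# Worked example (illustrative; prec=3, Emax=99): rounding 9.9999E+99 signals
# Overflow. Under ROUND_HALF_EVEN the handler above returns Infinity, while
# under ROUND_DOWN it returns the largest finite value, Decimal("9.99E+99").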
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.currentThread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
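# Typical use (illustrative): each thread sees its own context, so
#   getcontext().prec = 6          # affects only the calling thread
#   setcontext(ExtendedContext)    # installs a fresh copy of that template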
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
import sys
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del sys, MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.currentThread(), '__decimal_context__'):
del threading.currentThread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.currentThread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.currentThread().__decimal_context__
except AttributeError:
context = Context()
threading.currentThread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
"""
# The string below can't be included in the docstring until Python 2.6
# as the doctest module doesn't understand __future__ statements
"""
>>> from __future__ import with_statement
>>> print getcontext().prec
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print ctx.prec
...
30
>>> with localcontext(ExtendedContext):
... print getcontext().prec
...
9
>>> print getcontext().prec
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal("3.14")
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal("3.14")
>>> Decimal(314) # int or long
Decimal("314")
>>> Decimal(Decimal(314)) # another decimal instance
Decimal("314")
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
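        # Illustrative match for "1.23e-5": sign=None, int='1', frac='23',
        # exp='-5'; a string like "sNaN123" instead populates the 'signal'
        # and 'diag' groups.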
if isinstance(value, basestring):
m = _parser(value)
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac')
exp = int(m.group('exp') or '0')
if fracpart is not None:
self._int = str((intpart+fracpart).lstrip('0') or '0')
self._exp = exp - len(fracpart)
else:
self._int = str(intpart.lstrip('0') or '0')
self._exp = exp
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(diag.lstrip('0'))
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, (int,long)):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], (int, long)) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, (int, long)) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], (int, long)):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
raise TypeError("Cannot convert float to Decimal. " +
"First convert the float to a string")
raise TypeError("Cannot convert %r to Decimal" % value)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def __nonzero__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def __cmp__(self, other):
other = _convert_other(other)
if other is NotImplemented:
# Never return NotImplemented
return 1
if self._is_special or other._is_special:
# check for nans, without raising on a signaling nan
if self._isnan() or other._isnan():
return 1 # Comparison involving NaNs always reports self > other
# INF = INF
return cmp(self._isinfinity(), other._isinfinity())
# check for zeros; note that cmp(0, -0) should return 0
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
return cmp(self_padded, other_padded) * (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
def __eq__(self, other):
if not isinstance(other, (Decimal, int, long)):
return NotImplemented
return self.__cmp__(other) == 0
def __ne__(self, other):
if not isinstance(other, (Decimal, int, long)):
return NotImplemented
return self.__cmp__(other) != 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self.__cmp__(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# Decimal integers must hash the same as the ints
#
# The hash of a nonspecial noninteger Decimal must depend only
# on the value of that Decimal, and not on its representation.
# For example: hash(Decimal("100E-1")) == hash(Decimal("10")).
if self._is_special:
if self._isnan():
raise TypeError('Cannot hash a NaN value.')
return hash(str(self))
if not self:
return 0
if self._isinteger():
op = _WorkRep(self.to_integral_value())
return hash((-1)**op.sign*op.int*10**op.exp)
# The value of a nonzero nonspecial Decimal instance is
# faithfully represented by the triple consisting of its sign,
# its adjusted exponent, and its coefficient with trailing
# zeros removed.
return hash((self._sign,
self._exp+len(self._int),
self._int.rstrip('0')))
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return (self._sign, tuple(map(int, self._int)), self._exp)
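# Example (illustrative):
#   >>> Decimal("-3.14").as_tuple()
#   (1, (3, 1, 4), -2)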
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return 'Decimal("%s")' % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
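# Examples (illustrative, assuming the default context with capitals=1):
#   >>> str(Decimal("123E5"))       # exponent needed: adjusted exponent is 7
#   '1.23E+7'
#   >>> str(Decimal("0.00012"))     # no exponent: leftdigits = -3 > -6
#   '0.00012'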
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
The same rules as in __str__ determine when exponential notation is used.
"""
return self.__str__(eng=True, context=context)
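# Example (illustrative): the exponent is forced to a multiple of 3.
#   >>> Decimal("123E+1").to_eng_string()
#   '1.23E+3'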
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds if the result has more digits than the current precision allows.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if not self:
# -Decimal('0') is Decimal('0'), not Decimal('-0')
ans = self.copy_abs()
else:
ans = self.copy_negate()
if context is None:
context = getcontext()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if it has more than precision digits).
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if not self:
# + (-0) = 0
ans = self.copy_abs()
else:
ans = Decimal(self)
if context is None:
context = getcontext()
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) raises an InvalidOperation error.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
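# Examples (illustrative): operands are aligned on the smaller exponent,
# so exact sums keep their trailing digits.
#   >>> Decimal("1.1") + Decimal("2.2")
#   Decimal("3.3")
#   >>> Decimal("1.02") + Decimal("1.1")
#   Decimal("2.12")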
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raises InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return Infsign[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return Infsign[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __div__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return Infsign[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
__truediv__ = __div__
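# Examples (illustrative, assuming the default 28-digit precision): exact
# results are reduced toward the ideal exponent; inexact ones fill prec digits.
#   >>> Decimal(5) / Decimal(2)
#   Decimal("2.5")
#   >>> Decimal(1) / Decimal(3)
#   Decimal("0.3333333333333333333333333333")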
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rdiv__(self, other, context=None):
"""Swaps self/other and returns __div__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__div__(self, context=context)
__rtruediv__ = __rdiv__
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (Infsign[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
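# Example (illustrative): the integer quotient truncates toward zero, unlike
# Python's ints, so divmod(Decimal(-7), Decimal(3)) differs from divmod(-7, 3).
#   >>> divmod(Decimal(7), Decimal(3))
#   (Decimal("2"), Decimal("1"))
#   >>> divmod(Decimal(-7), Decimal(3))
#   (Decimal("-2"), Decimal("-1"))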
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0, satisfying abs(remainder_near) <= abs(other)/2.
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
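# Example (illustrative): the remainder closest to zero may be negative.
#   >>> Decimal(18).remainder_near(Decimal(10))
#   Decimal("-2")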
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return Infsign[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
return float(str(self))
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
context = getcontext()
return context._raise_error(InvalidContext)
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to long")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
def __long__(self):
"""Converts to a long.
Equivalent to long(int(self))
"""
return long(self.__int__())
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if _clamp=0,
# precision-1 if _clamp=1.
max_payload_len = context.prec - context._clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if _clamp==0, and between Etiny and Etop if _clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context._clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
context._raise_error(Inexact)
context._raise_error(Rounded)
return context._raise_error(Overflow, 'above Emax', self._sign)
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
context._raise_error(Subnormal)
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
context._raise_error(Rounded)
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
this_function = getattr(self, self._pick_rounding_function[context.rounding])
changed = this_function(digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
ans = _dec_from_triple(self._sign, coeff, exp_min)
if changed:
context._raise_error(Inexact)
if self_is_subnormal:
context._raise_error(Underflow)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
elif len(ans._int) == context.prec+1:
# we get here only if rescaling rounds the
# coefficient up to exactly 10**context.prec
if ans._exp < Etop:
ans = _dec_from_triple(ans._sign,
ans._int[:-1], ans._exp+1)
else:
# Inexact and Rounded have already been raised
ans = context._raise_error(Overflow, 'above Emax',
self._sign)
return ans
# fold down if _clamp == 1 and self has too few digits
if context._clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
_pick_rounding_function = {}
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = Infsign[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = Infsign[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
third = _convert_other(third, raiseit=True)
return product.__add__(third, context)
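# Example (illustrative): the product 2*3 is formed exactly, then 5 is
# added with a single final rounding.
#   >>> Decimal(2).fma(3, 5)
#   Decimal("11")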
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
# if can't convert other and modulo to Decimal, raise
# TypeError; there's no point returning NotImplemented (no
# equivalent of __rpow__ for three argument pow)
other = _convert_other(other, raiseit=True)
modulo = _convert_other(modulo, raiseit=True)
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in xrange(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and
# y for the value of other. Write x = xc*10**xe and y =
# yc*10**ye.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. First normalize both x and y. We
# express y as a rational m/n, with m and n relatively prime
# and n>0. Then for x**y to be exactly representable (at
# *any* precision), xc must be the nth power of a positive
# integer and xe must be divisible by n. If m is negative
# then additionally xc must be a power of either 2 or 5, hence
# a power of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
if ye >= 0:
exponent = xe*yc*10**ye
else:
exponent, remainder = divmod(xe*yc, 10**-ye)
if remainder:
return None
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# find e*y and xe*y; both must be integers
if ye >= 0:
y_as_int = yc*10**ye
e = e*y_as_int
xe = xe*y_as_int
else:
ten_pow = 10**-ye
e, remainder = divmod(e*yc, ten_pow)
if remainder:
return None
xe, remainder = divmod(xe*yc, ten_pow)
if remainder:
return None
if e*65 >= p*93: # 93/65 > log(10)/log(5)
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
if ye >= 0:
y_as_integer = yc*10**ye
e = e*y_as_integer
xe = xe*y_as_integer
else:
ten_pow = 10**-ye
e, remainder = divmod(e*yc, ten_pow)
if remainder:
return None
xe, remainder = divmod(xe*yc, ten_pow)
if remainder:
return None
if e*3 >= p*10: # 10/3 > log(10)/log(2)
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1L << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return Dec_p1
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return Infsign[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return Infsign[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == Dec_p1:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return Infsign[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None and result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# the specification says that for non-integer other we need to
# raise Inexact, even when the result is actually exact. In
# the same way, we need to raise Underflow here if the result
# is subnormal. (The call to _fix will take care of raising
# Rounded and Subnormal, as usual.)
if not other._isinteger():
context._raise_error(Inexact)
# pad with zeros up to length context.prec+1 if necessary
if len(ans._int) <= context.prec:
expdiff = context.prec+1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
if ans.adjusted() < context.Emin:
context._raise_error(Underflow)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to use ROUND_HALF_EVEN here
ans = ans._fix(context)
return ans
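# Examples (illustrative):
#   >>> Decimal(2) ** 8
#   Decimal("256")
#   >>> pow(Decimal(2), 8, 7)           # three-argument form, always exact
#   Decimal("4")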
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context._clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
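# Example (illustrative): trailing zeros are stripped, raising the exponent.
#   >>> Decimal("120.00").normalize()
#   Decimal("1.2E+2")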
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
# call to fix takes care of any necessary folddown
ans = ans._fix(context)
return ans
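# Example (illustrative, default ROUND_HALF_EVEN rounding):
#   >>> Decimal("1.41421356").quantize(Decimal("1.000"))
#   Decimal("1.414")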
def same_quantum(self, other):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
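# Examples (illustrative): only the exponents are compared.
#   >>> Decimal("12.30").same_quantum(Decimal("0.01"))
#   True
#   >>> Decimal("12.3").same_quantum(Decimal("0.01"))
#   False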
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = getattr(self, self._pick_rounding_function[rounding])
changed = this_function(digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
context._raise_error(Rounded)
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
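# Examples (illustrative, default ROUND_HALF_EVEN): ties go to the even digit.
#   >>> Decimal("1.5").to_integral_value()
#   Decimal("2")
#   >>> Decimal("2.5").to_integral_value()
#   Decimal("2")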
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
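# Example (illustrative, assuming the default 28-digit precision):
#   >>> Decimal(2).sqrt()
#   Decimal("1.414213562373095048801688724")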
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except that if one operand is a quiet NaN and
the other is a number, the number is returned (sNaNs signal). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.__cmp__(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
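# Examples (illustrative): a quiet NaN loses to a number.
#   >>> Decimal(1).max(Decimal(2))
#   Decimal("2")
#   >>> Decimal("NaN").max(Decimal("2.3"))
#   Decimal("2.3")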
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except that if one operand is a quiet NaN and
the other is a number, the number is returned (sNaNs signal). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.__cmp__(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self, context=None):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
if context is None:
context = getcontext()
self_is_nan = self._isnan()
other_is_nan = other._isnan()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return context._raise_error(InvalidOperation, 'NaN in compare_signal',
self)
if other_is_nan:
return context._raise_error(InvalidOperation, 'NaN in compare_signal',
other)
return self.compare(other, context=context)
def compare_total(self, other):
"""Compares self to other using the abstract representations.
Unlike the standard compare, this does not use the operands' numerical
values. Note that a total ordering is defined over all possible abstract
representations.
"""
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return Dec_n1
if not self._sign and other._sign:
return Dec_p1
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
if self._int < other._int:
if sign:
return Dec_p1
else:
return Dec_n1
if self._int > other._int:
if sign:
return Dec_n1
else:
return Dec_p1
return Dec_0
if sign:
if self_nan == 1:
return Dec_n1
if other_nan == 1:
return Dec_p1
if self_nan == 2:
return Dec_n1
if other_nan == 2:
return Dec_p1
else:
if self_nan == 1:
return Dec_p1
if other_nan == 1:
return Dec_n1
if self_nan == 2:
return Dec_p1
if other_nan == 2:
return Dec_n1
if self < other:
return Dec_n1
if self > other:
return Dec_p1
if self._exp < other._exp:
if sign:
return Dec_p1
else:
return Dec_n1
if self._exp > other._exp:
if sign:
return Dec_n1
else:
return Dec_p1
return Dec_0
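# Example (illustrative): numerically equal values are ordered by exponent.
#   >>> Decimal("12.0").compare_total(Decimal("12"))
#   Decimal("-1")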
def compare_total_mag(self, other):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with each operand's sign ignored and assumed to be 0.
"""
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other):
"""Returns self with the sign of other."""
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return Dec_0
# exp(0) = 1
if not self:
return Dec_p1
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
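# Example (illustrative, assuming the default 28-digit precision):
#   >>> Decimal(1).exp()
#   Decimal("2.718281828459045235360287471")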
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted() <= context.Emax
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return negInf
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return Inf
# ln(1.0) == 0.0
if self == Dec_p1:
return Dec_0
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return negInf
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return Inf
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return Inf
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
return Decimal(self.adjusted())
def _islogical(self):
"""Return True if self is a logical operand.
To be logical, a number must be finite with a sign of 0, an
exponent of 0, and a coefficient whose digits are all either
0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
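# Informal examples: Decimal('1010')._islogical() is True, while
# Decimal('-101') (sign 1), Decimal('1E+2') (nonzero exponent) and
# Decimal('12') (a digit other than 0 or 1) are all non-logical.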
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join(str(int(a)|int(b)) for a,b in zip(opa,opb))
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join(str(int(a)^int(b)) for a,b in zip(opa,opb))
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number,
# then the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs().__cmp__(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number,
# then the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs().__cmp__(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return negInf
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return Inf
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self.__cmp__(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Rounded)
context._raise_error(Inexact)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Rounded)
context._raise_error(Inexact)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad:
rotdig = '0'*topad + rotdig
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self after adding the second value to its exponent."""
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
if not torot:
return Decimal(self)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad:
rotdig = '0'*topad + rotdig
# let's shift!
if torot < 0:
rotated = rotdig[:torot]
else:
rotated = rotdig + '0'*torot
rotated = rotated[-context.prec:]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) == Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# support for Jython __tojava__:
def __tojava__(self, java_class):
from java.lang import Object
from java.math import BigDecimal
from org.python.core import Py
if java_class not in (BigDecimal, Object):
return Py.NoConversion
return BigDecimal(str(self))
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
##### Context class #######################################################
# get rounding method function:
rounding_functions = [name for name in Decimal.__dict__.keys()
if name.startswith('_round_')]
for name in rounding_functions:
# name is like _round_half_even, goes to the global ROUND_HALF_EVEN value.
globalname = name[1:].upper()
val = globals()[globalname]
Decimal._pick_rounding_function[val] = name
del name, val, globalname, rounding_functions
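# After the loop above, _pick_rounding_function maps each public rounding
# constant to the name of the matching private method; e.g. (informal check)
# Decimal._pick_rounding_function[ROUND_HALF_EVEN] == '_round_half_even'.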
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
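# Usage sketch for _ContextManager (a minimal, hypothetical example built on
# this module's getcontext/setcontext; variable names are illustrative):
#
#     ctx = getcontext().copy()
#     ctx.prec = 50
#     with _ContextManager(ctx):
#         s = Decimal(2).sqrt()   # computed with 50-digit precision
#     # the previously active context is restored on exit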
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is incremented.
(Whether or not the trap_enabler is set.)
Should be reset by the user of the Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
_clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None,
traps=None, flags=None,
Emin=None, Emax=None,
capitals=None, _clamp=0,
_ignored_flags=None):
if flags is None:
flags = []
if _ignored_flags is None:
_ignored_flags = []
if not isinstance(flags, dict):
flags = dict([(s,s in flags) for s in _signals])
del s
if traps is not None and not isinstance(traps, dict):
traps = dict([(s,s in traps) for s in _signals])
del s
for name, val in locals().items():
if val is None:
setattr(self, name, _copy.copy(getattr(DefaultContext, name)))
else:
setattr(self, name, val)
del self.self
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.traps,
self.flags, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.traps.copy(),
self.flags.copy(), self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it increments the flag, then, if the corresponding
trap_enabler is set, it raises the exception. Otherwise, it returns
the default value after incrementing the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] += 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error, explanation
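# Informal example: with traps[DivisionByZero] == 0, a division by zero
# increments flags[DivisionByZero] and returns the signal's handle() result
# (a suitably signed Infinity) instead of raising.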
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
def __hash__(self):
"""A Context cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError("Cannot hash a Context.")
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context."""
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self._clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
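# Informal example (assuming the default ROUND_HALF_EVEN rounding): with
# self.prec == 5, create_decimal('3.1415926535') rounds the operand to the
# context, giving Decimal("3.1416").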
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.abs(Decimal('-100'))
Decimal("100")
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal("101.5")
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal("101.5")
"""
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal("19.00")
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal("1.02E+4")
"""
return a.__add__(b, context=self)
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal("2.50")
"""
return a.canonical(context=self)
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal("0")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal("0")
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal("1")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal("1")
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal("-1")
"""
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal("-1")
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal("0")
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal("NaN")
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal("NaN")
>>> print c.flags[InvalidOperation]
1
"""
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which uses the numerical
values. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal("-1")
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal("-1")
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal("-1")
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal("0")
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal("1")
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal("-1")
"""
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal("100")
"""
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal objet.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal("-1.00")
"""
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal("-101.5")
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal("101.5")
"""
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal("1.50")
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal("1.50")
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal("-1.50")
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal("-1.50")
"""
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal("0.333333333")
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal("0.666666667")
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal("2.5")
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal("0.1")
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal("1")
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal("4.00")
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal("1.20")
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal("10")
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal("1000")
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal("1.20E+6")
"""
return a.__div__(b, context=self)
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal("0")
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal("3")
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal("3")
"""
return a.__floordiv__(b, context=self)
def divmod(self, a, b):
return a.__divmod__(b, context=self)
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal("0")
>>> c.exp(Decimal('-1'))
Decimal("0.367879441")
>>> c.exp(Decimal('0'))
Decimal("1")
>>> c.exp(Decimal('1'))
Decimal("2.71828183")
>>> c.exp(Decimal('0.693147181'))
Decimal("2.00000000")
>>> c.exp(Decimal('+Infinity'))
Decimal("Infinity")
"""
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal("22")
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal("-8")
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal("1.38435736E+12")
"""
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
"""
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
"""
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
"""
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
"""
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
"""
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
"""
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
"""
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
"""
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
"""
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal("-Infinity")
>>> c.ln(Decimal('1.000'))
Decimal("0")
>>> c.ln(Decimal('2.71828183'))
Decimal("1.00000000")
>>> c.ln(Decimal('10'))
Decimal("2.30258509")
>>> c.ln(Decimal('+Infinity'))
Decimal("Infinity")
"""
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal("-Infinity")
>>> c.log10(Decimal('0.001'))
Decimal("-3")
>>> c.log10(Decimal('1.000'))
Decimal("0")
>>> c.log10(Decimal('2'))
Decimal("0.301029996")
>>> c.log10(Decimal('10'))
Decimal("1")
>>> c.log10(Decimal('70'))
Decimal("1.84509804")
>>> c.log10(Decimal('+Infinity'))
Decimal("Infinity")
"""
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal("2")
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal("0")
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal("-2")
>>> ExtendedContext.logb(Decimal('0'))
Decimal("-Infinity")
"""
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal("0")
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal("0")
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal("0")
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal("1000")
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal("10")
"""
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal("111111111")
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal("111111110")
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal("0")
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal("10101010")
"""
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal("0")
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal("1")
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal("1110")
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal("1110")
"""
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal("0")
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal("1")
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal("0")
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal("110")
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal("1101")
"""
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal("3")
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal("3")
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal("7")
"""
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored."""
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal("2")
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal("-10")
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal("1.0")
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal("7")
"""
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored."""
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal("-1.3")
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal("1.3")
"""
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together ('long multiplication'),
resulting in a number which may be as long as the sum of the lengths
of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal("3.60")
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal("21")
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal("0.72")
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal("-0.0")
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal("4.28135971E+11")
"""
return a.__mul__(b, context=self)
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal("0.999999999")
>>> c.next_minus(Decimal('1E-1007'))
Decimal("0E-1007")
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal("-1.00000004")
>>> c.next_minus(Decimal('Infinity'))
Decimal("9.99999999E+999")
"""
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal("1.00000001")
>>> c.next_plus(Decimal('-1E-1007'))
Decimal("-0E-1007")
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal("-1.00000002")
>>> c.next_plus(Decimal('-Infinity'))
Decimal("-9.99999999E+999")
"""
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal("1.00000001")
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal("-0E-1007")
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal("-1.00000002")
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal("0.999999999")
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal("0E-1007")
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal("-1.00000004")
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal("-0.00")
"""
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal("-2")
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal("1.2")
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal("-1.2E+2")
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal("1.2E+2")
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal("0")
"""
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = Context(ExtendedContext)
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
"""
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal("1.3")
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal("-1.3")
"""
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal("8")
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal("-8")
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal("0.125")
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal("69.7575744")
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal("2.00000000")
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal("0")
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal("1")
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal("Infinity")
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal("-0")
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal("1")
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal("-Infinity")
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal("Infinity")
>>> c.power(Decimal('0'), Decimal('0'))
Decimal("NaN")
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal("11")
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal("-11")
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal("1")
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal("11")
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal("11729830")
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal("-0")
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal("1")
"""
return a.__pow__(b, modulo, context=self)
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal("2.170")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal("2.17")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal("2.2")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal("2")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal("0E+1")
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal("-Infinity")
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal("-0")
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal("-0E+5")
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal("217.0")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal("217")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal("2.2E+2")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal("2E+2")
"""
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal("10")
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal("2.1")
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal("1")
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal("0.2")
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal("0.1")
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal("1.0")
"""
return a.__mod__(b, context=self)
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal("-0.9")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal("-2")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal("1")
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal("0.2")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal("0.1")
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal("-0.3")
"""
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal("400000003")
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal("12")
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal("891234567")
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal("123456789")
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal("345678912")
"""
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
"""
return a.same_quantum(b)
def scaleb(self, a, b):
"""Returns the first operand after adding the second value to its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal("0.0750")
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal("7.50")
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal("7.50E+3")
"""
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal("400000000")
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal("0")
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal("1234567")
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal("123456789")
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal("345678900")
"""
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal("0")
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal("-0")
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal("0.624499800")
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal("10")
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal("1")
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal("1.0")
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal("1.0")
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal("2.64575131")
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal("3.16227766")
>>> ExtendedContext.prec
9
"""
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal("0.23")
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal("0.00")
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal("-0.77")
"""
return a.__sub__(b, context=self)
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal("2")
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal("100")
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal("100")
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal("102")
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal("-102")
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal("1.0E+6")
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal("7.89E+77")
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal("-Infinity")
"""
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal("2")
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal("100")
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal("100")
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal("102")
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal("-102")
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal("1.0E+6")
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal("7.89E+77")
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal("-Infinity")
"""
return a.to_integral_value(context=self)
# the method name changed, but we also provide the old one, for compatibility
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int or long
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec=0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
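# Worked example (informal check): with op1 = _WorkRep(Decimal('1.23E+2'))
# (int=123, exp=0) and op2 = _WorkRep(Decimal('0.005')) (int=5, exp=-3),
# _normalize(op1, op2, 9) leaves op2 unchanged and rewrites op1 as
# int=123000, exp=-3, so both operands share the same exponent.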
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
# This function from Tim Peters was taken from here:
# http://mail.python.org/pipermail/python-list/1999-July/007758.html
# The correction table is placed in the function definition for speed,
# and math.log is deliberately avoided so that the computation never
# uses floats.
def _nbits(n, correction = {
'0': 4, '1': 3, '2': 2, '3': 2,
'4': 1, '5': 1, '6': 1, '7': 1,
'8': 0, '9': 0, 'a': 0, 'b': 0,
'c': 0, 'd': 0, 'e': 0, 'f': 0}):
"""Number of bits in binary representation of the positive integer n,
or 0 if n == 0.
"""
if n < 0:
raise ValueError("The argument to _nbits should be nonnegative.")
hex_n = "%x" % n
return 4*len(hex_n) - correction[hex_n[0]]
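# e.g. _nbits(0) == 0, _nbits(255) == 8 and _nbits(256) == 9 (informal
# check: "%x" % 256 is '100', and 4*3 - correction['1'] == 9).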
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b = 0
while a != b:
b, a = a, a--n//a>>1    # i.e. a = (a + ceil(n/a)) >> 1, since -(-n//a) == ceil(n/a)
return a
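# e.g. (informal check) _sqrt_nearest(10, 3) == 3: the first iteration
# computes (3 + ceil(10/3)) >> 1 == 3, so a == b and the loop stops;
# 3 is indeed the closest integer to sqrt(10) ~= 3.162.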
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1L << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
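# e.g. _rshift_nearest(5, 1) == 2 and _rshift_nearest(7, 1) == 4: exact
# halves (2.5 and 3.5) round to the even neighbour.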
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
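# e.g. _div_nearest(5, 2) == 2 and _div_nearest(7, 2) == 4, matching the
# round-half-to-even convention used throughout these helpers.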
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and long(abs(y)) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest(long(M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in xrange(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
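# For example (illustrative): _ilog(2 * 10**6, 10**6) returns an integer
# within the stated error bound of 10**6 * log(2) ~= 693147.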
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
        log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
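# For example (illustrative): _dlog10(3, 0, 3) approximates
# 10**3 * log10(3) ~= 477.12 to within 1, so the result is 477 or 478.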
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
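# For example (illustrative): _dlog(2, 0, 6) approximates
# 10**6 * log(2) ~= 693147.18 to within 1.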
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((long(x)<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = long(M)<<R
for i in xrange(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in xrange(R-1, -1, -1):
Mshift = long(M)<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
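# Sanity checks (illustrative): _iexp(0, M) returns M exactly, and
# _iexp(10**6, 10**6) is within the stated bound of e * 10**6 ~= 2718281.8.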
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
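# For example (illustrative): _dexp(1, 0, 5) approximates exp(1); the
# returned pair (d, f) should satisfy d * 10**f ~= 2.71828, with d a
# 5-digit integer (about 27183) and f == -4.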
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
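# For example (illustrative): _dpower(2, 0, 5, -1, 5) computes 2**0.5 to
# 5 digits; the returned (c, e) should satisfy c * 10**e ~= 1.41421,
# i.e. c about 14142 and e == -4.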
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
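# For example: _log10_lb(3) == 100*1 - 53 == 47, a lower bound for
# 100*log10(3) ~= 47.71; _log10_lb(10) == 100 exactly.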
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
"""
if isinstance(other, Decimal):
return other
if isinstance(other, (int, long)):
return Decimal(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
##### Setup Specific Contexts ############################################
# The default context prototype used by Context(); it is mutable, so
# that new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999999,
Emin=-999999999,
capitals=1
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#
# As the flag UNICODE is not enabled here, we're explicitly avoiding any
# other meaning for \d than the numbers [0-9].
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # consisting of a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic information.
)
# \s*
$
""", re.VERBOSE | re.IGNORECASE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
del re
##### Useful Constants (internal use only) ################################
# Reusable defaults
Inf = Decimal('Inf')
negInf = Decimal('-Inf')
NaN = Decimal('NaN')
Dec_0 = Decimal(0)
Dec_p1 = Decimal(1)
Dec_n1 = Decimal(-1)
# Infsign[sign] is infinity w/ that sign
Infsign = (Inf, negInf)
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| apache-2.0 |
gavruskin/microinteractions | data_preprocess_Development.py | 1 | 9649 | import pandas as pd
data = pd.read_csv("DevelopmentData.csv")
n = len(data.columns)
# Add all parameters (Taylor coefficients) as zero-filled columns appended to the data:
for i in range(data.shape[0]):
for j in range(n+2, n+34):
data.set_value(i, j, 0)
data.rename(columns={n+2: "a", n+3: "a1", n+4: "a2", n+5: "a3", n+6: "a4", n+7: "a5",
n+8: "b12", n+9: "b13", n+10: "b14", n+11: "b15", n+12: "b23", n+13: "b24",
n+14: "b25", n+15: "b34", n+16: "b35", n+17: "b45", n+18: "c123", n+19: "c124",
n+20: "c125", n+21: "c134", n+22: "c135", n+23: "c145", n+24: "c234", n+25: "c235",
n+26: "c245", n+27: "c345", n+28: "d1234", n+29: "d1235", n+30: "d1245",
n+31: "d1345", n+32: "d2345", n+33: "e12345"}, inplace=True)
# Change coefficients corresponding to present effects to 1:
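# The if-chain below enumerates, for each treatment code, every non-empty
# subset of the present species and switches the matching Taylor
# coefficient on.  A compact equivalent (a sketch; TREAT_TO_SPECIES and
# PARAM_NAME are hypothetical lookup tables encoding the same treat ->
# species coding used below; note set_value is deprecated in modern
# pandas in favour of .at):
#   from itertools import combinations
#   data.set_value(index, "a", 1)                # intercept, always present
#   species = TREAT_TO_SPECIES[combo]            # e.g. 6 -> (1, 2)
#   for r in range(1, len(species) + 1):
#       for subset in combinations(species, r):
#           data.set_value(index, PARAM_NAME[subset], 1)   # (1, 2) -> "b12"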
for index, row in data.iterrows():
combo = row["treat"]
if combo == 1:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
if combo == 2:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
if combo == 3:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
if combo == 4:
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
if combo == 5:
data.set_value(index, "a", 1)
data.set_value(index, "a5", 1)
if combo == 6:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "b12", 1)
if combo == 7:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b13", 1)
if combo == 8:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b14", 1)
if combo == 9:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b15", 1)
if combo == 10:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b23", 1)
if combo == 11:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b24", 1)
if combo == 12:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b25", 1)
if combo == 13:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b34", 1)
if combo == 14:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b35", 1)
if combo == 15:
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b45", 1)
if combo == 16:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "c123", 1)
if combo == 17:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "c124", 1)
if combo == 18:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "c125", 1)
if combo == 22:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "c234", 1)
if combo == 25:
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c345", 1)
if combo == 19:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "c134", 1)
if combo == 20:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "c135", 1)
if combo == 21:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c145", 1)
if combo == 24:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c245", 1)
if combo == 23:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "c235", 1)
if combo == 26:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "d1234", 1)
if combo == 27:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "c123", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "d1235", 1)
if combo == 28:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "d1245", 1)
if combo == 29:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1345", 1)
if combo == 30:
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d2345", 1)
if combo == 31:
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", 1)
data.set_value(index, "b13", 1)
data.set_value(index, "b14", 1)
data.set_value(index, "b15", 1)
data.set_value(index, "b23", 1)
data.set_value(index, "b24", 1)
data.set_value(index, "b25", 1)
data.set_value(index, "b34", 1)
data.set_value(index, "b35", 1)
data.set_value(index, "b45", 1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1234", 1)
data.set_value(index, "d1235", 1)
data.set_value(index, "d1245", 1)
data.set_value(index, "d1345", 1)
data.set_value(index, "d2345", 1)
data.set_value(index, "e12345", 1)
if combo == 32:
data.set_value(index, "a", 1)
data.to_csv("DevelopmentData_processed.csv")
| mit |
belokop-an/agenda-tools | code/htdocs/contributionModification.py | 1 | 3733 | from MaKaC.webinterface.rh import contribMod
def index(req, **params):
return contribMod.RHContributionModification( req ).process( params )
def newPrimAuthor(req, **params):
return contribMod.RHNewPrimaryAuthor( req ).process( params )
def searchPrimAuthor(req, **params):
return contribMod.RHSearchPrimaryAuthor( req ).process( params )
def searchAddPrimAuthor(req, **params):
return contribMod.RHSearchAddPrimaryAuthor( req ).process( params )
def searchCoAuthor(req, **params):
return contribMod.RHSearchCoAuthor( req ).process( params )
def searchAddCoAuthor(req, **params):
return contribMod.RHSearchAddCoAuthor( req ).process( params )
def remPrimAuthors(req, **params):
return contribMod.RHRemPrimaryAuthors( req ).process( params )
def modPrimAuthor(req, **params):
return contribMod.RHEditPrimaryAuthor( req ).process( params )
def newCoAuthor(req, **params):
return contribMod.RHNewCoAuthor( req ).process( params )
def remCoAuthors(req, **params):
return contribMod.RHRemCoAuthors( req ).process( params )
def modCoAuthor(req, **params):
return contribMod.RHEditCoAuthor( req ).process( params )
def addMaterial(req, **params):
return contribMod.RHContributionAddMaterial( req ).process( params )
def performAddMaterial(req, **params):
return contribMod.RHContributionPerformAddMaterial( req ).process( params )
#def materials(req, **params):
# return contribMod.RHContributionPerformAddMaterial( req ).process( params )
def removeMaterials( req, **params ):
return contribMod.RHContributionRemoveMaterials( req ).process( params )
def move( req, **params ):
return contribMod.RHContributionMove( req ).process( params )
def performMove( req, **params ):
return contribMod.RHContributionPerformMove( req ).process( params )
def data( req, **params ):
return contribMod.RHContributionData( req ).process( params )
def xml( req, **params ):
return contribMod.RHContributionToXML( req ).process( params )
def pdf( req, **params ):
return contribMod.RHContributionToPDF( req ).process( params )
def modifData( req, **params ):
return contribMod.RHContributionModifData( req ).process( params )
def addSpk( req, **params ):
return contribMod.RHAddSpeakers( req ).process( params )
def remSpk( req, **params ):
return contribMod.RHRemSpeakers( req ).process( params )
def searchSpk( req, **params ):
return contribMod.RHSearchSpeakers( req ).process( params )
def searchAddSpk(req, **params):
return contribMod.RHSearchAddSpeakers( req ).process( params )
def setTrack( req, **params ):
return contribMod.RHSetTrack( req ).process( params )
def setSession( req, **params ):
return contribMod.RHSetSession( req ).process( params )
def withdraw(req, **params):
return contribMod.RHWithdraw( req ).process( params )
def primAuthUp(req, **params):
return contribMod.RHPrimAuthUp( req ).process( params )
def primAuthDown(req, **params):
return contribMod.RHPrimAuthDown( req ).process( params )
def coAuthUp(req, **params):
return contribMod.RHCoAuthUp( req ).process( params )
def coAuthDown(req, **params):
return contribMod.RHCoAuthDown( req ).process( params )
def primaryAuthorAction(req, **params):
return contribMod.RHPrimaryAuthorsActions( req ).process( params )
def coAuthorAction(req, **params):
return contribMod.RHCoAuthorsActions( req ).process( params )
def newSpeaker(req, **params):
return contribMod.RHNewSpeaker( req ).process( params )
def modSpeaker(req, **params):
return contribMod.RHEditSpeaker( req ).process( params )
def materials(req, **params):
return contribMod.RHMaterials( req ).process( params )
| gpl-2.0 |
dydek/django | django/contrib/gis/utils/layermapping.py | 8 | 27310 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, six.string_types):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out before hand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.remote_field.model
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
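            # Worked values (illustrative): Decimal('123.45').as_tuple()
            # gives (0, (1, 2, 3, 4, 5), -2), so d_idx == -2 and
            # n_prec == 3 digits precede the decimal point.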
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g, [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except Exception: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
| bsd-3-clause |
eoyilmaz/anima | anima/mocap/xsens.py | 1 | 5719 | # -*- coding: utf-8 -*-
from collections import namedtuple
# Data containers
Header = namedtuple(
'Header',
['sample_counter', 'datagram_counter', 'num_items', 'timecode',
'charID', 'extra_data']
)
Euler = namedtuple(
'Euler',
['segment_ID', 'tx', 'ty', 'tz', 'rx', 'ry', 'rz']
)
Quaternion = namedtuple(
'Quaternion',
['segment_ID', 'tx', 'ty', 'tz', 'q1', 'q2', 'q3', 'q4']
)
TimeCode = namedtuple('TimeCode', 'tc')
# Formats
header_data_format = '!IBBIc7s'
euler_data_format = '!i6f'
quaternion_data_format = '!i7f'
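# Byte layout (network byte order): the header packs to 4+1+1+4+1+7 = 18
# bytes, each Euler segment to 4 + 6*4 = 28 bytes, and each quaternion
# segment to 4 + 7*4 = 32 bytes -- matching the slice and chunk sizes
# used in XSensListener.listen below.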
class XSensListener(object):
"""Network listener and parser for XSens data
"""
def listen(self, host='localhost', port=9763, timeout=2):
"""listens and prints XSens stream
"""
import struct
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(timeout)
s.bind((host, port))
while True:
data = s.recv(2048)
packages = data.split('MXTP')
for package in packages:
packet_type_id = package[:2]
raw_data = package[2:]
header = raw_data[:18] # excluding the MXTP01 ID String
if len(header) < 18:
continue
header_data = Header._make(
struct.unpack(header_data_format, header)
)
# parse the rest of the data by package type
raw_pose_data = raw_data[18:]
pose_data = []
if packet_type_id == '01':
# euler data
chunks = map(''.join, zip(*[iter(raw_pose_data)] * 28))
for chunk in chunks:
unpacked_data = struct.unpack(euler_data_format, chunk)
euler_data = Euler._make(unpacked_data)
pose_data.append(euler_data)
elif packet_type_id == '02':
# quaternion data
chunks = map(''.join, zip(*[iter(raw_pose_data)] * 32))
for chunk in chunks:
unpacked_data = struct.unpack(quaternion_data_format, chunk)
quaternion_data = Quaternion._make(unpacked_data)
pose_data.append(quaternion_data)
elif packet_type_id == '25':
# TimeCode
pose_data.append(TimeCode._make(raw_pose_data))
yield ([header_data, pose_data])
# elif packet_type_id == '03':
# print('Pose data - MVN Optical marker set 1')
# print(raw_pose_data)
#
# elif packet_type_id == '04':
# print('Pose data - Motion Grid Tag data (Deprecated)')
# print(raw_pose_data)
#
# elif packet_type_id == '05':
# print('Pose data - Unity3D')
# print(raw_pose_data)
#
# elif packet_type_id == '10':
# print('Pose data - Scale Information (Deprecated)')
# print(raw_pose_data)
#
# elif packet_type_id == '1!':
# print('Pose data - Prop Information (Deprecated)')
# print(raw_pose_data)
#
# elif packet_type_id == '12':
# print('Character Information -> meta data')
# print(raw_pose_data)
#
# elif packet_type_id == '13':
# print('Character Information -> Scaling Information')
# print(raw_pose_data)
#
# elif packet_type_id == '20':
# print('Joint Angle data')
# print(raw_pose_data)
#
# elif packet_type_id == '21':
# print('Linear Segment Kinematics')
# print(raw_pose_data)
#
# elif packet_type_id == '22':
# print('Angular Segment Kinematics')
# print(raw_pose_data)
#
# elif packet_type_id == '23':
# print('Motion Tracker Kinematics')
# print(raw_pose_data)
#
# elif packet_type_id == '24':
# print('Center Of Mass')
# print(raw_pose_data)
class XSensStore(object):
"""Stores XSens data in a file
"""
def __init__(self, output_file_fullpath=''):
self.output_file_fullpath = output_file_fullpath
def store(self, stream):
"""Stores the data until the stream ends
:param stream:
:return:
"""
        # NOTE: persistence is not implemented yet; this is a stub.
        with open(self.output_file_fullpath, 'wb'):
            while stream:
                pass
class XSensGenerator(object):
"""Generates XSens compatible data
:param source: A generator, if None, a random sequence will be generated.
"""
def __init__(self, source=None, fps=240, generate_euler=True,
generate_quaternion=True):
self.source = source
self.fps = fps
self.generate_euler = generate_euler
self.generate_quaternion = generate_quaternion
def generate(self):
"""generate data
"""
import time
for data in self.source:
time.sleep(1.0/self.fps)
yield data
def _random_sequence_generator(self):
"""generates random data
"""
pass | mit |
GreenLunar/Bookie | bookie/tests/test_models/test_bmark.py | 7 | 2534 | """Test the basics including the bmark and tags"""
from bookie.models import (
DBSession,
Bmark,
)
from bookie.models.auth import User
from bookie.tests import gen_random_word
from bookie.tests import TestDBBase
class TestBmark(TestDBBase):
"""Handle bmark function checks"""
def test_has_access_same_user_public(self):
"""Test that a user can view their own public bookmark"""
user = User()
user.username = gen_random_word(10)
DBSession.add(user)
b = Bmark(
url=gen_random_word(12),
username=user.username,
is_private=False,
)
b.hash_id = gen_random_word(3)
DBSession.add(b)
res = b.has_access(user.username)
self.assertEqual(True, res)
def test_has_access_same_user_private(self):
"""Test that a user can view their own private bookmark"""
user = User()
user.username = gen_random_word(10)
DBSession.add(user)
b = Bmark(
url=gen_random_word(12),
username=user.username,
is_private=True,
)
b.hash_id = gen_random_word(3)
DBSession.add(b)
res = b.has_access(user.username)
self.assertEqual(True, res)
def test_has_access_diff_user_public(self):
"""Test that a different user can view another's public bookmark"""
user = User()
user.username = gen_random_word(10)
DBSession.add(user)
b = Bmark(
url=gen_random_word(12),
username=gen_random_word(10),
is_private=False,
)
b.hash_id = gen_random_word(3)
DBSession.add(b)
res = b.has_access(user.username)
self.assertEqual(True, res)
# Also check if user is None.
username = None
res = b.has_access(username)
self.assertEqual(True, res)
def test_has_access_diff_user_private(self):
"""Test that a different user cannot view another's private bookmark"""
user = User()
user.username = gen_random_word(10)
DBSession.add(user)
b = Bmark(
url=gen_random_word(12),
username=gen_random_word(10),
is_private=True,
)
b.hash_id = gen_random_word(3)
DBSession.add(b)
res = b.has_access(user.username)
self.assertEqual(False, res)
# Also check if user is None.
username = None
res = b.has_access(username)
self.assertEqual(False, res)
| agpl-3.0 |
mozilla/pontoon | pontoon/sync/formats/ftl.py | 2 | 5349 | import codecs
import copy
import logging
from fluent.syntax import ast, FluentParser, FluentSerializer
from pontoon.sync.exceptions import SyncError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.utils import create_parent_directory
from pontoon.sync.vcs.models import VCSTranslation
log = logging.getLogger(__name__)
parser = FluentParser()
serializer = FluentSerializer()
localizable_entries = (ast.Message, ast.Term)
class FTLEntity(VCSTranslation):
"""
Represents entities in FTL (without its attributes).
"""
def __init__(
self,
key,
source_string,
source_string_plural,
strings,
comments=None,
group_comments=None,
resource_comments=None,
order=None,
):
super().__init__(
key=key,
source_string=source_string,
source_string_plural=source_string_plural,
strings=strings,
comments=comments or [],
group_comments=group_comments or [],
resource_comments=resource_comments or [],
fuzzy=False,
order=order,
)
def __repr__(self):
return "<FTLEntity {key}>".format(key=self.key.encode("utf-8"))
class FTLResource(ParsedResource):
def __init__(self, path, locale, source_resource=None):
self.path = path
self.locale = locale
self.entities = {}
self.source_resource = source_resource
self.order = 0
# Copy entities from the source_resource if it's available.
if source_resource:
for key, entity in source_resource.entities.items():
self.entities[key] = FTLEntity(
entity.key,
"",
"",
{},
copy.copy(entity.comments),
copy.copy(entity.group_comments),
copy.copy(entity.resource_comments),
entity.order,
)
try:
with codecs.open(path, "r", "utf-8") as resource:
self.structure = parser.parse(resource.read())
except OSError:
# If the file doesn't exist, but we have a source resource,
# we can keep going, we'll just not have any translations.
if source_resource:
return
else:
raise
group_comment = []
resource_comment = []
for obj in self.structure.body:
if isinstance(obj, localizable_entries):
key = get_key(obj)
comment = [obj.comment.content] if obj.comment else []
# Do not store comments in the string column
obj.comment = None
translation = serializer.serialize_entry(obj)
self.entities[key] = FTLEntity(
key,
translation,
"",
{None: translation},
comment,
group_comment,
resource_comment,
self.order,
)
self.order += 1
elif isinstance(obj, ast.GroupComment):
group_comment = [obj.content]
elif isinstance(obj, ast.ResourceComment):
resource_comment += [obj.content]
@property
def translations(self):
return sorted(self.entities.values(), key=lambda e: e.order)
def save(self, locale):
"""
Load the source resource, modify it with changes made to this
Resource instance, and save it over the locale-specific
resource.
"""
if not self.source_resource:
raise SyncError(
"Cannot save FTL resource {}: No source resource given.".format(
self.path
)
)
with codecs.open(self.source_resource.path, "r", "utf-8") as resource:
structure = parser.parse(resource.read())
entities = structure.body
# Use list() to iterate over a copy, leaving original free to modify
for obj in list(entities):
if isinstance(obj, localizable_entries):
index = entities.index(obj)
key = get_key(obj)
entity = self.entities[key]
if entity.strings:
message = parser.parse_entry(entity.strings[None])
message.comment = obj.comment
entities[index] = message
else:
del entities[index]
create_parent_directory(self.path)
with codecs.open(self.path, "w+", "utf-8") as f:
log.debug("Saving file: %s", self.path)
f.write(serializer.serialize(structure))
def get_key(obj):
"""
Get FTL Message/Term key as it appears in the file.
In case of a Term, we need to prepend -. See bug 1521523.
"""
key = obj.id.name
if isinstance(obj, ast.Term):
return "-" + key
return key
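# For example, the Message "hello = Hello" yields the key "hello", while
# the Term "-brand-name = Firefox" yields "-brand-name".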
def parse(path, source_path=None, locale=None):
if source_path is not None:
source_resource = FTLResource(source_path, locale)
else:
source_resource = None
return FTLResource(path, locale, source_resource)
| bsd-3-clause |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/mimeparse.py | 31 | 6452 | """MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
from functools import reduce
__version__ = '0.1.4'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
"""Parses a mime-type into its component parts.
Carves up a mime-type and returns a tuple of the (type, subtype, params)
where 'params' is a dictionary of all the parameters for the media range.
For example, the media range 'application/xhtml;q=0.5' would get parsed
into:
('application', 'xhtml', {'q', '0.5'})
"""
parts = mime_type.split(';')
params = dict([tuple([s.strip() for s in param.split('=', 1)])
for param in parts[1:]
])
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a
# single '*'. Turn it into a legal wildcard.
if full_type == '*':
full_type = '*/*'
(type, subtype) = full_type.split('/')
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Parse a media-range into its component parts.
Carves up a media range and returns a tuple of the (type, subtype,
params) where 'params' is a dictionary of all the parameters for the media
range. For example, the media range 'application/*;q=0.5' would get parsed
into:
('application', '*', {'q', '0.5'})
In addition this function also guarantees that there is a value for 'q'
in the params dictionary, filling it in with a proper default if
necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if not 'q' in params or not params['q'] or \
not float(params['q']) or float(params['q']) > 1\
or float(params['q']) < 0:
params['q'] = '1'
return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns a tuple of
the fitness value and the value of the 'q' quality parameter of the best
match, or (-1, 0) if no match was found. Just as for quality_parsed(),
'parsed_ranges' must be a list of parsed media ranges.
"""
best_fitness = -1
best_fit_q = 0
(target_type, target_subtype, target_params) =\
parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
type_match = (type == target_type or
type == '*' or
target_type == '*')
subtype_match = (subtype == target_subtype or
subtype == '*' or
target_subtype == '*')
if type_match and subtype_match:
param_matches = reduce(lambda x, y: x + y, [1 for (key, value) in
list(target_params.items()) if key != 'q' and
key in params and value == params[key]], 0)
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params['q']
return best_fitness, float(best_fit_q)
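# Worked example (illustrative): an exact type/subtype match scores
# 100 + 10 plus one per matching parameter, so
#   >>> fitness_and_quality_parsed('text/html',
#   ...     [parse_media_range(r) for r in ('text/*;q=0.3', 'text/html;q=0.7')])
#   (110, 0.7)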
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns the 'q'
quality parameter of the best match, 0 if no match was found. This function
    behaves the same as quality() except that 'parsed_ranges' must be a list of
parsed media ranges. """
return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
def quality(mime_type, ranges):
"""Return the quality ('q') of a mime-type against a list of media-ranges.
Returns the quality 'q' of a mime-type when compared against the
media-ranges in ranges. For example:
    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(',')]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
    >>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
split_header = _filter_blank(header.split(','))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = []
pos = 0
for mime_type in supported:
weighted_matches.append((fitness_and_quality_parsed(mime_type,
parsed_header), pos, mime_type))
pos += 1
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
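# Minimal runnable sketch (appended for illustration; not in the upstream
# module): choosing a representation for an Accept header. The supported list
# is ordered by increasing desirability, so ties break toward the last entry.
if __name__ == '__main__':
    accept = 'application/xbel+xml;q=0.3,application/xml;q=0.9'
    print(best_match(['application/xbel+xml', 'application/xml'], accept))
    # -> application/xml (equal fitness, higher q wins)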
| mit |
viggates/nova | nova/tests/integrated/v3/test_console_output.py | 31 | 1106 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class ConsoleOutputSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-console-output"
def test_get_console_output(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
'console-output-post-req', {})
subs = self._get_regexes()
self._verify_response('console-output-post-resp', subs, response, 200)
| apache-2.0 |
hynnet/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/bsddb/dbutils.py | 157 | 2964 | #------------------------------------------------------------------------
#
# Copyright (C) 2000 Autonomous Zone Industries
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# Author: Gregory P. Smith <greg@krypto.org>
#
# Note: I don't know how useful this is in reality since when a
# DBLockDeadlockError happens the current transaction is supposed to be
# aborted. If it doesn't then when the operation is attempted again
# the deadlock is still happening...
# --Robin
#
#------------------------------------------------------------------------
#
# import the time.sleep function in a namespace safe way to allow
# "from bsddb.dbutils import *"
#
from time import sleep as _sleep
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
import db
# always sleep at least N seconds between retrys
_deadlock_MinSleepTime = 1.0/128
# never sleep more than N seconds between retrys
_deadlock_MaxSleepTime = 3.14159
# Assign a file object to this for a "sleeping" message to be written to it
# each retry
_deadlock_VerboseFile = None
def DeadlockWrap(function, *_args, **_kwargs):
"""DeadlockWrap(function, *_args, **_kwargs) - automatically retries
function in case of a database deadlock.
This is a function intended to be used to wrap database calls such
that they perform retrys with exponentially backing off sleeps in
between when a DBLockDeadlockError exception is raised.
A 'max_retries' parameter may optionally be passed to prevent it
from retrying forever (in which case the exception will be reraised).
d = DB(...)
d.open(...)
DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar"
"""
sleeptime = _deadlock_MinSleepTime
max_retries = _kwargs.get('max_retries', -1)
if 'max_retries' in _kwargs:
del _kwargs['max_retries']
while True:
try:
return function(*_args, **_kwargs)
except db.DBLockDeadlockError:
if _deadlock_VerboseFile:
_deadlock_VerboseFile.write(
'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_sleep(sleeptime)
# exponential backoff in the sleep time
sleeptime *= 2
if sleeptime > _deadlock_MaxSleepTime:
sleeptime = _deadlock_MaxSleepTime
max_retries -= 1
if max_retries == -1:
raise
#------------------------------------------------------------------------
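# Usage sketch (illustrative; not part of the original module): retry a put()
# with up to three retries on deadlock before letting the exception propagate.
#
#     d = db.DB(...)
#     d.open(...)
#     DeadlockWrap(d.put, "foo", data="bar", max_retries=3)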
| gpl-2.0 |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/requests-2.2.1-py2.7.egg/requests/packages/urllib3/contrib/ntlmpool.py | 714 | 4741 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
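# Usage sketch (illustrative; not part of the module -- the host and URL
# values below are hypothetical):
#
#     pool = NTLMConnectionPool('DOMAIN\\user', 'secret',
#                               authurl='/protected/',
#                               host='server.example.com', port=443)
#     response = pool.urlopen('GET', '/protected/')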
| gpl-2.0 |
ofek/bit | tests/test_base58.py | 1 | 1028 | import pytest
from bit.base58 import b58decode, b58decode_check, b58encode, b58encode_check
from bit.format import MAIN_PUBKEY_HASH
from .samples import BINARY_ADDRESS, BITCOIN_ADDRESS, PUBKEY_HASH
def test_b58encode():
assert b58encode(BINARY_ADDRESS) == BITCOIN_ADDRESS
assert b58encode(BINARY_ADDRESS[:1]) == BITCOIN_ADDRESS[:1]
def test_b58encode_check():
assert b58encode_check(MAIN_PUBKEY_HASH + PUBKEY_HASH) == BITCOIN_ADDRESS
class TestB58Decode:
def test_b58decode_success(self):
assert b58decode(BITCOIN_ADDRESS) == BINARY_ADDRESS
assert b58decode(BITCOIN_ADDRESS[:1]) == b'\x00\x00'
def test_b58decode_failure(self):
with pytest.raises(ValueError):
b58decode('l')
class TestB58DecodeCheck:
def test_b58decode_check_success(self):
assert b58decode_check(BITCOIN_ADDRESS) == MAIN_PUBKEY_HASH + PUBKEY_HASH
def test_b58decode_check_failure(self):
with pytest.raises(ValueError):
b58decode_check(BITCOIN_ADDRESS[:-1])
| mit |
tysonclugg/django | django/db/backends/base/creation.py | 20 | 11972 | import sys
from io import StringIO
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.db import router
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation:
"""
Encapsulate backend-specific differences pertaining to creation and
destruction of the test database.
"""
def __init__(self, connection):
self.connection = connection
@property
def _nodb_connection(self):
"""
Used to be defined here, now moved to DatabaseWrapper.
"""
return self.connection._nodb_connection
def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=False):
"""
Create a test database, prompting the user for confirmation if the
database already exists. Return the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
action = 'Creating'
if keepdb:
action = "Using existing"
print("%s test database for alias %s..." % (
action,
self._get_database_display_str(verbosity, test_database_name),
))
# We could skip this call if keepdb is True, but we instead
# give it the keepdb param. This is to handle the case
# where the test DB doesn't exist, in which case we need to
# create it, then just not destroy it. If we instead skip
# this, we will get an exception.
self._create_test_db(verbosity, autoclobber, keepdb)
self.connection.close()
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
# We report migrate messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded).
call_command(
'migrate',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
run_syncdb=True,
)
# We then serialize the current state of the database into a string
# and store it on the connection. This slightly horrific process is so people
# who are testing on databases without transactions or who are using
# a TransactionTestCase still get a clean database on every test run.
if serialize:
self.connection._test_serialized_contents = self.serialize_db_to_string()
call_command('createcachetable', database=self.connection.alias)
# Ensure a connection for the side effect of initializing the test database.
self.connection.ensure_connection()
return test_database_name
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
"""
self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']
def serialize_db_to_string(self):
"""
Serialize all data in the database into a JSON string.
Designed only for test runner usage; will not handle large
amounts of data.
"""
# Build list of all apps to serialize
from django.db.migrations.loader import MigrationLoader
loader = MigrationLoader(self.connection)
app_list = []
for app_config in apps.get_app_configs():
if (
app_config.models_module is not None and
app_config.label in loader.migrated_apps and
app_config.name not in settings.TEST_NON_SERIALIZED_APPS
):
app_list.append((app_config, None))
# Make a function to iteratively return every object
def get_objects():
for model in serializers.sort_dependencies(app_list):
if (model._meta.can_migrate(self.connection) and
router.allow_migrate_model(self.connection.alias, model)):
queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name)
yield from queryset.iterator()
# Serialize to a string
out = StringIO()
serializers.serialize("json", get_objects(), indent=None, stream=out)
return out.getvalue()
def deserialize_db_from_string(self, data):
"""
Reload the database with data from a string generated by
the serialize_db_to_string() method.
"""
data = StringIO(data)
for obj in serializers.deserialize("json", data, using=self.connection.alias):
obj.save()
def _get_database_display_str(self, verbosity, database_name):
"""
Return display string for a database for use in various actions.
"""
return "'%s'%s" % (
self.connection.alias,
(" ('%s')" % database_name) if verbosity >= 2 else '',
)
def _get_test_db_name(self):
"""
Internal implementation - return the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
settings.
"""
if self.connection.settings_dict['TEST']['NAME']:
return self.connection.settings_dict['TEST']['NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
def _execute_create_test_db(self, cursor, parameters, keepdb=False):
cursor.execute('CREATE DATABASE %(dbname)s %(suffix)s' % parameters)
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
"""
Internal implementation - create the test db tables.
"""
test_database_name = self._get_test_db_name()
test_db_params = {
'dbname': self.connection.ops.quote_name(test_database_name),
'suffix': self.sql_table_creation_suffix(),
}
# Create the test database and connect to it.
with self._nodb_connection.cursor() as cursor:
try:
self._execute_create_test_db(cursor, test_db_params, keepdb)
except Exception as e:
# if we want to keep the db, then no need to do any of the below,
# just return and skip it all.
if keepdb:
return test_database_name
sys.stderr.write(
"Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database for alias %s..." % (
self._get_database_display_str(verbosity, test_database_name),
))
cursor.execute('DROP DATABASE %(dbname)s' % test_db_params)
self._execute_create_test_db(cursor, test_db_params, keepdb)
except Exception as e:
sys.stderr.write(
"Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
return test_database_name
def clone_test_db(self, number, verbosity=1, autoclobber=False, keepdb=False):
"""
Clone a test database.
"""
source_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
action = 'Cloning test database'
if keepdb:
action = 'Using existing clone'
print("%s for alias %s..." % (
action,
self._get_database_display_str(verbosity, source_database_name),
))
# We could skip this call if keepdb is True, but we instead
# give it the keepdb param. See create_test_db for details.
self._clone_test_db(number, verbosity, keepdb)
def get_test_db_clone_settings(self, number):
"""
Return a modified connection settings dict for the n-th clone of a DB.
"""
# When this function is called, the test database has been created
# already and its name has been copied to settings_dict['NAME'] so
# we don't need to call _get_test_db_name.
orig_settings_dict = self.connection.settings_dict
new_settings_dict = orig_settings_dict.copy()
new_settings_dict['NAME'] = '{}_{}'.format(orig_settings_dict['NAME'], number)
return new_settings_dict
def _clone_test_db(self, number, verbosity, keepdb=False):
"""
Internal implementation - duplicate the test db tables.
"""
raise NotImplementedError(
"The database backend doesn't support cloning databases. "
"Disable the option to run tests in parallel processes.")
def destroy_test_db(self, old_database_name=None, verbosity=1, keepdb=False, number=None):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists.
"""
self.connection.close()
if number is None:
test_database_name = self.connection.settings_dict['NAME']
else:
test_database_name = self.get_test_db_clone_settings(number)['NAME']
if verbosity >= 1:
action = 'Destroying'
if keepdb:
action = 'Preserving'
print("%s test database for alias %s..." % (
action,
self._get_database_display_str(verbosity, test_database_name),
))
# if we want to preserve the database
# skip the actual destroying piece.
if not keepdb:
self._destroy_test_db(test_database_name, verbosity)
# Restore the original database name
if old_database_name is not None:
settings.DATABASES[self.connection.alias]["NAME"] = old_database_name
self.connection.settings_dict["NAME"] = old_database_name
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
with self.connection._nodb_connection.cursor() as cursor:
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ''
def test_db_signature(self):
"""
Return a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
self._get_test_db_name(),
)
| bsd-3-clause |
mitchrule/Miscellaneous | Django_Project/django/Lib/site-packages/django/contrib/gis/management/commands/ogrinspect.py | 111 | 5738 | import argparse
import inspect
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
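# Illustrative sketch (not in the original module) of how these two actions
# parse their values:
#
#     p = argparse.ArgumentParser()
#     p.add_argument('--blank', action=ListOptionAction, default=False)
#     p.add_argument('--layer', action=LayerOptionAction, default=0)
#     ns = p.parse_args(['--blank', 'name,desc', '--layer', '2'])
#     # ns.blank == ['name', 'desc'], ns.layer == 2
#     ns = p.parse_args(['--blank', 'true', '--layer', 'roads'])
#     # ns.blank is True, ns.layer == 'roads'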
class Command(BaseCommand):
help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode')
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument('--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.')
parser.add_argument('--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field '
'(defaults to `geom`)')
parser.add_argument('--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.')
parser.add_argument('--multi-geom', action='store_true',
dest='multi_geom', default=False,
help='Treat the geometry in the data source as a geometry collection.')
parser.add_argument('--name-field', dest='name_field',
help='Specifies a field name to return for the `__unicode__`/`__str__` function.')
parser.add_argument('--no-imports', action='store_false', dest='imports', default=True,
help='Do not include `from django.contrib.gis.db import models` statement.')
parser.add_argument('--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.')
parser.add_argument('--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.')
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
if not gdal.HAS_GDAL:
raise CommandError('GDAL is required to inspect geospatial data sources.')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in inspect.getargspec(_ogrinspect).args and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s' : '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
| mit |
CylanceSPEAR/IntroductionToMachineLearningForSecurityPros | IDPanel/train_lr_model.py | 1 | 4200 | from idpanel.training.vectorization import load_raw_feature_vectors
from idpanel.training.features import load_raw_features
from idpanel.labels import load_labels
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import warnings
from sklearn.metrics import roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
import pickle
def classify(model, sample):
labels = sorted(model.keys())
proba = []
for label in labels:
proba.append(model[label].predict_proba(sample)[0, 1])
label = None
proba = np.array(proba)
if (proba > 0.5).sum() > 0:
label = labels[proba.argmax()]
return label, labels, proba
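# Illustrative usage sketch (not in the original script): classify() expects
# `model` to be a dict mapping a label name to a fitted binary classifier
# exposing predict_proba(); the names below are hypothetical.
#
#     panel_models = {"zeus": zeus_clf, "citadel": citadel_clf}
#     label, labels, proba = classify(panel_models, feature_vector)
#     if label is None:
#         print "no panel matched above the 0.5 threshold"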
if __name__ == "__main__" or True:
from argparse import ArgumentParser
parser = ArgumentParser(
prog=__file__,
description="Train Logistic Regression Model",
)
parser.add_argument("-p", "--penalty", choices=["l1", "l2"], default="l2")
parser.add_argument("-d", "--dual", action='store_true', default=False)
parser.add_argument("-C", type=float, default=1.0)
parser.add_argument("-f", "--fit-intercept", default=True, action='store_true')
parser.add_argument("-i", "--intercept-scaling", type=float, default=1.0)
parser.add_argument("-m", "--max-iter", type=int, default=100)
parser.add_argument("-s", "--solver", choices=["newton-cg", "lbfgs", "liblinear", "sag"], default="liblinear")
parser.add_argument("-t", "--tol", type=float, default=0.0001)
args = parser.parse_args()
warnings.warn = lambda x, y: x
label_indeces = load_labels()
raw_features = load_raw_features()
original_labels, names, vectors = load_raw_feature_vectors()
labels = [1 if l != "not_panel" else 0 for l in original_labels]
vectors = np.array(vectors)
print "Creating training and testing sets"
X_train, X_test, y_train, y_test = train_test_split(vectors, labels, stratify=labels)
print X_train.shape[0], "samples in training set,", len(set(list(y_train))), "labels in training set"
    print X_test.shape[0], "samples in testing set,", len(set(list(y_test))), "labels in testing set"
lr = LogisticRegression(
n_jobs=-1,
penalty=args.penalty,
dual=args.dual,
C=args.C,
fit_intercept=args.fit_intercept,
intercept_scaling=args.intercept_scaling,
max_iter=args.max_iter,
solver=args.solver,
tol=args.tol
)
lr.fit(X_train, y_train)
#print (lr.feature_importances_ != 0).sum()
pred = lr.predict(X_test)
pred_proba = lr.predict_proba(X_test)
print "Confusion Matrix:"
print confusion_matrix(y_test, pred)
#print np.array(y_test) == 1
pos_hist, pos_bin_edges = np.histogram(pred_proba[np.array(y_test) == 1, 1],
bins=[0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
neg_hist, neg_bin_edges = np.histogram(pred_proba[np.array(y_test) == 0, 1],
bins=[0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
fig, (ax1, ax2) = plt.subplots(2, 1)
#print pos_hist.shape, pos_bin_edges.shape
#print neg_hist.tolist()
ax1.plot(pos_bin_edges[:-1] + 0.05, pos_hist, color='green', linestyle='solid', label="Positives")
ax1.plot(neg_bin_edges[:-1] + 0.05, neg_hist, color='red', linestyle='solid', label="Negatives")
ax1.set_xlim(0.0, 1.0)
ax1.set_ylim(0.0, max(neg_hist.max(), pos_hist.max()))
ax1.set_xlabel('Threshold')
ax1.set_ylabel('Sample Count')
ax1.set_title('Positive Classification Thresholds')
ax1.legend(loc="upper left")
fpr, tpr, _ = roc_curve(y_test, pred_proba[:, 1])
roc_auc = auc(fpr, tpr)
ax2.plot(fpr, tpr, linewidth=4)
ax2.plot([0, 1], [0, 1], 'r--')
#ax2.xlim([0.0, 1.0])
#ax2.ylim([0.0, 1.05])
ax2.set_xlabel('False Positive Rate')
ax2.set_ylabel('True Positive Rate')
ax2.set_title('Logistic Regression ROC Curve')
#ax2.legend(loc="lower right")
plt.show()
with open("bot_model.lrmdl", "w") as f:
pickle.dump({"model": lr, "relevant_features": lr.coef_ != 0}, f)
| gpl-3.0 |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/gevent/util.py | 10 | 2050 | # Copyright (c) 2009 Denis Bilenko. See LICENSE for details.
__all__ = ['wrap_errors', 'lazy_property']
class wrap_errors(object):
"""Helper to make function return an exception, rather than raise it.
Because every exception that is unhandled by greenlet will be logged,
it is desirable to prevent non-error exceptions from leaving a greenlet.
    This can be done with a simple ``try``/``except`` construct::
def wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except (A, B, C), ex:
return ex
:class:`wrap_errors` provides a shortcut to write that in one line::
wrapped_func = wrap_errors((A, B, C), func)
It also preserves ``__str__`` and ``__repr__`` of the original function.
"""
# QQQ could also support using wrap_errors as a decorator
def __init__(self, errors, func):
"""Make a new function from `func', such that it catches `errors' (an
Exception subclass, or a tuple of Exception subclasses) and return
it as a value.
"""
self.errors = errors
self.func = func
def __call__(self, *args, **kwargs):
func = self.func
try:
return func(*args, **kwargs)
except self.errors, ex:
return ex
def __str__(self):
return str(self.func)
def __repr__(self):
return repr(self.func)
def __getattr__(self, item):
return getattr(self.func, item)
# XXX no longer used anywhere, remove it
class lazy_property(object):
'''A decorator similar to :meth:`property` that only calls the *function* once.'''
def __init__(self, function):
import warnings
        warnings.warn("gevent.util.lazy_property is deprecated", DeprecationWarning, stacklevel=2)
self._calculate = function
def __get__(self, obj, _=None):
if obj is None:
return self
value = self._calculate(obj)
setattr(obj, self._calculate.func_name, value)
return value
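# Usage sketch (appended for illustration; not part of the upstream module):
# keep an expected failure from being logged by returning it instead.
# `fetch` and `handle_failure` are hypothetical user functions.
#
#     import gevent
#     safe_fetch = wrap_errors((IOError, ValueError), fetch)
#     result = gevent.spawn(safe_fetch, url).get()
#     if isinstance(result, Exception):
#         handle_failure(result)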
| mit |
snorp/web-page-replay | mockhttprequest.py | 40 | 2150 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock instance of ArchivedHttpRequest used for testing."""
class ArchivedHttpRequest(object):
"""Mock instance of ArchivedHttpRequest in HttpArchive."""
def __init__(self, command, host, path, request_body, headers):
"""Initialize an ArchivedHttpRequest.
Args:
command: a string (e.g. 'GET' or 'POST').
host: a host name (e.g. 'www.google.com').
path: a request path (e.g. '/search?q=dogs').
request_body: a request body string for a POST or None.
headers: [(header1, value1), ...] list of tuples
"""
self.command = command
self.host = host
self.path = path
self.request_body = request_body
self.headers = headers
self.trimmed_headers = headers
def __str__(self):
return '%s %s%s %s' % (self.command, self.host, self.path,
self.trimmed_headers)
def __repr__(self):
return repr((self.command, self.host, self.path, self.request_body,
self.trimmed_headers))
def __hash__(self):
"""Return a integer hash to use for hashed collections including dict."""
return hash(repr(self))
def __eq__(self, other):
"""Define the __eq__ method to match the hash behavior."""
return repr(self) == repr(other)
def matches(self, command=None, host=None, path=None):
"""Returns true iff the request matches all parameters."""
return ((command is None or command == self.command) and
(host is None or host == self.host) and
(path is None or path == self.path))
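# Minimal runnable sketch (appended; not in the original file): matches()
# treats omitted parameters as wildcards.
if __name__ == '__main__':
    req = ArchivedHttpRequest('GET', 'www.google.com', '/search?q=dogs',
                              None, [('accept', '*/*')])
    assert req.matches(command='GET', host='www.google.com')
    assert not req.matches(path='/images')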
| apache-2.0 |
bonitadecker77/python-for-android | python-modules/twisted/twisted/web/http.py | 52 | 59442 | # -*- test-case-name: twisted.web.test.test_http -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
HyperText Transfer Protocol implementation.
This is used by twisted.web.
Future Plans:
- HTTP client support will at some point be refactored to support HTTP/1.1.
- Accept chunked data from clients in server.
- Other missing HTTP features from the RFC.
Maintainer: Itamar Shtull-Trauring
"""
# system imports
from cStringIO import StringIO
import tempfile
import base64, binascii
import cgi
import socket
import math
import time
import calendar
import warnings
import os
from urlparse import urlparse as _urlparse
from zope.interface import implements
# twisted imports
from twisted.internet import interfaces, reactor, protocol, address
from twisted.internet.defer import Deferred
from twisted.protocols import policies, basic
from twisted.python import log
try: # try importing the fast, C version
from twisted.protocols._c_urlarg import unquote
except ImportError:
from urllib import unquote
from twisted.web.http_headers import _DictHeaders, Headers
protocol_version = "HTTP/1.1"
_CONTINUE = 100
SWITCHING = 101
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
MULTIPLE_CHOICE = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTH_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE_SPACE = 507
NOT_EXTENDED = 510
RESPONSES = {
# 100
_CONTINUE: "Continue",
SWITCHING: "Switching Protocols",
# 200
OK: "OK",
CREATED: "Created",
ACCEPTED: "Accepted",
NON_AUTHORITATIVE_INFORMATION: "Non-Authoritative Information",
NO_CONTENT: "No Content",
    RESET_CONTENT: "Reset Content",
PARTIAL_CONTENT: "Partial Content",
MULTI_STATUS: "Multi-Status",
# 300
MULTIPLE_CHOICE: "Multiple Choices",
MOVED_PERMANENTLY: "Moved Permanently",
FOUND: "Found",
SEE_OTHER: "See Other",
NOT_MODIFIED: "Not Modified",
USE_PROXY: "Use Proxy",
# 306 not defined??
TEMPORARY_REDIRECT: "Temporary Redirect",
# 400
BAD_REQUEST: "Bad Request",
UNAUTHORIZED: "Unauthorized",
PAYMENT_REQUIRED: "Payment Required",
FORBIDDEN: "Forbidden",
NOT_FOUND: "Not Found",
NOT_ALLOWED: "Method Not Allowed",
NOT_ACCEPTABLE: "Not Acceptable",
PROXY_AUTH_REQUIRED: "Proxy Authentication Required",
REQUEST_TIMEOUT: "Request Time-out",
CONFLICT: "Conflict",
GONE: "Gone",
LENGTH_REQUIRED: "Length Required",
PRECONDITION_FAILED: "Precondition Failed",
REQUEST_ENTITY_TOO_LARGE: "Request Entity Too Large",
REQUEST_URI_TOO_LONG: "Request-URI Too Long",
UNSUPPORTED_MEDIA_TYPE: "Unsupported Media Type",
REQUESTED_RANGE_NOT_SATISFIABLE: "Requested Range not satisfiable",
EXPECTATION_FAILED: "Expectation Failed",
# 500
INTERNAL_SERVER_ERROR: "Internal Server Error",
NOT_IMPLEMENTED: "Not Implemented",
BAD_GATEWAY: "Bad Gateway",
SERVICE_UNAVAILABLE: "Service Unavailable",
GATEWAY_TIMEOUT: "Gateway Time-out",
HTTP_VERSION_NOT_SUPPORTED: "HTTP Version not supported",
INSUFFICIENT_STORAGE_SPACE: "Insufficient Storage Space",
NOT_EXTENDED: "Not Extended"
}
CACHED = """Magic constant returned by http.Request methods to set cache
validation headers when the request is conditional and the value fails
the condition."""
# backwards compatibility
responses = RESPONSES
# datetime parsing and formatting
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
weekdayname_lower = [name.lower() for name in weekdayname]
monthname_lower = [name and name.lower() for name in monthname]
def urlparse(url):
"""
Parse an URL into six components.
This is similar to L{urlparse.urlparse}, but rejects C{unicode} input
and always produces C{str} output.
@type url: C{str}
@raise TypeError: The given url was a C{unicode} string instead of a
C{str}.
@rtype: six-tuple of str
@return: The scheme, net location, path, params, query string, and fragment
of the URL.
"""
if isinstance(url, unicode):
raise TypeError("url must be str, not unicode")
scheme, netloc, path, params, query, fragment = _urlparse(url)
if isinstance(scheme, unicode):
scheme = scheme.encode('ascii')
netloc = netloc.encode('ascii')
path = path.encode('ascii')
query = query.encode('ascii')
fragment = fragment.encode('ascii')
return scheme, netloc, path, params, query, fragment
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, unquote=unquote):
"""
like cgi.parse_qs, only with custom unquote function
"""
d = {}
items = [s2 for s1 in qs.split("&") for s2 in s1.split(";")]
for item in items:
try:
k, v = item.split("=", 1)
except ValueError:
if strict_parsing:
raise
continue
if v or keep_blank_values:
k = unquote(k.replace("+", " "))
v = unquote(v.replace("+", " "))
if k in d:
d[k].append(v)
else:
d[k] = [v]
return d
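# Illustrative examples (added; not in the original source): repeated keys
# accumulate and '+' decodes to a space; blank values are dropped unless
# keep_blank_values is set.
#
#     >>> parse_qs('foo=bar&foo=baz+qux&empty=')
#     {'foo': ['bar', 'baz qux']}
#     >>> parse_qs('empty=', keep_blank_values=1)
#     {'empty': ['']}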
def datetimeToString(msSinceEpoch=None):
"""
Convert seconds since epoch to HTTP datetime string.
"""
    if msSinceEpoch is None:
msSinceEpoch = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(msSinceEpoch)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
weekdayname[wd],
day, monthname[month], year,
hh, mm, ss)
return s
def datetimeToLogString(msSinceEpoch=None):
"""
Convert seconds since epoch to log datetime string.
"""
    if msSinceEpoch is None:
msSinceEpoch = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(msSinceEpoch)
s = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
day, monthname[month], year,
hh, mm, ss)
return s
# a hack so we don't need to recalculate log datetime every hit,
# at the price of a small, unimportant, inaccuracy.
_logDateTime = None
_logDateTimeUsers = 0
_resetLogDateTimeID = None
def _resetLogDateTime():
global _logDateTime
global _resetLogDateTime
global _resetLogDateTimeID
_logDateTime = datetimeToLogString()
_resetLogDateTimeID = reactor.callLater(1, _resetLogDateTime)
def _logDateTimeStart():
global _logDateTimeUsers
if not _logDateTimeUsers:
_resetLogDateTime()
_logDateTimeUsers += 1
def _logDateTimeStop():
global _logDateTimeUsers
    _logDateTimeUsers -= 1
if (not _logDateTimeUsers and _resetLogDateTimeID
and _resetLogDateTimeID.active()):
_resetLogDateTimeID.cancel()
def timegm(year, month, day, hour, minute, second):
"""
Convert time tuple in GMT to seconds since epoch, GMT
"""
EPOCH = 1970
if year < EPOCH:
raise ValueError("Years prior to %d not supported" % (EPOCH,))
assert 1 <= month <= 12
days = 365*(year-EPOCH) + calendar.leapdays(EPOCH, year)
for i in range(1, month):
days = days + calendar.mdays[i]
if month > 2 and calendar.isleap(year):
days = days + 1
days = days + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
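# Illustrative check (added; not in the original source), using the classic
# RFC 2616 example date:
#
#     >>> timegm(1994, 11, 6, 8, 49, 37)
#     784111777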
def stringToDatetime(dateString):
"""
Convert an HTTP date string (one of three formats) to seconds since epoch.
"""
parts = dateString.split()
if not parts[0][0:3].lower() in weekdayname_lower:
# Weekday is stupid. Might have been omitted.
try:
return stringToDatetime("Sun, "+dateString)
except ValueError:
# Guess not.
pass
partlen = len(parts)
if (partlen == 5 or partlen == 6) and parts[1].isdigit():
# 1st date format: Sun, 06 Nov 1994 08:49:37 GMT
# (Note: "GMT" is literal, not a variable timezone)
# (also handles without "GMT")
# This is the normal format
day = parts[1]
month = parts[2]
year = parts[3]
time = parts[4]
elif (partlen == 3 or partlen == 4) and parts[1].find('-') != -1:
# 2nd date format: Sunday, 06-Nov-94 08:49:37 GMT
# (Note: "GMT" is literal, not a variable timezone)
        # (also handles without "GMT")
# Two digit year, yucko.
day, month, year = parts[1].split('-')
time = parts[2]
year=int(year)
if year < 69:
year = year + 2000
elif year < 100:
year = year + 1900
elif len(parts) == 5:
# 3rd date format: Sun Nov 6 08:49:37 1994
# ANSI C asctime() format.
day = parts[2]
month = parts[1]
year = parts[4]
time = parts[3]
else:
raise ValueError("Unknown datetime format %r" % dateString)
day = int(day)
month = int(monthname_lower.index(month.lower()))
year = int(year)
hour, min, sec = map(int, time.split(':'))
return int(timegm(year, month, day, hour, min, sec))
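# Illustrative examples (added; not in the original source): all three
# accepted formats parse to the same epoch value.
#
#     >>> stringToDatetime('Sun, 06 Nov 1994 08:49:37 GMT')   # RFC 822/1123
#     784111777
#     >>> stringToDatetime('Sunday, 06-Nov-94 08:49:37 GMT')  # RFC 850
#     784111777
#     >>> stringToDatetime('Sun Nov  6 08:49:37 1994')        # asctime()
#     784111777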
def toChunk(data):
"""
Convert string to a chunk.
@returns: a tuple of strings representing the chunked encoding of data
"""
return ("%x\r\n" % len(data), data, "\r\n")
def fromChunk(data):
"""
Convert chunk to string.
@returns: tuple (result, remaining), may raise ValueError.
"""
prefix, rest = data.split('\r\n', 1)
length = int(prefix, 16)
if length < 0:
raise ValueError("Chunk length must be >= 0, not %d" % (length,))
if not rest[length:length + 2] == '\r\n':
raise ValueError, "chunk must end with CRLF"
return rest[:length], rest[length + 2:]
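# Illustrative round trip (added; not in the original source):
#
#     >>> toChunk('hello')
#     ('5\r\n', 'hello', '\r\n')
#     >>> fromChunk('5\r\nhello\r\nnext')
#     ('hello', 'next')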
def parseContentRange(header):
"""
Parse a content-range header into (start, end, realLength).
realLength might be None if real length is not known ('*').
"""
kind, other = header.strip().split()
if kind.lower() != "bytes":
        raise ValueError, "a range of type %r is not supported" % (kind,)
startend, realLength = other.split("/")
start, end = map(int, startend.split("-"))
if realLength == "*":
realLength = None
else:
realLength = int(realLength)
return (start, end, realLength)
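# Illustrative examples (added; not in the original source):
#
#     >>> parseContentRange('bytes 0-499/1234')
#     (0, 499, 1234)
#     >>> parseContentRange('bytes 500-999/*')
#     (500, 999, None)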
class StringTransport:
"""
I am a StringIO wrapper that conforms for the transport API. I support
the `writeSequence' method.
"""
def __init__(self):
self.s = StringIO()
def writeSequence(self, seq):
self.s.write(''.join(seq))
def __getattr__(self, attr):
return getattr(self.__dict__['s'], attr)
class HTTPClient(basic.LineReceiver):
"""
A client for HTTP 1.0.
Notes:
You probably want to send a 'Host' header with the name of the site you're
connecting to, in order to not break name based virtual hosting.
@ivar length: The length of the request body in bytes.
@type length: C{int}
@ivar firstLine: Are we waiting for the first header line?
@type firstLine: C{bool}
@ivar __buffer: The buffer that stores the response to the HTTP request.
@type __buffer: A C{StringIO} object.
@ivar _header: Part or all of an HTTP request header.
@type _header: C{str}
"""
length = None
firstLine = True
__buffer = None
_header = ""
def sendCommand(self, command, path):
self.transport.write('%s %s HTTP/1.0\r\n' % (command, path))
def sendHeader(self, name, value):
self.transport.write('%s: %s\r\n' % (name, value))
def endHeaders(self):
self.transport.write('\r\n')
def extractHeader(self, header):
"""
Given a complete HTTP header, extract the field name and value and
process the header.
@param header: a complete HTTP request header of the form
'field-name: value'.
@type header: C{str}
"""
key, val = header.split(':', 1)
val = val.lstrip()
self.handleHeader(key, val)
if key.lower() == 'content-length':
self.length = int(val)
def lineReceived(self, line):
"""
Parse the status line and headers for an HTTP request.
@param line: Part of an HTTP request header. Request bodies are parsed
in L{rawDataReceived}.
@type line: C{str}
"""
if self.firstLine:
self.firstLine = False
l = line.split(None, 2)
version = l[0]
status = l[1]
try:
message = l[2]
except IndexError:
# sometimes there is no message
message = ""
self.handleStatus(version, status, message)
return
if not line:
if self._header != "":
# Only extract headers if there are any
self.extractHeader(self._header)
self.__buffer = StringIO()
self.handleEndHeaders()
self.setRawMode()
return
if line.startswith('\t') or line.startswith(' '):
# This line is part of a multiline header. According to RFC 822, in
# "unfolding" multiline headers you do not strip the leading
# whitespace on the continuing line.
self._header = self._header + line
elif self._header:
# This line starts a new header, so process the previous one.
self.extractHeader(self._header)
self._header = line
else: # First header
self._header = line
def connectionLost(self, reason):
self.handleResponseEnd()
def handleResponseEnd(self):
"""
The response has been completely received.
This callback may be invoked more than once per request.
"""
if self.__buffer is not None:
b = self.__buffer.getvalue()
self.__buffer = None
self.handleResponse(b)
def handleResponsePart(self, data):
self.__buffer.write(data)
def connectionMade(self):
pass
def handleStatus(self, version, status, message):
"""
Called when the status-line is received.
@param version: e.g. 'HTTP/1.0'
@param status: e.g. '200'
@type status: C{str}
@param message: e.g. 'OK'
"""
def handleHeader(self, key, val):
"""
Called every time a header is received.
"""
def handleEndHeaders(self):
"""
Called when all headers have been received.
"""
def rawDataReceived(self, data):
if self.length is not None:
data, rest = data[:self.length], data[self.length:]
self.length -= len(data)
else:
rest = ''
self.handleResponsePart(data)
if self.length == 0:
self.handleResponseEnd()
self.setLineMode(rest)
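# Illustrative subclass sketch (added; not part of the original file):
# HTTPClient is driven by overriding the handle* callbacks. The class and
# host below are hypothetical.
#
#     class PrintingClient(HTTPClient):
#         def connectionMade(self):
#             self.sendCommand('GET', '/')
#             self.sendHeader('Host', 'example.com')
#             self.endHeaders()
#
#         def handleResponse(self, body):
#             print body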
# response codes that must have empty bodies
NO_BODY_CODES = (204, 304)
class Request:
"""
A HTTP request.
Subclasses should override the process() method to determine how
the request will be processed.
@ivar method: The HTTP method that was used.
@ivar uri: The full URI that was requested (includes arguments).
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
@type requestHeaders: L{http_headers.Headers}
@ivar requestHeaders: All received HTTP request headers.
@ivar received_headers: Backwards-compatibility access to
C{requestHeaders}. Use C{requestHeaders} instead. C{received_headers}
behaves mostly like a C{dict} and does not provide access to all header
values.
@type responseHeaders: L{http_headers.Headers}
@ivar responseHeaders: All HTTP response headers to be sent.
@ivar headers: Backwards-compatibility access to C{responseHeaders}. Use
C{responseHeaders} instead. C{headers} behaves mostly like a C{dict}
and does not provide access to all header values nor does it allow
multiple values for one header to be set.
@ivar notifications: A C{list} of L{Deferred}s which are waiting for
notification that the response to this request has been finished
(successfully or with an error). Don't use this attribute directly,
instead use the L{Request.notifyFinish} method.
@ivar _disconnected: A flag which is C{False} until the connection over
which this request was received is closed and which is C{True} after
that.
@type _disconnected: C{bool}
"""
implements(interfaces.IConsumer)
producer = None
finished = 0
code = OK
code_message = RESPONSES[OK]
method = "(no method yet)"
clientproto = "(no clientproto yet)"
uri = "(no uri yet)"
startedWriting = 0
chunked = 0
sentLength = 0 # content-length of response, or total bytes sent via chunking
etag = None
lastModified = None
args = None
path = None
content = None
_forceSSL = 0
_disconnected = False
def __init__(self, channel, queued):
"""
@param channel: the channel we're connected to.
@param queued: are we in the request queue, or can we start writing to
the transport?
"""
self.notifications = []
self.channel = channel
self.queued = queued
self.requestHeaders = Headers()
self.received_cookies = {}
self.responseHeaders = Headers()
self.cookies = [] # outgoing cookies
if queued:
self.transport = StringTransport()
else:
self.transport = self.channel.transport
def __setattr__(self, name, value):
"""
Support assignment of C{dict} instances to C{received_headers} for
backwards-compatibility.
"""
if name == 'received_headers':
# A property would be nice, but Request is classic.
self.requestHeaders = headers = Headers()
for k, v in value.iteritems():
headers.setRawHeaders(k, [v])
elif name == 'requestHeaders':
self.__dict__[name] = value
self.__dict__['received_headers'] = _DictHeaders(value)
elif name == 'headers':
self.responseHeaders = headers = Headers()
for k, v in value.iteritems():
headers.setRawHeaders(k, [v])
elif name == 'responseHeaders':
self.__dict__[name] = value
self.__dict__['headers'] = _DictHeaders(value)
else:
self.__dict__[name] = value
def _cleanup(self):
"""
Called when have finished responding and are no longer queued.
"""
if self.producer:
log.err(RuntimeError("Producer was not unregistered for %s" % self.uri))
self.unregisterProducer()
self.channel.requestDone(self)
del self.channel
try:
self.content.close()
except OSError:
# win32 suckiness, no idea why it does this
pass
del self.content
for d in self.notifications:
d.callback(None)
self.notifications = []
# methods for channel - end users should not use these
def noLongerQueued(self):
"""
Notify the object that it is no longer queued.
We start writing whatever data we have to the transport, etc.
This method is not intended for users.
"""
if not self.queued:
raise RuntimeError, "noLongerQueued() got called unnecessarily."
self.queued = 0
# set transport to real one and send any buffer data
data = self.transport.getvalue()
self.transport = self.channel.transport
if data:
self.transport.write(data)
# if we have producer, register it with transport
if (self.producer is not None) and not self.finished:
self.transport.registerProducer(self.producer, self.streamingProducer)
# if we're finished, clean up
if self.finished:
self._cleanup()
def gotLength(self, length):
"""
Called when HTTP channel got length of content in this request.
This method is not intended for users.
@param length: The length of the request body, as indicated by the
request headers. C{None} if the request headers do not indicate a
length.
"""
if length is not None and length < 100000:
self.content = StringIO()
else:
self.content = tempfile.TemporaryFile()
def parseCookies(self):
"""
Parse cookie headers.
This method is not intended for users.
"""
cookieheaders = self.requestHeaders.getRawHeaders("cookie")
if cookieheaders is None:
return
for cookietxt in cookieheaders:
if cookietxt:
for cook in cookietxt.split(';'):
cook = cook.lstrip()
try:
k, v = cook.split('=', 1)
self.received_cookies[k] = v
except ValueError:
pass
def handleContentChunk(self, data):
"""
Write a chunk of data.
This method is not intended for users.
"""
self.content.write(data)
def requestReceived(self, command, path, version):
"""
Called by channel when all data has been received.
This method is not intended for users.
@type command: C{str}
@param command: The HTTP verb of this request. This has the case
supplied by the client (eg, it maybe "get" rather than "GET").
@type path: C{str}
@param path: The URI of this request.
@type version: C{str}
@param version: The HTTP version of this request.
"""
self.content.seek(0,0)
self.args = {}
self.stack = []
self.method, self.uri = command, path
self.clientproto = version
x = self.uri.split('?', 1)
if len(x) == 1:
self.path = self.uri
else:
self.path, argstring = x
self.args = parse_qs(argstring, 1)
# cache the client and server information, we'll need this later to be
# serialized and sent with the request so CGIs will work remotely
self.client = self.channel.transport.getPeer()
self.host = self.channel.transport.getHost()
# Argument processing
args = self.args
ctype = self.requestHeaders.getRawHeaders('content-type')
if ctype is not None:
ctype = ctype[0]
if self.method == "POST" and ctype:
mfd = 'multipart/form-data'
key, pdict = cgi.parse_header(ctype)
if key == 'application/x-www-form-urlencoded':
args.update(parse_qs(self.content.read(), 1))
elif key == mfd:
try:
args.update(cgi.parse_multipart(self.content, pdict))
except KeyError, e:
if e.args[0] == 'content-disposition':
# Parse_multipart can't cope with missing
                        # content-disposition headers in multipart/form-data
# parts, so we catch the exception and tell the client
# it was a bad request.
self.channel.transport.write(
"HTTP/1.1 400 Bad Request\r\n\r\n")
self.channel.transport.loseConnection()
return
raise
self.content.seek(0, 0)
self.process()
def __repr__(self):
        return '<%s %s %s>' % (self.method, self.uri, self.clientproto)
def process(self):
"""
Override in subclasses.
This method is not intended for users.
"""
pass
# consumer interface
def registerProducer(self, producer, streaming):
"""
Register a producer.
"""
if self.producer:
raise ValueError, "registering producer %s before previous one (%s) was unregistered" % (producer, self.producer)
self.streamingProducer = streaming
self.producer = producer
if self.queued:
if streaming:
producer.pauseProducing()
else:
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
"""
Unregister the producer.
"""
if not self.queued:
self.transport.unregisterProducer()
self.producer = None
# private http response methods
def _sendError(self, code, resp=''):
self.transport.write('%s %s %s\r\n\r\n' % (self.clientproto, code, resp))
# The following is the public interface that people should be
# writing to.
def getHeader(self, key):
"""
Get an HTTP request header.
@type key: C{str}
@param key: The name of the header to get the value of.
@rtype: C{str} or C{NoneType}
@return: The value of the specified header, or C{None} if that header
was not present in the request.
"""
value = self.requestHeaders.getRawHeaders(key)
if value is not None:
return value[-1]
def getCookie(self, key):
"""
Get a cookie that was sent from the network.
"""
return self.received_cookies.get(key)
def notifyFinish(self):
"""
Notify when the response to this request has finished.
@rtype: L{Deferred}
@return: A L{Deferred} which will be triggered when the request is
finished -- with a C{None} value if the request finishes
successfully or with an error if the request is interrupted by an
error (for example, the client closing the connection prematurely).
"""
self.notifications.append(Deferred())
return self.notifications[-1]
def finish(self):
"""
Indicate that all response data has been written to this L{Request}.
"""
if self._disconnected:
raise RuntimeError(
"Request.finish called on a request after its connection was lost; "
"use Request.notifyFinish to keep track of this.")
if self.finished:
warnings.warn("Warning! request.finish called twice.", stacklevel=2)
return
if not self.startedWriting:
# write headers
self.write('')
if self.chunked:
# write last chunk and closing CRLF
self.transport.write("0\r\n\r\n")
# log request
if hasattr(self.channel, "factory"):
self.channel.factory.log(self)
self.finished = 1
if not self.queued:
self._cleanup()
def write(self, data):
"""
Write some data as a result of an HTTP request. The first
time this is called, it writes out response data.
@type data: C{str}
@param data: Some bytes to be sent as part of the response body.
"""
if not self.startedWriting:
self.startedWriting = 1
version = self.clientproto
l = []
l.append('%s %s %s\r\n' % (version, self.code,
self.code_message))
# if we don't have a content length, we send data in
# chunked mode, so that we can support pipelining in
# persistent connections.
if ((version == "HTTP/1.1") and
(self.responseHeaders.getRawHeaders('content-length') is None) and
self.method != "HEAD" and self.code not in NO_BODY_CODES):
l.append("%s: %s\r\n" % ('Transfer-Encoding', 'chunked'))
self.chunked = 1
if self.lastModified is not None:
if self.responseHeaders.hasHeader('last-modified'):
log.msg("Warning: last-modified specified both in"
" header list and lastModified attribute.")
else:
self.responseHeaders.setRawHeaders(
'last-modified',
[datetimeToString(self.lastModified)])
if self.etag is not None:
self.responseHeaders.setRawHeaders('ETag', [self.etag])
for name, values in self.responseHeaders.getAllRawHeaders():
for value in values:
l.append("%s: %s\r\n" % (name, value))
for cookie in self.cookies:
l.append('%s: %s\r\n' % ("Set-Cookie", cookie))
l.append("\r\n")
self.transport.writeSequence(l)
# if this is a "HEAD" request, we shouldn't return any data
if self.method == "HEAD":
self.write = lambda data: None
return
# for certain result codes, we should never return any data
if self.code in NO_BODY_CODES:
self.write = lambda data: None
return
self.sentLength = self.sentLength + len(data)
if data:
if self.chunked:
self.transport.writeSequence(toChunk(data))
else:
self.transport.write(data)
def addCookie(self, k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
L{twisted.web.server.Request.getSession} and the
L{twisted.web.server.Session} class for details.
"""
cookie = '%s=%s' % (k, v)
if expires is not None:
cookie = cookie + "; Expires=%s" % expires
if domain is not None:
cookie = cookie + "; Domain=%s" % domain
if path is not None:
cookie = cookie + "; Path=%s" % path
if max_age is not None:
cookie = cookie + "; Max-Age=%s" % max_age
if comment is not None:
cookie = cookie + "; Comment=%s" % comment
if secure:
cookie = cookie + "; Secure"
self.cookies.append(cookie)
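# Illustrative sketch (not part of the original module): setting a session
# cookie from request-handling code; the cookie name and values are made up.
#
#     request.addCookie('session', 'abc123', path='/', max_age=3600,
#                       secure=True)
#     # appends: session=abc123; Path=/; Max-Age=3600; Secure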
def setResponseCode(self, code, message=None):
"""
Set the HTTP response code.
"""
if not isinstance(code, (int, long)):
raise TypeError("HTTP response code must be int or long")
self.code = code
if message:
self.code_message = message
else:
self.code_message = RESPONSES.get(code, "Unknown Status")
def setHeader(self, name, value):
"""
Set an HTTP response header. Overrides any previously set values for
this header.
@type name: C{str}
@param name: The name of the header for which to set the value.
@type value: C{str}
@param value: The value to set for the named header.
"""
self.responseHeaders.setRawHeaders(name, [value])
def redirect(self, url):
"""
Utility function that does a redirect.
The request should have finish() called after this.
"""
self.setResponseCode(FOUND)
self.setHeader("location", url)
def setLastModified(self, when):
"""
Set the C{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set
Last-Modified earlier, only replacing the Last-Modified time
if it is to a later value.
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} if appropriate for the time given.
@param when: The last time the resource being returned was
modified, in seconds since the epoch.
@type when: number
@return: If I am a C{If-Modified-Since} conditional request and
the time given is not newer than the condition, I return
L{http.CACHED<CACHED>} to indicate that you should write no
body. Otherwise, I return a false value.
"""
# time.time() may be a float, but the HTTP-date strings are
# only good for whole seconds.
when = long(math.ceil(when))
if (not self.lastModified) or (self.lastModified < when):
self.lastModified = when
modifiedSince = self.getHeader('if-modified-since')
if modifiedSince:
firstPart = modifiedSince.split(';', 1)[0]
try:
modifiedSince = stringToDatetime(firstPart)
except ValueError:
return None
if modifiedSince >= when:
self.setResponseCode(NOT_MODIFIED)
return CACHED
return None
def setETag(self, etag):
"""
Set an C{entity tag} for the outgoing response.
That's \"entity tag\" as in the HTTP/1.1 C{ETag} header, \"used
for comparing two or more entities from the same requested
resource.\"
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} or L{PRECONDITION_FAILED}, if appropriate
for the tag given.
@param etag: The entity tag for the resource being returned.
@type etag: string
@return: If I am a C{If-None-Match} conditional request and
the tag matches one in the request, I return
L{http.CACHED<CACHED>} to indicate that you should write
no body. Otherwise, I return a false value.
"""
if etag:
self.etag = etag
tags = self.getHeader("if-none-match")
if tags:
tags = tags.split()
if (etag in tags) or ('*' in tags):
self.setResponseCode(((self.method in ("HEAD", "GET"))
and NOT_MODIFIED)
or PRECONDITION_FAILED)
return CACHED
return None
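# Illustrative sketch (not part of the original module): combining the
# conditional-request helpers with the CACHED sentinel in a render method.
# The tag and timestamp are made-up example values.
#
#     if request.setETag('"v42"') == CACHED:
#         return ''  # response code is already NOT_MODIFIED; write no body
#     if request.setLastModified(1234567890) == CACHED:
#         return ''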
def getAllHeaders(self):
"""
Return dictionary mapping the names of all received headers to the last
value received for each.
Since this method does not return all header information,
C{self.requestHeaders.getAllRawHeaders()} may be preferred.
"""
headers = {}
for k, v in self.requestHeaders.getAllRawHeaders():
headers[k.lower()] = v[-1]
return headers
def getRequestHostname(self):
"""
Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
@returns: the requested hostname
@rtype: C{str}
"""
# XXX This method probably has no unit tests. I changed it a ton and
# nothing failed.
host = self.getHeader('host')
if host:
return host.split(':', 1)[0]
return self.getHost().host
def getHost(self):
"""
Get my originally requesting transport's host.
Don't rely on the 'transport' attribute, since Request objects may be
copied remotely. For information on this method's return value, see
twisted.internet.tcp.Port.
"""
return self.host
def setHost(self, host, port, ssl=0):
"""
Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g.
both Squid and Apache's mod_proxy can do this), when the address
the HTTP client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and then
forwarding requests to http://localhost:8080, but we don't want HTML produced
by Twisted to say 'http://localhost:8080', they should say 'https://www.example.com',
so we do::
request.setHost('www.example.com', 443, ssl=1)
@type host: C{str}
@param host: The value to which to change the host header.
@type ssl: C{bool}
@param ssl: A flag which, if C{True}, indicates that the request is
considered secure (if C{True}, L{isSecure} will return C{True}).
"""
self._forceSSL = ssl
self.requestHeaders.setRawHeaders("host", [host])
self.host = address.IPv4Address("TCP", host, port)
def getClientIP(self):
"""
Return the IP address of the client who submitted this request.
@returns: the client IP address
@rtype: C{str}
"""
if isinstance(self.client, address.IPv4Address):
return self.client.host
else:
return None
def isSecure(self):
"""
Return True if this request is using a secure transport.
Normally this method returns True if this request's HTTPChannel
instance is using a transport that implements ISSLTransport.
This will also return True if setHost() has been called
with ssl=True.
@returns: True if this request is secure
@rtype: C{bool}
"""
if self._forceSSL:
return True
transport = getattr(getattr(self, 'channel', None), 'transport', None)
if interfaces.ISSLTransport(transport, None) is not None:
return True
return False
def _authorize(self):
# Authorization, (mostly) per the RFC
try:
authh = self.getHeader("Authorization")
if not authh:
self.user = self.password = ''
return
bas, upw = authh.split()
if bas.lower() != "basic":
raise ValueError
upw = base64.decodestring(upw)
self.user, self.password = upw.split(':', 1)
except (binascii.Error, ValueError):
self.user = self.password = ""
except:
log.err()
self.user = self.password = ""
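# Worked example (not part of the original module): for the header
# "Authorization: Basic dXNlcjpwYXNz" (base64 of "user:pass"), _authorize
# leaves self.user == "user" and self.password == "pass"; any malformed
# credential degrades to empty strings.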
def getUser(self):
"""
Return the HTTP user sent with this request, if any.
If no user was supplied, return the empty string.
@returns: the HTTP user, if any
@rtype: C{str}
"""
try:
return self.user
except:
pass
self._authorize()
return self.user
def getPassword(self):
"""
Return the HTTP password sent with this request, if any.
If no password was supplied, return the empty string.
@returns: the HTTP password, if any
@rtype: C{str}
"""
try:
return self.password
except:
pass
self._authorize()
return self.password
def getClient(self):
if self.client.type != 'TCP':
return None
host = self.client.host
try:
name, names, addresses = socket.gethostbyaddr(host)
except socket.error:
return host
names.insert(0, name)
for name in names:
if '.' in name:
return name
return names[0]
def connectionLost(self, reason):
"""
There is no longer a connection for this request to respond over.
Clean up anything which can't be useful anymore.
"""
self._disconnected = True
self.channel = None
if self.content is not None:
self.content.close()
for d in self.notifications:
d.errback(reason)
self.notifications = []
class _DataLoss(Exception):
"""
L{_DataLoss} indicates that not all of a message body was received. This
is only one of several possible exceptions which may indicate that data
was lost. Because of this, it should not be checked for
specifically; any unexpected exception should be treated as having
caused data loss.
"""
class PotentialDataLoss(Exception):
"""
L{PotentialDataLoss} may be raised by a transfer encoding decoder's
C{noMoreData} method to indicate that it cannot be determined if the
entire response body has been delivered. This only occurs when making
requests to HTTP servers which do not set I{Content-Length} or a
I{Transfer-Encoding} in the response because in this case the end of the
response is indicated by the connection being closed, an event which may
also be due to a transient network problem or other error.
"""
class _IdentityTransferDecoder(object):
"""
Protocol for accumulating bytes up to a specified length. This handles the
case where no I{Transfer-Encoding} is specified.
@ivar contentLength: Counter keeping track of how many more bytes there are
to receive.
@ivar dataCallback: A one-argument callable which will be invoked each
time application data is received.
@ivar finishCallback: A one-argument callable which will be invoked when
the terminal chunk is received. It will be invoked with all bytes
which were delivered to this protocol which came after the terminal
chunk.
"""
def __init__(self, contentLength, dataCallback, finishCallback):
self.contentLength = contentLength
self.dataCallback = dataCallback
self.finishCallback = finishCallback
def dataReceived(self, data):
"""
Interpret the next chunk of bytes received. Either deliver them to the
data callback or invoke the finish callback if enough bytes have been
received.
@raise RuntimeError: If the finish callback has already been invoked
during a previous call to this method.
"""
if self.dataCallback is None:
raise RuntimeError(
"_IdentityTransferDecoder cannot decode data after finishing")
if self.contentLength is None:
self.dataCallback(data)
elif len(data) < self.contentLength:
self.contentLength -= len(data)
self.dataCallback(data)
else:
# Make the state consistent before invoking any code belonging to
# anyone else in case noMoreData ends up being called beneath this
# stack frame.
contentLength = self.contentLength
dataCallback = self.dataCallback
finishCallback = self.finishCallback
self.dataCallback = self.finishCallback = None
self.contentLength = 0
dataCallback(data[:contentLength])
finishCallback(data[contentLength:])
def noMoreData(self):
"""
All data which will be delivered to this decoder has been. Check to
make sure as much data as was expected has been received.
@raise PotentialDataLoss: If the content length is unknown.
@raise _DataLoss: If the content length is known and fewer than that
many bytes have been delivered.
@return: C{None}
"""
finishCallback = self.finishCallback
self.dataCallback = self.finishCallback = None
if self.contentLength is None:
finishCallback('')
raise PotentialDataLoss()
elif self.contentLength != 0:
raise _DataLoss()
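# Illustrative sketch (not part of the original module): driving the identity
# decoder by hand; `chunks` is a hypothetical accumulator list.
#
#     chunks = []
#     decoder = _IdentityTransferDecoder(
#         5, chunks.append, lambda extra: chunks.append('done:%r' % (extra,)))
#     decoder.dataReceived('helloEXTRA')
#     # chunks == ['hello', "done:'EXTRA'"]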
class _ChunkedTransferDecoder(object):
"""
Protocol for decoding I{chunked} Transfer-Encoding, as defined by RFC 2616,
section 3.6.1. This protocol can interpret the contents of a request or
response body which uses the I{chunked} Transfer-Encoding. It cannot
interpret any of the rest of the HTTP protocol.
It may make sense for _ChunkedTransferDecoder to be an actual IProtocol
implementation. Currently, the only user of this class will only ever
call dataReceived on it. However, it might be an improvement if the
user could connect this to a transport and deliver connection lost
notification. This way, `dataCallback` becomes `self.transport.write`
and perhaps `finishCallback` becomes `self.transport.loseConnection()`
(although I'm not sure where the extra data goes in that case). This
could also allow this object to indicate to the receiver of data that
the stream was not completely received, an error case which should be
noticed. -exarkun
@ivar dataCallback: A one-argument callable which will be invoked each
time application data is received.
@ivar finishCallback: A one-argument callable which will be invoked when
the terminal chunk is received. It will be invoked with all bytes
which were delivered to this protocol which came after the terminal
chunk.
@ivar length: Counter keeping track of how many more bytes in a chunk there
are to receive.
@ivar state: One of C{'chunk-length'}, C{'trailer'}, C{'body'}, or
C{'finished'}. For C{'chunk-length'}, data for the chunk length line
is currently being read. For C{'trailer'}, the CR LF pair which
follows each chunk is being read. For C{'body'}, the contents of a
chunk are being read. For C{'finished'}, the last chunk has been
completely read and no more input is valid.
@ivar finish: A flag indicating that the last chunk has been started. When
it finishes, the state will change to C{'finished'} and no more data
will be accepted.
"""
state = 'chunk-length'
finish = False
def __init__(self, dataCallback, finishCallback):
self.dataCallback = dataCallback
self.finishCallback = finishCallback
self._buffer = ''
def dataReceived(self, data):
"""
Interpret data from a request or response body which uses the
I{chunked} Transfer-Encoding.
"""
data = self._buffer + data
self._buffer = ''
while data:
if self.state == 'chunk-length':
if '\r\n' in data:
line, rest = data.split('\r\n', 1)
parts = line.split(';')
self.length = int(parts[0], 16)
if self.length == 0:
self.state = 'trailer'
self.finish = True
else:
self.state = 'body'
data = rest
else:
self._buffer = data
data = ''
elif self.state == 'trailer':
if data.startswith('\r\n'):
data = data[2:]
if self.finish:
self.state = 'finished'
self.finishCallback(data)
data = ''
else:
self.state = 'chunk-length'
else:
self._buffer = data
data = ''
elif self.state == 'body':
if len(data) >= self.length:
chunk, data = data[:self.length], data[self.length:]
self.dataCallback(chunk)
self.state = 'trailer'
elif len(data) < self.length:
self.length -= len(data)
self.dataCallback(data)
data = ''
elif self.state == 'finished':
raise RuntimeError(
"_ChunkedTransferDecoder.dataReceived called after last "
"chunk was processed")
def noMoreData(self):
"""
Verify that all data has been received. If it has not been, raise
L{_DataLoss}.
"""
if self.state != 'finished':
raise _DataLoss(
"Chunked decoder in %r state, still expecting more data to "
"get to finished state." % (self.state,))
class HTTPChannel(basic.LineReceiver, policies.TimeoutMixin):
"""
A receiver for HTTP requests.
@ivar _transferDecoder: C{None} or an instance of
L{_ChunkedTransferDecoder} if the request body uses the I{chunked}
Transfer-Encoding.
"""
maxHeaders = 500 # max number of headers allowed per request
length = 0
persistent = 1
__header = ''
__first_line = 1
__content = None
# set in instances or subclasses
requestFactory = Request
_savedTimeOut = None
_receivedHeaderCount = 0
def __init__(self):
# the request queue
self.requests = []
self._transferDecoder = None
def connectionMade(self):
self.setTimeout(self.timeOut)
def lineReceived(self, line):
self.resetTimeout()
if self.__first_line:
# if this connection is not persistent, drop any data which
# the client (illegally) sent after the last request.
if not self.persistent:
self.dataReceived = self.lineReceived = lambda *args: None
return
# IE sends an extraneous empty line (\r\n) after a POST request;
# eat up such a line, but only ONCE
if not line and self.__first_line == 1:
self.__first_line = 2
return
# create a new Request object
request = self.requestFactory(self, len(self.requests))
self.requests.append(request)
self.__first_line = 0
parts = line.split()
if len(parts) != 3:
self.transport.write("HTTP/1.1 400 Bad Request\r\n\r\n")
self.transport.loseConnection()
return
command, request, version = parts  # note: 'request' is rebound here from the Request object to the URI string
self._command = command
self._path = request
self._version = version
elif line == '':
if self.__header:
self.headerReceived(self.__header)
self.__header = ''
self.allHeadersReceived()
if self.length == 0:
self.allContentReceived()
else:
self.setRawMode()
elif line[0] in ' \t':
self.__header = self.__header+'\n'+line
else:
if self.__header:
self.headerReceived(self.__header)
self.__header = line
def _finishRequestBody(self, data):
self.allContentReceived()
self.setLineMode(data)
def headerReceived(self, line):
"""
Do pre-processing (for content-length) and store this header away.
Enforce the per-request header limit.
@type line: C{str}
@param line: A line from the header section of a request, excluding the
line delimiter.
"""
header, data = line.split(':', 1)
header = header.lower()
data = data.strip()
if header == 'content-length':
self.length = int(data)
self._transferDecoder = _IdentityTransferDecoder(
self.length, self.requests[-1].handleContentChunk, self._finishRequestBody)
elif header == 'transfer-encoding' and data.lower() == 'chunked':
self.length = None
self._transferDecoder = _ChunkedTransferDecoder(
self.requests[-1].handleContentChunk, self._finishRequestBody)
reqHeaders = self.requests[-1].requestHeaders
values = reqHeaders.getRawHeaders(header)
if values is not None:
values.append(data)
else:
reqHeaders.setRawHeaders(header, [data])
self._receivedHeaderCount += 1
if self._receivedHeaderCount > self.maxHeaders:
self.transport.write("HTTP/1.1 400 Bad Request\r\n\r\n")
self.transport.loseConnection()
def allContentReceived(self):
command = self._command
path = self._path
version = self._version
# reset ALL state variables, so we don't interfere with next request
self.length = 0
self._receivedHeaderCount = 0
self.__first_line = 1
self._transferDecoder = None
del self._command, self._path, self._version
# Disable the idle timeout, in case this request takes a long
# time to finish generating output.
if self.timeOut:
self._savedTimeOut = self.setTimeout(None)
req = self.requests[-1]
req.requestReceived(command, path, version)
def rawDataReceived(self, data):
self.resetTimeout()
self._transferDecoder.dataReceived(data)
def allHeadersReceived(self):
req = self.requests[-1]
req.parseCookies()
self.persistent = self.checkPersistence(req, self._version)
req.gotLength(self.length)
def checkPersistence(self, request, version):
"""
Check if the channel should close or not.
@param request: The request most recently received over this channel
against which checks will be made to determine if this connection
can remain open after a matching response is returned.
@type version: C{str}
@param version: The version of the request.
@rtype: C{bool}
@return: A flag which, if C{True}, indicates that this connection may
remain open to receive another request; if C{False}, the connection
must be closed in order to indicate the completion of the response
to C{request}.
"""
connection = request.requestHeaders.getRawHeaders('connection')
if connection:
tokens = map(str.lower, connection[0].split(' '))
else:
tokens = []
# HTTP 1.0 persistent connection support is currently disabled,
# since we need a way to disable pipelining. HTTP 1.0 can't do
# pipelining since we can't know in advance if we'll have a
# content-length header; if we don't have the header, we need to close the
# connection. In HTTP 1.1 this is not an issue since we use chunked
# encoding if content-length is not available.
#if version == "HTTP/1.0":
# if 'keep-alive' in tokens:
# request.setHeader('connection', 'Keep-Alive')
# return 1
# else:
# return 0
if version == "HTTP/1.1":
if 'close' in tokens:
request.responseHeaders.setRawHeaders('connection', ['close'])
return False
else:
return True
else:
return False
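# Worked example (not part of the original module): an HTTP/1.1 request with
# "Connection: close" makes checkPersistence return False and mirrors
# "connection: close" onto the response headers; the same request without
# that token returns True, keeping the channel open for further requests.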
def requestDone(self, request):
"""
Called by first request in queue when it is done.
"""
if request != self.requests[0]: raise TypeError
del self.requests[0]
if self.persistent:
# notify next request it can start writing
if self.requests:
self.requests[0].noLongerQueued()
else:
if self._savedTimeOut:
self.setTimeout(self._savedTimeOut)
else:
self.transport.loseConnection()
def timeoutConnection(self):
log.msg("Timing out client: %s" % str(self.transport.getPeer()))
policies.TimeoutMixin.timeoutConnection(self)
def connectionLost(self, reason):
self.setTimeout(None)
for request in self.requests:
request.connectionLost(reason)
class HTTPFactory(protocol.ServerFactory):
"""
Factory for HTTP server.
"""
protocol = HTTPChannel
logPath = None
timeOut = 60 * 60 * 12
def __init__(self, logPath=None, timeout=60*60*12):
if logPath is not None:
logPath = os.path.abspath(logPath)
self.logPath = logPath
self.timeOut = timeout
def buildProtocol(self, addr):
p = protocol.ServerFactory.buildProtocol(self, addr)
# timeOut needs to be on the Protocol instance because
# TimeoutMixin expects it there
p.timeOut = self.timeOut
return p
def startFactory(self):
_logDateTimeStart()
if self.logPath:
self.logFile = self._openLogFile(self.logPath)
else:
self.logFile = log.logfile
def stopFactory(self):
if hasattr(self, "logFile"):
if self.logFile != log.logfile:
self.logFile.close()
del self.logFile
_logDateTimeStop()
def _openLogFile(self, path):
"""
Override in subclasses, e.g. to use twisted.python.logfile.
"""
f = open(path, "a", 1)
return f
def _escape(self, s):
# pain in the ass. Return a string like python repr, but always
# escaped as if surrounding quotes were "".
r = repr(s)
if r[0] == "'":
return r[1:-1].replace('"', '\\"').replace("\\'", "'")
return r[1:-1]
def log(self, request):
"""
Log a request's result to the logfile, by default in combined log format.
"""
if hasattr(self, "logFile"):
line = '%s - - %s "%s" %d %s "%s" "%s"\n' % (
request.getClientIP(),
# request.getUser() or "-", # the remote user is almost never important
_logDateTime,
'%s %s %s' % (self._escape(request.method),
self._escape(request.uri),
self._escape(request.clientproto)),
request.code,
request.sentLength or "-",
self._escape(request.getHeader("referer") or "-"),
self._escape(request.getHeader("user-agent") or "-"))
self.logFile.write(line)
| apache-2.0 |
CoherentLabs/depot_tools | recipes/infra.py | 4 | 1173 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Infra(recipe_util.Recipe):
"""Basic Recipe class for the Infrastructure repositories."""
@staticmethod
def fetch_spec(_props):
solution = lambda name, path_infix = None: {
'name' : name,
'url' : 'https://chromium.googlesource.com/infra/%s%s.git' % (
path_infix + '/' if path_infix else '', name
),
'deps_file': '.DEPS.git',
'managed' : False,
}
spec = {
'solutions': [
solution('infra'),
solution('expect_tests', 'testing'),
solution('testing_support', 'testing'),
],
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
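# Illustrative sketch (not part of the original recipe): fetch_spec({})
# returns a structure along the lines of
#
#     {'type': 'gclient_git',
#      'gclient_git_spec': {'solutions': [
#          {'name': 'infra',
#           'url': 'https://chromium.googlesource.com/infra/infra.git',
#           'deps_file': '.DEPS.git',
#           'managed': False},
#          ...]}}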
@staticmethod
def expected_root(_props):
return 'infra'
def main(argv=None):
return Infra().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
laiqiqi886/kbengine | kbe/src/lib/python/Lib/encodings/iso8859_15.py | 272 | 13212 | """ Python Character Mapping Codec iso8859_15 generated from 'MAPPINGS/ISO8859/8859-15.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-15',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
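### Usage sketch (not part of the generated module): once the encodings
### package registers this codec, Latin-9 text round-trips, e.g.
### '\u20ac'.encode('iso8859-15') == b'\xa4' and
### b'\xa4'.decode('iso8859-15') == '\u20ac' (0xA4 is EURO SIGN here,
### not the Latin-1 CURRENCY SIGN).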
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\u20ac' # 0xA4 -> EURO SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
'\xa7' # 0xA7 -> SECTION SIGN
'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
avedaee/DIRAC | FrameworkSystem/scripts/dirac-myproxy-upload.py | 14 | 1686 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
__RCSID__ = "$Id$"
import sys
import getpass
import DIRAC
from DIRAC.Core.Base import Script
class Params:
proxyLoc = False
dnAsUsername = False
def setProxyLocation( self, arg ):
self.proxyLoc = arg
return DIRAC.S_OK()
def setDNAsUsername( self, arg ):
self.dnAsUsername = True
return DIRAC.S_OK()
def showVersion( self, arg ):
print "Version:"
print " ", __RCSID__
sys.exit( 0 )
return DIRAC.S_OK()
params = Params()
Script.registerSwitch( "f:", "file=", "File to use as proxy", params.setProxyLocation )
Script.registerSwitch( "D", "DN", "Use DN as myproxy username", params.setDNAsUsername )
Script.registerSwitch( "i", "version", "Print version", params.showVersion )
Script.addDefaultOptionValue( "LogLevel", "always" )
Script.parseCommandLine()
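# Usage sketch (not part of the original script); the proxy path is made up:
#
#     dirac-myproxy-upload.py -f /tmp/x509up_u500 -D
#
# uploads the named proxy file to the MyProxy server, using the certificate
# DN as the MyProxy username.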
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.MyProxy import MyProxy
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations, CS
if not params.proxyLoc:
params.proxyLoc = Locations.getProxyLocation()
if not params.proxyLoc:
print "Can't find any valid proxy"
sys.exit( 1 )
print "Uploading proxy file %s" % params.proxyLoc
mp = MyProxy()
retVal = mp.uploadProxy( params.proxyLoc, params.dnAsUsername )
if not retVal[ 'OK' ]:
print "Can't upload proxy:"
print " ", retVal[ 'Message' ]
sys.exit( 1 )
print "Proxy uploaded"
sys.exit( 0 )
| gpl-3.0 |
aelthwin/Battle-for-Wesnoth--Zombie-Edition | scons/sdl.py | 5 | 4068 | # vi: syntax=python:et:ts=4
import os
from SCons.Script import *
from config_check_utils import *
def CheckSDL(context, sdl_lib = "SDL", require_version = None):
if require_version:
version = require_version.split(".", 2)
major_version = int(version[0])
minor_version = int(version[1])
try:
patchlevel = int(version[2])
except (ValueError, IndexError):
patchlevel = 0
backup = backup_env(context.env, ["CPPPATH", "LIBPATH", "LIBS"])
sdldir = context.env.get("sdldir", "")
if sdl_lib == "SDL":
if require_version:
context.Message("Checking for Simple DirectMedia Layer library version >= %d.%d.%d... " % (major_version, minor_version, patchlevel))
else:
context.Message("Checking for Simple DirectMedia Layer library... ")
env = context.env
if sdldir:
env.AppendUnique(CPPPATH = [os.path.join(sdldir, "include/SDL")], LIBPATH = [os.path.join(sdldir, "lib")])
else:
for foo_config in [
"pkg-config --cflags --libs sdl",
"sdl-config --cflags --libs"
]:
try:
env.ParseConfig(foo_config)
except OSError:
pass
else:
break
if env["PLATFORM"] == "win32":
env.AppendUnique(CCFLAGS = ["-D_GNU_SOURCE"])
env.AppendUnique(LIBS = Split("mingw32 SDLmain SDL"))
env.AppendUnique(LINKFLAGS = ["-mwindows"])
else:
if require_version:
context.Message("Checking for %s library version >= %d.%d.%d... " % (sdl_lib, major_version, minor_version, patchlevel))
else:
context.Message("Checking for %s library... " % sdl_lib)
context.env.AppendUnique(LIBS = [sdl_lib])
test_program = """
#include <%s.h>
\n""" % sdl_lib
if require_version:
test_program += "#if SDL_VERSIONNUM(%s, %s, %s) < SDL_VERSIONNUM(%d, %d, %d)\n#error Library is too old!\n#endif\n" % \
(sdl_lib.upper() + "_MAJOR_VERSION", \
sdl_lib.upper() + "_MINOR_VERSION", \
sdl_lib.upper() + "_PATCHLEVEL", \
major_version, minor_version, patchlevel)
test_program += """
int main(int argc, char** argv)
{
}
\n"""
if context.TryLink(test_program, ".c"):
context.Result("yes")
return True
else:
context.Result("no")
restore_env(context.env, backup)
return False
def CheckOgg(context):
test_program = '''
#include <SDL_mixer.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
Mix_Music* music = Mix_LoadMUS("data/core/music/main_menu.ogg");
if (music == NULL) {
exit(1);
}
exit(0);
}
\n
'''
#context.env.AppendUnique(LIBS = "SDL_mixer")
context.Message("Checking for Ogg Vorbis support in SDL... ")
if context.env["host"]:
context.Result("n/a (cross-compile)")
return True
(result, output) = context.TryRun(test_program, ".c")
if result:
context.Result("yes")
return True
else:
context.Result("no")
return False
def CheckPNG(context):
test_program = '''
#include <SDL_image.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
SDL_RWops *src;
char *testimage = "images/buttons/button-pressed.png";
src = SDL_RWFromFile(testimage, "rb");
if (src == NULL) {
exit(2);
}
exit(!IMG_isPNG(src));
}
\n
'''
context.Message("Checking for PNG support in SDL... ")
(result, output) = context.TryRun(test_program, ".c")
if result:
context.Result("yes")
return True
else:
context.Result("no")
return False
config_checks = { 'CheckSDL' : CheckSDL,
'CheckOgg' : CheckOgg,
'CheckPNG' : CheckPNG }
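# Usage sketch (not part of the original module): wiring these checks into a
# hypothetical SConstruct:
#
#     conf = Configure(env, custom_tests = config_checks)
#     if not conf.CheckSDL(require_version = '1.2.7'):
#         Exit(1)
#     env = conf.Finish()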
| gpl-2.0 |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/metrics/python/kernel_tests/set_ops_test.py | 22 | 34996 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
_DTYPES = set([
tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.string])
def _values(values, dtype):
return np.array(
values,
dtype=(np.unicode if (dtype == tf.string) else dtype.as_numpy_dtype))
def _constant(values, dtype):
return tf.constant(_values(values, dtype), dtype=dtype)
def _dense_to_sparse(dense, dtype):
indices = []
values = []
max_row_len = 0
for row in dense:
max_row_len = max(max_row_len, len(row))
shape = [len(dense), max_row_len]
row_ix = 0
for row in dense:
col_ix = 0
for cell in row:
indices.append([row_ix, col_ix])
values.append(str(cell) if dtype == tf.string else cell)
col_ix += 1
row_ix += 1
return tf.SparseTensor(
tf.constant(indices, tf.int64),
tf.constant(values, dtype),
tf.constant(shape, tf.int64))
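# Worked example (not part of the original test): _dense_to_sparse([[1, 9],
# [4]], tf.int64) yields indices [[0, 0], [0, 1], [1, 0]], values [1, 9, 4],
# and dense shape [2, 2] (the longest row fixes the second dimension).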
class SetOpsTest(test_util.TensorFlowTestCase):
def test_set_size_2d(self):
for dtype in _DTYPES:
self._test_set_size_2d(dtype)
def _test_set_size_2d(self, dtype):
self.assertAllEqual(
[1], self._set_size(_dense_to_sparse([[1]], dtype)))
self.assertAllEqual(
[2, 1], self._set_size(_dense_to_sparse([[1, 9], [1]], dtype)))
self.assertAllEqual(
[3, 0], self._set_size(_dense_to_sparse([[1, 9, 2], []], dtype)))
self.assertAllEqual(
[0, 3], self._set_size(_dense_to_sparse([[], [1, 9, 2]], dtype)))
def test_set_size_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_size_duplicates_2d(dtype)
def _test_set_size_duplicates_2d(self, dtype):
self.assertAllEqual(
[1], self._set_size(_dense_to_sparse([[1, 1, 1, 1, 1, 1]], dtype)))
self.assertAllEqual(
[2, 7, 3, 0, 1],
self._set_size(_dense_to_sparse([
[1, 9],
[6, 7, 8, 8, 6, 7, 5, 3, 3, 0, 6, 6, 9, 0, 0, 0],
[999, 1, -1000],
[],
[-1]
], dtype)))
def test_set_size_3d(self):
for dtype in _DTYPES:
self._test_set_size_3d(dtype)
def test_set_size_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_set_size_3d(dtype, invalid_indices=True)
def _test_set_size_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
], tf.int64)
sp = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
], dtype),
tf.constant([3, 2, 3], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_size(sp)
else:
self.assertAllEqual([
[2, # 0,0
1], # 0,1
[1, # 1,0
3], # 1,1
[0, # 2,0
1] # 2,1
], self._set_size(sp))
def _set_size(self, sparse_data):
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_size(sparse_data, validate_indices=True),
tf.contrib.metrics.set_size(sparse_data, validate_indices=False)
]
for op in ops:
self.assertEqual(None, op.get_shape().dims)
self.assertEqual(tf.int32, op.dtype)
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0], results[1])
return results[0]
def test_set_intersection_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_intersection_multirow_2d(dtype)
def _test_set_intersection_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 9], dtype)
expected_shape = [2, 2]
expected_counts = [2, 0]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
def test_dense_set_intersection_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_intersection_multirow_2d(dtype)
def _test_dense_set_intersection_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1, 5]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 9], dtype)
expected_shape = [2, 2]
expected_counts = [2, 0]
# Dense to dense.
a = _constant(a_values, dtype)
b = _constant(b_values, dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def test_set_intersection_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_intersection_duplicates_2d(dtype)
def _test_set_intersection_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1]]
expected_indices = [[0, 0]]
expected_values = _values([1], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
# Dense to sparse.
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
def test_set_intersection_3d(self):
for dtype in _DTYPES:
self._test_set_intersection_3d(dtype=dtype)
def test_set_intersection_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_set_intersection_3d(dtype=dtype, invalid_indices=True)
def _test_set_intersection_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
sp_a = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
# 3,*
], dtype),
tf.constant([4, 2, 3], tf.int64))
sp_b = tf.SparseTensor(
tf.constant([
[0, 0, 0], [0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
], tf.int64),
_constant([
1, 3, # 0,0
# 0,1
3, # 1,0
7, 8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
], dtype),
tf.constant([4, 2, 4], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_intersection(sp_a, sp_b)
else:
expected_indices = [
[0, 0, 0], # 0,0
# 0,1
# 1,0
[1, 1, 0], [1, 1, 1], # 1,1
# 2,0
[2, 1, 0], # 2,1
# 3,*
]
expected_values = _values([
1, # 0,0
# 0,1
# 1,0
7, 8, # 1,1
# 2,0
5, # 2,1
# 3,*
], dtype)
expected_shape = [4, 2, 2]
expected_counts = [[
1, # 0,0
0 # 0,1
], [
0, # 1,0
2 # 1,1
], [
0, # 2,0
1 # 2,1
], [
0, # 3,0
0 # 3,1
]]
# Sparse to sparse.
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
# NOTE: sparse_to_dense doesn't support uint8 and uint16.
if dtype not in [tf.uint8, tf.uint16]:
# Dense to sparse.
a = tf.cast(
tf.sparse_to_dense(
sp_a.indices,
sp_a.shape,
sp_a.values,
default_value="-1" if dtype == tf.string else -1),
dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_intersection_count(a, sp_b))
# Dense to dense.
b = tf.cast(
tf.sparse_to_dense(
sp_b.indices,
sp_b.shape,
sp_b.values,
default_value="-2" if dtype == tf.string else -2),
dtype=dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def _assert_shapes(self, input_tensor, result_sparse_tensor):
expected_rows = (None if isinstance(input_tensor, tf.SparseTensor) else
input_tensor.get_shape().as_list()[0])
expected_rank = (None if isinstance(input_tensor, tf.SparseTensor) else
input_tensor.get_shape().ndims)
self.assertAllEqual((expected_rows, expected_rank),
result_sparse_tensor.indices.get_shape().as_list())
self.assertAllEqual((expected_rows,),
result_sparse_tensor.values.get_shape().as_list())
self.assertAllEqual((expected_rank,),
result_sparse_tensor.shape.get_shape().as_list())
def _set_intersection(self, a, b):
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_intersection(a, b, validate_indices=True),
tf.contrib.metrics.set_intersection(a, b, validate_indices=False)
]
for op in ops:
self._assert_shapes(a, op)
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0].indices, results[1].indices)
self.assertAllEqual(results[0].values, results[1].values)
self.assertAllEqual(results[0].shape, results[1].shape)
return results[0]
def _set_intersection_count(self, a, b):
op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_intersection(a, b))
with self.test_session() as sess:
return sess.run(op)
def test_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_difference_multirow_2d(dtype)
def _test_set_difference_multirow_2d(self, dtype):
a_values = [[1, 1, 1], [1, 5, 9], [4, 5, 3], [5, 5, 1]]
b_values = [[], [1, 2], [1, 2, 2], []]
# a - b.
expected_indices = [
[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]
]
expected_values = _values([1, 5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [1, 2, 3, 2]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[1, 0], [2, 0], [2, 1]]
expected_values = _values([2, 1, 2], dtype)
expected_shape = [4, 2]
expected_counts = [0, 1, 2, 0]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def test_dense_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_difference_multirow_2d(dtype)
def _test_dense_set_difference_multirow_2d(self, dtype):
a_values = [[1, 5, 9], [4, 5, 3]]
b_values = [[1, 2, 6], [1, 2, 2]]
# a - b.
expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
expected_values = _values([5, 9, 3, 4, 5], dtype)
expected_shape = [2, 3]
expected_counts = [2, 3]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
difference = self._set_difference(a, b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_difference_count(a, b, True))
# b - a.
expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
expected_values = _values([2, 6, 1, 2], dtype)
expected_shape = [2, 2]
expected_counts = [2, 2]
# Dense to dense.
difference = self._set_difference(a, b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, b, False))
def test_sparse_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_multirow_2d(dtype)
def _test_sparse_set_difference_multirow_2d(self, dtype):
sp_a = _dense_to_sparse(
[[], [1, 5, 9], [4, 5, 3, 3, 4, 5], [5, 1]], dtype=dtype)
sp_b = _dense_to_sparse([[], [1, 2], [1, 2, 2], []], dtype=dtype)
# a - b.
expected_indices = [[1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]]
expected_values = _values([5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [0, 2, 3, 2]
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[1, 0], [2, 0], [2, 1]]
expected_values = _values([2, 1, 2], dtype)
expected_shape = [4, 2]
expected_counts = [0, 1, 2, 0]
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def test_set_difference_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_difference_duplicates_2d(dtype)
def _test_set_difference_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1, 2, 2]]
# a - b.
expected_indices = [[0, 0]]
expected_values = _values([3], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, True))
# b - a.
expected_indices = [[0, 0]]
expected_values = _values([2], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, False))
def test_sparse_set_difference_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype)
def test_sparse_set_difference_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype, invalid_indices=True)
def _test_sparse_set_difference_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
sp_a = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
# 3,*
], dtype),
tf.constant([4, 2, 3], tf.int64))
sp_b = tf.SparseTensor(
tf.constant([
[0, 0, 0], [0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
], tf.int64),
_constant([
1, 3, # 0,0
# 0,1
3, # 1,0
7, 8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
], dtype),
tf.constant([4, 2, 4], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_difference(sp_a, sp_b, False)
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_difference(sp_a, sp_b, True)
else:
# a-b
expected_indices = [
[0, 0, 0], # 0,0
[0, 1, 0], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], # 1,1
# 2,*
# 3,*
]
expected_values = _values([
9, # 0,0
3, # 0,1
1, # 1,0
9, # 1,1
# 2,*
# 3,*
], dtype)
expected_shape = [4, 2, 1]
expected_counts = [[
1, # 0,0
1 # 0,1
], [
1, # 1,0
1 # 1,1
], [
0, # 2,0
0 # 2,1
], [
0, # 3,0
0 # 3,1
]]
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
      self.assertAllEqual(
          expected_counts, self._set_difference_count(sp_a, sp_b, True))
# b-a
expected_indices = [
[0, 0, 0], # 0,0
# 0,1
[1, 0, 0], # 1,0
# 1,1
[2, 0, 0], # 2,0
# 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
]
expected_values = _values([
3, # 0,0
# 0,1
3, # 1,0
# 1,1
2, # 2,0
# 2,1
4, # 3,0
4, # 3,1
], dtype)
expected_shape = [4, 2, 1]
expected_counts = [[
1, # 0,0
0 # 0,1
], [
1, # 1,0
0 # 1,1
], [
1, # 2,0
0 # 2,1
], [
1, # 3,0
1 # 3,1
]]
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def _set_difference(self, a, b, aminusb=True):
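    # Thin wrapper over tf.contrib.metrics.set_difference: computes a - b
    # (or b - a when aminusb=False) along the last dimension.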
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_difference(
a, b, aminusb=aminusb, validate_indices=True),
tf.contrib.metrics.set_difference(
a, b, aminusb=aminusb, validate_indices=False)
]
for op in ops:
self._assert_shapes(a, op)
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0].indices, results[1].indices)
self.assertAllEqual(results[0].values, results[1].values)
self.assertAllEqual(results[0].shape, results[1].shape)
return results[0]
def _set_difference_count(self, a, b, aminusb=True):
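    # Per-group cardinality of the difference: set_size reduces the sparse
    # result over its last dimension to element counts.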
op = tf.contrib.metrics.set_size(
tf.contrib.metrics.set_difference(a, b, aminusb))
with self.test_session() as sess:
return sess.run(op)
def test_set_union_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_union_multirow_2d(dtype)
def _test_set_union_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1]]
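    # Per-row unions are {1, 5, 9} and {1, 2, 3, 4}; rows are padded to the
    # longest result, hence dense shape [2, 4] and counts [3, 4].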
expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
expected_shape = [2, 4]
expected_counts = [3, 4]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def test_dense_set_union_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_union_multirow_2d(dtype)
def _test_dense_set_union_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1, 2]]
expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
expected_shape = [2, 4]
expected_counts = [3, 4]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
union = self._set_union(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, b))
def test_set_union_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_union_duplicates_2d(dtype)
def _test_set_union_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 3], dtype)
expected_shape = [1, 2]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual([2], self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual([2], self._set_union_count(sp_a, sp_b))
def test_sparse_set_union_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_union_3d(dtype)
def test_sparse_set_union_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_union_3d(dtype, invalid_indices=True)
def _test_sparse_set_union_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[0, 0, 0], [0, 0, 2], # 0,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
sp_a = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
# 3,*
], dtype),
tf.constant([4, 2, 3], tf.int64))
sp_b = tf.SparseTensor(
tf.constant([
[0, 0, 0], [0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
], tf.int64),
_constant([
1, 3, # 0,0
# 0,1
3, # 1,0
7, 8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
], dtype),
tf.constant([4, 2, 4], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_union(sp_a, sp_b)
else:
expected_indices = [
[0, 0, 0], [0, 0, 1], [0, 0, 2], # 0,0
[0, 1, 0], # 0,1
[1, 0, 0], [1, 0, 1], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[2, 0, 0], # 2,0
[2, 1, 0], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0], # 3,1
]
expected_values = _values([
1, 3, 9, # 0,0
3, # 0,1
1, 3, # 1,0
7, 8, 9, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4, # 3,1
], dtype)
expected_shape = [4, 2, 3]
expected_counts = [[
3, # 0,0
1 # 0,1
], [
2, # 1,0
3 # 1,1
], [
1, # 2,0
1 # 2,1
], [
1, # 3,0
1 # 3,1
]]
      union = self._set_union(sp_a, sp_b)
      self._assert_set_operation(expected_indices, expected_values,
                                 expected_shape, union, dtype=dtype)
      self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def _set_union(self, a, b):
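    # Thin wrapper over tf.contrib.metrics.set_union, returning the union
    # along the last dimension as a SparseTensor.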
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_union(a, b, validate_indices=True),
tf.contrib.metrics.set_union(a, b, validate_indices=False)
]
for op in ops:
self._assert_shapes(a, op)
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0].indices, results[1].indices)
self.assertAllEqual(results[0].values, results[1].values)
self.assertAllEqual(results[0].shape, results[1].shape)
return results[0]
def _set_union_count(self, a, b):
op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_union(a, b))
with self.test_session() as sess:
return sess.run(op)
def _assert_set_operation(self, expected_indices, expected_values,
expected_shape, sparse_tensor, dtype):
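    # Values sharing all but their final index coordinate form one group;
    # each group is compared as a set, so ordering within a group is
    # irrelevant to the assertion.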
self.assertAllEqual(expected_indices, sparse_tensor.indices)
self.assertAllEqual(len(expected_indices), len(expected_values))
self.assertAllEqual(len(expected_values), len(sparse_tensor.values))
expected_set = set()
actual_set = set()
last_indices = None
for indices, expected_value, actual_value in zip(
expected_indices, expected_values, sparse_tensor.values):
if dtype == tf.string:
actual_value = actual_value.decode("utf-8")
if last_indices and (last_indices[:-1] != indices[:-1]):
self.assertEqual(
expected_set, actual_set, "Expected %s, got %s, at %s." % (
expected_set, actual_set, indices))
expected_set.clear()
actual_set.clear()
expected_set.add(expected_value)
actual_set.add(actual_value)
last_indices = indices
self.assertEqual(
expected_set, actual_set, "Expected %s, got %s, at %s." % (
expected_set, actual_set, last_indices))
self.assertAllEqual(expected_shape, sparse_tensor.shape)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
sqlalchemy/alembic | alembic/util/messaging.py | 2 | 2692 | from collections.abc import Iterable
import logging
import sys
import textwrap
import warnings
from sqlalchemy.engine import url
from . import sqla_compat
from .compat import binary_type
from .compat import string_types
log = logging.getLogger(__name__)
# disable "no handler found" errors
logging.getLogger("alembic").addHandler(logging.NullHandler())
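# Detect the terminal width via the TIOCGWINSZ ioctl so msg() can wrap long
# lines; fall back to None (no wrapping) where fcntl/termios are unavailable,
# e.g. on Windows.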
try:
import fcntl
import termios
import struct
ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0))
_h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl)
if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty
TERMWIDTH = None
except (ImportError, IOError):
TERMWIDTH = None
def write_outstream(stream, *text):
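    # Round-trip each chunk through the stream's encoding with "replace" so
    # unencodable characters degrade gracefully instead of raising.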
encoding = getattr(stream, "encoding", "ascii") or "ascii"
for t in text:
if not isinstance(t, binary_type):
t = t.encode(encoding, "replace")
t = t.decode(encoding)
try:
stream.write(t)
except IOError:
# suppress "broken pipe" errors.
# no known way to handle this on Python 3 however
# as the exception is "ignored" (noisily) in TextIOWrapper.
break
def status(_statmsg, fn, *arg, **kw):
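    # Emit "<message> ...", run fn, then append " done" or " FAILED";
    # failures are reported before the exception is re-raised.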
newline = kw.pop("newline", False)
msg(_statmsg + " ...", newline, True)
try:
ret = fn(*arg, **kw)
write_outstream(sys.stdout, " done\n")
return ret
except:
write_outstream(sys.stdout, " FAILED\n")
raise
def err(message):
log.error(message)
msg("FAILED: %s" % message)
sys.exit(-1)
def obfuscate_url_pw(u):
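    # Mask the password component of a database URL so the URL can be
    # displayed or logged safely.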
u = url.make_url(u)
if u.password:
if sqla_compat.sqla_14:
u = u.set(password="XXXXX")
else:
u.password = "XXXXX"
return str(u)
def warn(msg, stacklevel=2):
warnings.warn(msg, UserWarning, stacklevel=stacklevel)
def msg(msg, newline=True, flush=False):
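    # Wrap output to the detected terminal width, indenting wrapped lines by
    # two spaces; when no width is known, emit the message unwrapped.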
if TERMWIDTH is None:
write_outstream(sys.stdout, msg)
if newline:
write_outstream(sys.stdout, "\n")
else:
# left indent output lines
lines = textwrap.wrap(msg, TERMWIDTH)
if len(lines) > 1:
for line in lines[0:-1]:
write_outstream(sys.stdout, " ", line, "\n")
write_outstream(sys.stdout, " ", lines[-1], ("\n" if newline else ""))
if flush:
sys.stdout.flush()
def format_as_comma(value):
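    # Normalize None, a plain string, or any iterable into a single
    # comma-delimited string, e.g. format_as_comma(["x", "y"]) -> "x, y".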
if value is None:
return ""
elif isinstance(value, string_types):
return value
elif isinstance(value, Iterable):
return ", ".join(value)
else:
raise ValueError("Don't know how to comma-format %r" % value)
| mit |