repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
coodoing/piconv | chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    """Multi-byte charset prober specialised for the Big5 encoding.

    Wires the generic multi-byte prober machinery to the Big5 coding
    state machine and character-distribution analyser.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # Big5-specific distribution analyser and state machine.
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the detected charset."""
        return "Big5"
| apache-2.0 |
beatrizjesus/my-first-blog | pasta/Lib/site-packages/django/dispatch/dispatcher.py | 66 | 12069 | import sys
import threading
import weakref
from django.utils.six.moves import range
if sys.version_info < (3, 4):
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
# Identity key under which "any sender" registrations are stored.
NONE_ID = _make_id(None)
# A marker for caching
# Sentinel stored in sender_receivers_cache when a sender has no receivers.
NO_RECEIVERS = object()
class Signal(object):
    """
    Base class for all signals.

    Internal attributes:

        receivers
            List of (receiverkey (id), weakref(receiver)) pairs.
    """
    def __init__(self, providing_args=None, use_caching=False):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        # Guards all mutation of self.receivers.
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Set lazily by _remove_receiver when a weakly-referenced receiver dies.
        self._dead_receivers = False
    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.
                If weak is True, then receiver must be weak referenceable.
                Receivers must be able to accept keyword arguments.
                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                of type Signal, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings
        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            import inspect
            assert callable(receiver), "Signal receivers must be callable."
            # Check for **kwargs
            # Not all callables are inspectable with getargspec, so we'll
            # try a couple different ways but in the end fall back on assuming
            # it is -- we don't want to prevent registration of valid but weird
            # callables.
            try:
                argspec = inspect.getargspec(receiver)
            except TypeError:
                try:
                    argspec = inspect.getargspec(receiver.__call__)
                except (TypeError, AttributeError):
                    argspec = None
            if argspec:
                assert argspec[2] is not None, \
                    "Signal receivers must accept keyword arguments (**kwargs)."
        # The lookup key makes (receiver, sender) pairs unique; an explicit
        # dispatch_uid replaces the receiver's identity in the key.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))
        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                ref = WeakMethod
                receiver_object = receiver.__self__
            if sys.version_info >= (3, 4):
                receiver = ref(receiver)
                weakref.finalize(receiver_object, self._remove_receiver)
            else:
                receiver = ref(receiver, self._remove_receiver)
        with self.lock:
            self._clear_dead_receivers()
            # for/else: only append when no receiver with the same key exists.
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()
    def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            weak
                The weakref state to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect

        Returns True if a receiver was actually disconnected.
        """
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))
        disconnected = False
        with self.lock:
            self._clear_dead_receivers()
            for index in range(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    disconnected = True
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()
        return disconnected
    def has_listeners(self, sender=None):
        """Return True if at least one live receiver matches *sender*."""
        return bool(self._live_receivers(sender))
    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses
        for receiver in self._live_receivers(sender):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses
    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ]. May raise
        DispatcherKeyError.

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver. The traceback is always attached to the error at
        ``__traceback__``.
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses
        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                # Python 2 exceptions lack __traceback__; attach it manually.
                if not hasattr(err, '__traceback__'):
                    err.__traceback__ = sys.exc_info()[2]
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses
    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            new_receivers = []
            for r in self.receivers:
                # Drop entries whose weakref no longer resolves.
                if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
                    continue
                new_receivers.append(r)
            self.receivers = new_receivers
    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this case in
            # .send() prior to calling _live_receivers() due to concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    # NONE_ID entries match any sender.
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers
    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Normalise the single-signal case to a list, then connect to each.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
| mit |
lindsayad/sympy | sympy/polys/domains/tests/test_polynomialring.py | 99 | 3314 | """Tests for the PolynomialRing classes. """
from sympy.polys.domains import QQ, ZZ
from sympy.polys.polyerrors import ExactQuotientFailed, CoercionFailed, NotReversible
from sympy.abc import x, y
from sympy.utilities.pytest import raises
def test_build_order():
    """A product order (lex in x, ilex in y) maps exponent tuples blockwise."""
    ring = QQ.old_poly_ring(x, y, order=(("lex", x), ("ilex", y)))
    assert ring.order((1, 5)) == ((1,), (-5,))
def test_globalring():
    """Exercise the global (lex-ordered) polynomial ring QQ[x, y]."""
    frac = QQ.old_frac_field(x, y)
    dom = QQ.old_poly_ring(x, y)
    px = dom.convert(x)
    py = dom.convert(y)
    # Membership: polynomials belong to the ring, proper fractions do not.
    assert x in dom
    assert 1/x not in dom
    assert 1/(1 + x) not in dom
    assert py in dom
    assert px.ring == dom
    # Mixed arithmetic between ring elements and raw sympy expressions.
    assert px * (py**2 + 1) == dom.convert(x * (y**2 + 1))
    assert px * y == px * py == dom.convert(x * y) == x * py
    assert px + y == px + py == dom.convert(x + y) == x + py
    assert px - y == px - py == dom.convert(x - y) == x - py
    assert px + 1 == dom.convert(x + 1)
    # Exact division fails unless the quotient is again a polynomial.
    raises(ExactQuotientFailed, lambda: px/py)
    raises(ExactQuotientFailed, lambda: x/py)
    raises(ExactQuotientFailed, lambda: px/y)
    assert px**2 / px == px
    # Conversions from related domains.
    assert dom.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == px
    assert dom.from_FractionField(frac.convert(x), frac) == px
    assert dom.from_FractionField(frac.convert(x)/y, frac) is None
    # Round-trip through the sparse-distributed-module representation.
    assert dom._sdm_to_vector(dom._vector_to_sdm([px, py], dom.order), 2) == [px, py]
def test_localring():
    """Exercise the local (ilex-ordered) polynomial ring, where 1/(1+x) is a unit."""
    frac = QQ.old_frac_field(x, y)
    dom = QQ.old_poly_ring(x, y, order="ilex")
    px = dom.convert(x)
    py = dom.convert(y)
    # Membership: unlike the global ring, 1/(1 + x) is invertible here.
    assert x in dom
    assert 1/x not in dom
    assert 1/(1 + x) in dom
    assert py in dom
    assert px.ring == dom
    assert px*(py**2 + 1)/(1 + px) == dom.convert(x*(y**2 + 1)/(1 + x))
    assert px*y == px*py
    # Division by a non-unit still fails.
    raises(ExactQuotientFailed, lambda: px/py)
    raises(ExactQuotientFailed, lambda: x/py)
    raises(ExactQuotientFailed, lambda: px/y)
    assert px + y == px + py == dom.convert(x + y) == x + py
    assert px - y == px - py == dom.convert(x - y) == x - py
    assert px + 1 == dom.convert(x + 1)
    assert px**2 / px == px
    # Conversions from related domains.
    assert dom.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == px
    assert dom.from_FractionField(frac.convert(x), frac) == px
    raises(CoercionFailed, lambda: dom.from_FractionField(frac.convert(x)/y, frac))
    raises(ExactQuotientFailed, lambda: px/py)
    raises(NotReversible, lambda: px.invert())
    # SDM round-trip clears denominators by the appropriate units.
    assert dom._sdm_to_vector(
        dom._vector_to_sdm([px/(px + 1), py/(1 + px*py)], dom.order), 2) == \
        [px*(1 + px*py), py*(1 + px)]
def test_conversion():
    """Round-trip conversions between the local and global orderings."""
    loc = QQ.old_poly_ring(x, y, order="ilex")
    glob = QQ.old_poly_ring(x, y)
    assert loc.convert(x) == loc.convert(glob.convert(x), glob)
    assert glob.convert(x) == glob.convert(loc.convert(x), loc)
    # 1/(1 + x) exists locally but has no global polynomial image.
    raises(CoercionFailed, lambda: glob.convert(loc.convert(1/(1 + x)), loc))
def test_units():
    """is_unit over QQ[x], the local ring QQ[x]_ilex, and ZZ[x]."""
    # Over QQ, every nonzero constant is a unit; polynomials are not.
    dom = QQ.old_poly_ring(x)
    assert dom.is_unit(dom.convert(1))
    assert dom.is_unit(dom.convert(2))
    assert not dom.is_unit(dom.convert(x))
    assert not dom.is_unit(dom.convert(1 + x))
    # Locally, 1 + x becomes invertible as well.
    dom = QQ.old_poly_ring(x, order='ilex')
    assert dom.is_unit(dom.convert(1))
    assert dom.is_unit(dom.convert(2))
    assert not dom.is_unit(dom.convert(x))
    assert dom.is_unit(dom.convert(1 + x))
    # Over ZZ only +-1 are units.
    dom = ZZ.old_poly_ring(x)
    assert dom.is_unit(dom.convert(1))
    assert not dom.is_unit(dom.convert(2))
    assert not dom.is_unit(dom.convert(x))
    assert not dom.is_unit(dom.convert(1 + x))
| bsd-3-clause |
minhphung171093/OpenERP_V8 | openerp/addons/mass_mailing/tests/test_mail.py | 388 | 1221 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
class test_message_compose(TestMail):
    """Mass-mailing tests built on the shared mail test fixtures."""

    def test_OO_mail_mail_tracking(self):
        """Tests designed for mail_mail tracking (opened, replied, bounced)."""
        # Placeholder: tracking scenarios are not implemented yet.
        pass
| agpl-3.0 |
Cinntax/home-assistant | homeassistant/components/template/switch.py | 1 | 9058 | """Support for switches which integrates with other components."""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
SwitchDevice,
PLATFORM_SCHEMA,
)
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
CONF_VALUE_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_ENTITY_PICTURE_TEMPLATE,
STATE_OFF,
STATE_ON,
ATTR_ENTITY_ID,
CONF_SWITCHES,
EVENT_HOMEASSISTANT_START,
MATCH_ALL,
)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE
_LOGGER = logging.getLogger(__name__)
# Rendered template values accepted as a valid switch state.
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]
# Config keys for the mandatory turn_on / turn_off action scripts.
ON_ACTION = "turn_on"
OFF_ACTION = "turn_off"
# Schema for a single template-switch entry.
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_ICON_TEMPLATE): cv.template,
        vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
        vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
        vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA,
        vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    }
)
# Platform schema: a slug-keyed mapping of switch configurations.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Template switch platform.

    For every configured switch, determine which entity ids its templates
    depend on (unless an explicit ``entity_id`` list overrides extraction)
    and create a SwitchTemplate entity. Returns False when no switch could
    be created.
    """
    switches = []
    for device, device_config in config[CONF_SWITCHES].items():
        friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
        state_template = device_config[CONF_VALUE_TEMPLATE]
        icon_template = device_config.get(CONF_ICON_TEMPLATE)
        entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
        availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
        on_action = device_config[ON_ACTION]
        off_action = device_config[OFF_ACTION]
        manual_entity_ids = device_config.get(ATTR_ENTITY_ID)
        entity_ids = set()
        templates = {
            CONF_VALUE_TEMPLATE: state_template,
            CONF_ICON_TEMPLATE: icon_template,
            CONF_ENTITY_PICTURE_TEMPLATE: entity_picture_template,
            CONF_AVAILABILITY_TEMPLATE: availability_template,
        }
        invalid_templates = []
        for template_name, template in templates.items():
            if template is None:
                continue
            template.hass = hass
            # An explicit entity_id list overrides template extraction.
            if manual_entity_ids is not None:
                continue
            template_entity_ids = template.extract_entities()
            if template_entity_ids == MATCH_ALL:
                # Tracking "all entities" is unsupported; remember which
                # template could not be analyzed so we can warn below.
                invalid_templates.append(template_name.replace("_template", ""))
                entity_ids = MATCH_ALL
            elif entity_ids != MATCH_ALL:
                entity_ids |= set(template_entity_ids)
        if invalid_templates:
            # BUGFIX: this message previously said "Template sensor" —
            # a copy-paste from the sensor platform; this is a switch.
            _LOGGER.warning(
                "Template switch %s has no entity ids configured to track nor"
                " were we able to extract the entities to track from the %s "
                "template(s). This entity will only be able to be updated "
                "manually.",
                device,
                ", ".join(invalid_templates),
            )
        else:
            if manual_entity_ids is None:
                entity_ids = list(entity_ids)
            else:
                entity_ids = manual_entity_ids
        switches.append(
            SwitchTemplate(
                hass,
                device,
                friendly_name,
                state_template,
                icon_template,
                entity_picture_template,
                availability_template,
                on_action,
                off_action,
                entity_ids,
            )
        )
    if not switches:
        _LOGGER.error("No switches added")
        return False
    async_add_entities(switches)
    return True
class SwitchTemplate(SwitchDevice):
    """Representation of a Template switch.

    State, icon, entity picture and availability are each rendered from
    user-supplied templates; turn on/off delegate to configured scripts.
    """
    def __init__(
        self,
        hass,
        device_id,
        friendly_name,
        state_template,
        icon_template,
        entity_picture_template,
        availability_template,
        on_action,
        off_action,
        entity_ids,
    ):
        """Initialize the Template switch."""
        self.hass = hass
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass
        )
        self._name = friendly_name
        # Template rendered to determine on/off state.
        self._template = state_template
        self._on_script = Script(hass, on_action)
        self._off_script = Script(hass, off_action)
        self._state = False
        self._icon_template = icon_template
        self._entity_picture_template = entity_picture_template
        self._availability_template = availability_template
        self._icon = None
        self._entity_picture = None
        # Entity ids whose state changes trigger a re-render.
        self._entities = entity_ids
        self._available = True
    async def async_added_to_hass(self):
        """Register callbacks."""
        @callback
        def template_switch_state_listener(entity, old_state, new_state):
            """Handle target device state changes."""
            self.async_schedule_update_ha_state(True)
        @callback
        def template_switch_startup(event):
            """Update template on startup."""
            # Defer state tracking until HA has started so that tracked
            # entities exist before the first render.
            async_track_state_change(
                self.hass, self._entities, template_switch_state_listener
            )
            self.async_schedule_update_ha_state(True)
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, template_switch_startup
        )
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def should_poll(self):
        """Return the polling state."""
        return False
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon
    @property
    def entity_picture(self):
        """Return the entity_picture to use in the frontend, if any."""
        return self._entity_picture
    @property
    def available(self) -> bool:
        """Return if the device is available."""
        return self._available
    async def async_turn_on(self, **kwargs):
        """Fire the on action."""
        await self._on_script.async_run(context=self._context)
    async def async_turn_off(self, **kwargs):
        """Fire the off action."""
        await self._off_script.async_run(context=self._context)
    async def async_update(self):
        """Update the state from the template."""
        try:
            state = self._template.async_render().lower()
            if state in _VALID_STATES:
                self._state = state in ("true", STATE_ON)
            else:
                _LOGGER.error(
                    "Received invalid switch is_on state: %s. Expected: %s",
                    state,
                    ", ".join(_VALID_STATES),
                )
                self._state = None
        except TemplateError as ex:
            _LOGGER.error(ex)
            self._state = None
        # Render the optional icon / picture / availability templates.
        for property_name, template in (
            ("_icon", self._icon_template),
            ("_entity_picture", self._entity_picture_template),
            ("_available", self._availability_template),
        ):
            if template is None:
                continue
            try:
                value = template.async_render()
                if property_name == "_available":
                    # Availability template must render literally to "true".
                    value = value.lower() == "true"
                setattr(self, property_name, value)
            except TemplateError as ex:
                friendly_property_name = property_name[1:].replace("_", " ")
                if ex.args and ex.args[0].startswith(
                    "UndefinedError: 'None' has no attribute"
                ):
                    # Common during HA startup - so just a warning
                    _LOGGER.warning(
                        "Could not render %s template %s," " the state is unknown.",
                        friendly_property_name,
                        self._name,
                    )
                    return
                try:
                    # Fall back to the base-class default for this property.
                    setattr(self, property_name, getattr(super(), property_name))
                except AttributeError:
                    _LOGGER.error(
                        "Could not render %s template %s: %s",
                        friendly_property_name,
                        self._name,
                        ex,
                    )
| apache-2.0 |
jamesthechamp/zamboni | mkt/comm/tests/test_utils_.py | 11 | 4275 | from django.core.files.uploadedfile import SimpleUploadedFile
import mock
from nose.tools import eq_
from mkt.comm.forms import CommAttachmentFormSet
from mkt.comm.tests.test_views import AttachmentManagementMixin
from mkt.comm.utils import create_comm_note
from mkt.constants import comm
from mkt.site.tests import TestCase, user_factory
from mkt.site.utils import app_factory
class TestCreateCommNote(TestCase, AttachmentManagementMixin):
    """Tests for create_comm_note: thread/note creation, permissions, CC
    list maintenance and attachment handling."""
    def setUp(self):
        # An app whose mozilla_contact is auto-CC'd on new threads, plus a
        # fully-privileged acting user.
        self.contact = user_factory(email='contact')
        self.user = user_factory()
        self.grant_permission(self.user, '*:*')
        self.app = app_factory(mozilla_contact=self.contact.email)
    def test_create_thread(self):
        # Default permissions.
        thread, note = create_comm_note(
            self.app, self.app.current_version, self.user, 'huehue',
            note_type=comm.APPROVAL)
        # Check Thread.
        eq_(thread.addon, self.app)
        eq_(thread.version, self.app.current_version)
        expected = {
            'public': False, 'developer': True, 'reviewer': True,
            'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
        for perm, has_perm in expected.items():
            eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
        # Check Note.
        eq_(note.thread, thread)
        eq_(note.author, self.user)
        eq_(note.body, 'huehue')
        eq_(note.note_type, comm.APPROVAL)
        # Check CC.
        eq_(thread.thread_cc.count(), 2)
        assert thread.thread_cc.filter(user=self.contact).exists()
        assert thread.thread_cc.filter(user=self.user).exists()
    def test_create_note_existing_thread(self):
        # Each distinct author posting to the thread is added to its CC list.
        # Initial note.
        thread, note = create_comm_note(
            self.app, self.app.current_version, self.user, 'huehue')
        # Second note from contact.
        thread, reply = create_comm_note(
            self.app, self.app.current_version, self.contact, 'euheuh!',
            note_type=comm.REJECTION)
        # Third person joins thread.
        thread, last_word = create_comm_note(
            self.app, self.app.current_version, user_factory(), 'euheuh!',
            note_type=comm.MORE_INFO_REQUIRED)
        eq_(thread.thread_cc.count(), 3)
    def test_create_note_no_author(self):
        # An author is optional; the note is then anonymous.
        thread, note = create_comm_note(
            self.app, self.app.current_version, None, 'huehue')
        eq_(note.author, None)
    @mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
    def test_create_note_reviewer_type(self):
        # Reviewer-only note types must not be readable by the developer.
        for note_type in comm.REVIEWER_NOTE_TYPES:
            thread, note = create_comm_note(
                self.app, self.app.current_version, None, 'huehue',
                note_type=note_type)
            eq_(note.read_permission_developer, False)
    @mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
    def test_custom_perms(self):
        # Explicit perms override the defaults for the given roles only.
        thread, note = create_comm_note(
            self.app, self.app.current_version, self.user, 'escalatedquickly',
            note_type=comm.ESCALATION, perms={'developer': False,
                                              'staff': True})
        expected = {
            'public': False, 'developer': False, 'reviewer': True,
            'senior_reviewer': True, 'mozilla_contact': True, 'staff': True}
        for perm, has_perm in expected.items():
            eq_(getattr(thread, 'read_permission_%s' % perm), has_perm, perm)
    @mock.patch('mkt.comm.utils.post_create_comm_note', new=mock.Mock)
    def test_attachments(self):
        # A valid attachment formset results in attachments on the note.
        attach_formdata = self._attachment_management_form(num=2)
        attach_formdata.update(self._attachments(num=2))
        attach_formset = CommAttachmentFormSet(
            attach_formdata,
            {'form-0-attachment':
                SimpleUploadedFile(
                    'lol', attach_formdata['form-0-attachment'].read()),
             'form-1-attachment':
                SimpleUploadedFile(
                    'lol2', attach_formdata['form-1-attachment'].read())})
        thread, note = create_comm_note(
            self.app, self.app.current_version, self.user, 'lol',
            note_type=comm.APPROVAL, attachments=attach_formset)
        eq_(note.attachments.count(), 2)
| bsd-3-clause |
kiyukuta/chainer | chainer/functions/normalization/batch_normalization.py | 4 | 16382 | import numpy
import chainer
from chainer import configuration
from chainer import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
# Bind short aliases for the cuDNN wrapper and its low-level bindings;
# only available when CuPy was built with cuDNN support.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cudnn.cudnn
def _as4darray(arr):
if arr.ndim == 0:
return arr.reshape(1, 1, 1, 1)
elif arr.ndim == 4:
return arr
else:
return arr.reshape(arr.shape[0], -1, 1, 1)
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchNormalizationFunction(function.Function):
    def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9):
        """Initialize the batch-normalization function.

        Args:
            eps: Constant added to the variance for numerical stability.
                cuDNN rejects values below 1e-5, so this is validated when
                cuDNN may be used.
            mean: Existing running-mean array, or None to allocate one on
                the first training forward pass.
            var: Existing running-variance array, or None likewise.
            decay: Exponential decay rate of the running statistics
                (running stat is multiplied by ``decay`` each update).
        """
        self.running_mean = mean
        self.running_var = var
        # Note: cuDNN v5 requires that eps be greater than 1e-5. Otherwise, an
        # error will occur.
        # See CUDNN_BN_MIN_EPSILON value in cudnn.h to verify minimum allowable
        # value.
        self.eps = eps
        if chainer.should_use_cudnn('>=auto'):
            if eps < 1e-5:
                msg = 'cuDNN does not allow an eps value less than 1e-5.'
                raise RuntimeError(msg)
        # Lazily-allocated caches for the cuDNN saved mean/inv-variance.
        self.mean_cache = None
        self.decay = decay
    def check_type_forward(self, in_types):
        """Validate input types.

        Accepts either 3 inputs (x, gamma, beta) or 5 inputs
        (x, gamma, beta, fixed_mean, fixed_var); the latter form supplies
        precomputed statistics for inference (see forward()).
        """
        n_in = type_check.eval(in_types.size())
        if n_in != 3 and n_in != 5:
            raise type_check.InvalidType(
                '%s or %s' % (in_types.size() == 3, in_types.size() == 5),
                '%s == %s' % (in_types.size(), n_in))
        x_type, gamma_type, beta_type = in_types[:3]
        M = type_check.eval(gamma_type.ndim)
        type_check.expect(
            x_type.dtype.kind == 'f',
            x_type.ndim >= gamma_type.ndim + 1,
            # gamma must match the channel axes of x (axes 1..M inclusive).
            x_type.shape[1:1 + M] == gamma_type.shape,
            # TODO(beam2d): Check shape
            gamma_type.dtype == x_type.dtype,
            beta_type.dtype == x_type.dtype,
            gamma_type.shape == beta_type.shape,
        )
        if len(in_types) == 5:
            mean_type, var_type = in_types[3:]
            type_check.expect(
                mean_type.dtype == x_type.dtype,
                mean_type.shape == gamma_type.shape,
                var_type.dtype == x_type.dtype,
                var_type.shape == gamma_type.shape,
            )
    def forward(self, inputs):
        """Compute the batch-normalized output.

        inputs is (x, gamma, beta) in training mode or
        (x, gamma, beta, fixed_mean, fixed_var) in fixed-statistics mode.
        Uses cuDNN when available and the tensor layout permits; otherwise
        computes via xp array ops. Updates running mean/var in training
        mode. Returns a one-element tuple (y,).
        """
        xp = cuda.get_array_module(*inputs)
        x, gamma, beta = inputs[:3]
        if configuration.config.train:
            if self.running_mean is None:
                self.running_mean = xp.zeros_like(gamma)
                self.running_var = xp.zeros_like(gamma)
            else:
                # Copy so the caller's arrays live on the right device/type.
                self.running_mean = xp.array(self.running_mean)
                self.running_var = xp.array(self.running_var)
        elif len(inputs) == 5:
            self.fixed_mean = inputs[3]
            self.fixed_var = inputs[4]
        head_ndim = gamma.ndim + 1
        # Index tuple broadcasting per-channel params over the other axes.
        expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
        gamma = gamma[expander]
        beta = beta[expander]
        # cuDNN only supports these tensor dimensions because they are
        # the most commonly used. If there is a need to support other
        # dimensions with cuDNN, we could consider reshaping the input
        # into a 2-dim array with channels as second dim and m=<product
        # of all dimensions except the 2nd dimension> as the first
        # dimension.
        cudnn_dim_ok = x.ndim == 2 or (x.ndim == 4 and head_ndim == 2)
        # TODO(bkvogel): Check for float16 support again in next cuDNN version.
        # cuDNN v5 batch normalization does not seem to support float16.
        self._can_use_cudnn = cudnn_dim_ok and x[0].dtype != numpy.float16
        cudnn_updated_running_stats = False
        if (xp is not numpy and chainer.should_use_cudnn('>=auto', 5000) and
                self._can_use_cudnn):
            x = cuda.cupy.ascontiguousarray(x)
            if x.ndim == 4 and head_ndim == 2:
                # for convolutional layer
                self.mode = libcudnn.CUDNN_BATCHNORM_SPATIAL
            else:
                # for linear layer
                self.mode = libcudnn.CUDNN_BATCHNORM_PER_ACTIVATION
            gamma = cuda.cupy.ascontiguousarray(gamma)
            beta = cuda.cupy.ascontiguousarray(beta)
            dtype = x.dtype
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(_as4darray(x))
            derivedBnDesc = cudnn.create_uninitialized_tensor_descriptor()
            libcudnn.deriveBNTensorDescriptor(derivedBnDesc.value,
                                              x_desc.value, self.mode)
            one = numpy.array(1, dtype=dtype).ctypes
            zero = numpy.array(0, dtype=dtype).ctypes
            y = cuda.cupy.empty_like(x)
            # Factor used in the moving average
            factor = 1 - self.decay
            if configuration.config.train:
                if self.mean_cache is None:
                    # Output cache to speed up backward pass.
                    self.mean_cache = xp.empty_like(gamma)
                    # Output cache to speed up backward pass.
                    self.var_cache = xp.empty_like(gamma)
                # Note: cuDNN computes the mini-batch mean and variance
                # internally. We can simply (optionally) pass
                # it the running-average mean and variance arrays.
                libcudnn.batchNormalizationForwardTraining(
                    handle, self.mode, one.data, zero.data,
                    x_desc.value, x.data.ptr, x_desc.value,
                    y.data.ptr, derivedBnDesc.value, gamma.data.ptr,
                    beta.data.ptr, factor, self.running_mean.data.ptr,
                    self.running_var.data.ptr, self.eps,
                    self.mean_cache.data.ptr, self.var_cache.data.ptr)
                cudnn_updated_running_stats = True
            else:
                libcudnn.batchNormalizationForwardInference(
                    handle, self.mode, one.data, zero.data,
                    x_desc.value, x.data.ptr, x_desc.value, y.data.ptr,
                    derivedBnDesc.value, gamma.data.ptr, beta.data.ptr,
                    self.fixed_mean.data.ptr, self.fixed_var.data.ptr,
                    self.eps)
        else:
            # Generic (non-cuDNN) path, on either numpy or cupy arrays.
            if configuration.config.train:
                axis = (0,) + tuple(range(head_ndim, x.ndim))
                mean = x.mean(axis=axis)
                var = x.var(axis=axis)
                var += self.eps
            else:
                mean = self.fixed_mean
                var = self.fixed_var + self.eps
            self.std = xp.sqrt(var, dtype=var.dtype)
            if xp is numpy:
                self.x_hat = _xhat(x, mean, self.std, expander)
                y = gamma * self.x_hat
                y += beta
            else:
                self.x_hat, y = cuda.elementwise(
                    'T x, T mean, T std, T gamma, T beta', 'T x_hat, T y',
                    '''
                    x_hat = (x - mean) / std;
                    y = gamma * x_hat + beta;
                    ''',
                    'bn_fwd')(x, mean[expander], self.std[expander], gamma,
                              beta)
        if configuration.config.train and (not cudnn_updated_running_stats):
            # Note: If in training mode, the cuDNN forward training function
            # will do this for us, so
            # only run following code if cuDNN was not used.
            # Update running statistics:
            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)  # unbiased estimation
            self.running_mean *= self.decay
            temp_ar = xp.array(mean)
            temp_ar *= (1 - self.decay)
            self.running_mean += temp_ar
            del temp_ar
            self.running_var *= self.decay
            temp_ar = xp.array(var)
            temp_ar *= (1 - self.decay) * adjust
            self.running_var += temp_ar
            del temp_ar
        return y,
    def backward(self, inputs, grad_outputs):
        """Compute gradients of the batch-normalization forward pass.

        Args:
            inputs: ``(x, gamma, beta)`` in training mode, or
                ``(x, gamma, beta, mean, var)`` when fixed statistics were
                supplied to ``forward``.
            grad_outputs: One-element tuple holding ``gy``, the gradient of
                the loss w.r.t. the forward output.

        Returns:
            ``(gx, ggamma, gbeta)``, extended with ``(gmean, gvar)`` in the
            fixed-statistics case.
        """
        x, gamma = inputs[:2]
        gy = grad_outputs[0]
        head_ndim = gamma.ndim + 1
        # Expander inserts axes so the channel-shaped arrays broadcast
        # against x (one leading batch axis, trailing spatial axes).
        expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
        # m: number of elements averaged per channel entry, cast to the
        # parameter dtype to keep the arithmetic in a single precision.
        m = gamma.dtype.type(x.size // gamma.size)
        axis = (0,) + tuple(range(head_ndim, x.ndim))
        xp = cuda.get_array_module(x)
        if len(inputs) == 5:
            # Fixed-mean/variance mode: mean and var were passed explicitly.
            # This case is unlikely to be used in practice and so does not
            # need to be optimized for performance.
            mean = inputs[3]
            var = inputs[4]
            std = xp.sqrt(var, dtype=var.dtype)
            gs = gamma / std
            gbeta = gy.sum(axis=axis)
            x_hat = _xhat(x, mean, std, expander)
            ggamma = (gy * x_hat).sum(axis=axis)
            gmean = -gs * gbeta
            gvar = -0.5 * gamma / var * ggamma
            gx = gs[expander] * gy
            return gx, ggamma, gbeta, gmean, gvar

        # Note: If length of inputs is not 5, we must be in train mode.
        assert configuration.config.train

        if (xp is not numpy and chainer.should_use_cudnn('>=auto', 5000) and
                self._can_use_cudnn):
            # Note: cuDNN batch normalization backward only works in
            # "training mode." That is, it does not support
            # computing gradients in fixed-mean-variance mode, because there
            # is normally no reason to call backward()
            # while in test/evaluation mode.
            x = cuda.cupy.ascontiguousarray(x)
            gamma = cuda.cupy.ascontiguousarray(gamma)
            gy = cuda.cupy.ascontiguousarray(gy)
            dtype = x.dtype
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(_as4darray(x))
            derivedBnDesc = cudnn.create_uninitialized_tensor_descriptor()
            libcudnn.deriveBNTensorDescriptor(derivedBnDesc.value,
                                              x_desc.value, self.mode)
            one = numpy.array(1, dtype=dtype).ctypes
            zero = numpy.array(0, dtype=dtype).ctypes
            gx = cuda.cupy.empty_like(x)
            ggamma = cuda.cupy.empty_like(gamma)
            gbeta = cuda.cupy.empty_like(gamma)
            # mean_cache / var_cache were filled by the cuDNN forward pass.
            libcudnn.batchNormalizationBackward(
                handle, self.mode, one.data, zero.data,
                one.data, zero.data, x_desc.value, x.data.ptr,
                x_desc.value, gy.data.ptr, x_desc.value, gx.data.ptr,
                derivedBnDesc.value, gamma.data.ptr,
                ggamma.data.ptr, gbeta.data.ptr,
                self.eps, self.mean_cache.data.ptr, self.var_cache.data.ptr)
        else:
            gbeta = gy.sum(axis=axis)
            ggamma = (gy * self.x_hat).sum(axis=axis)
            if xp is numpy:
                gx = (gamma / self.std)[expander] * (
                    gy - (self.x_hat * ggamma[expander] + gbeta[expander]) / m)
            else:
                inv_m = numpy.float32(1) / m
                # Fused elementwise kernel; x_hat and std were cached by
                # the non-cuDNN forward pass.
                gx = cuda.elementwise(
                    'T gy, T x_hat, T gamma, T std, T ggamma, T gbeta, \
                    T inv_m',
                    'T gx',
                    'gx = (gamma / std) * (gy - (x_hat * ggamma + gbeta) * \
                    inv_m)',
                    'bn_bwd')(gy, self.x_hat, gamma[expander],
                              self.std[expander], ggamma[expander],
                              gbeta[expander], inv_m)
        return gx, ggamma, gbeta
def batch_normalization(x, gamma, beta, **kwargs):
    """batch_normalization(x, gamma, beta, eps=2e-5, running_mean=None, running_var=None, decay=0.9)

    Batch normalization function.

    It takes the input variable ``x`` and two parameter variables ``gamma`` and
    ``beta``. The parameter variables must both have the same dimensionality,
    which is referred to as the channel shape. This channel shape corresponds
    to the dimensions in the input which are not averaged over. Since the
    first dimension of the input corresponds to the batch size, the second
    dimension of `x` will correspond to the first dimension of the channel
    shape, the third dimension of `x` will correspond to the second channel
    dimension (if it exists) and so on. Therefore, the dimensionality of the
    input must be at least one plus the number of channel dimensions. The
    total effective "batch size" will then be considered to be the product of
    all dimensions in `x` except for the channel dimensions.

    As an example, if the input is four dimensional and the parameter
    variables are one dimensional, then it is assumed that the first
    dimension of the input is the batch size, the second dimension is the
    channel size, and the remaining two dimensions are considered
    to be spatial dimensions that will be averaged over along with the
    batch size in the batch normalization computations. That is,
    the total batch size will be considered to be the product of all
    input dimensions except the second dimension.

    Note: If this function is called, it will not be possible to access the
    updated running mean and variance statistics, because they are members
    of the function object, which cannot be accessed by the caller.
    If it is desired to access the updated running statistics, it is necessary
    to get a new instance of the function object, call the object, and then
    access the running_mean and/or running_var attributes. See the
    corresponding Link class for an example of how to do this.

    .. warning::

       ``train`` argument is not supported anymore since v2.
       Instead, use ``chainer.using_config('train', train)``.
       See :func:`chainer.using_config`.

    Args:
        x (Variable): Input variable.
        gamma (Variable): Scaling parameter of normalized data.
        beta (Variable): Shifting parameter of scaled normalized data.
        eps (float): Epsilon value for numerical stability.
        running_mean (array): Running average of the mean. This is a
            running average of the mean over several mini-batches using
            the decay parameter. If ``None``, the running average is not
            computed. If this is ``None``, then ``running_var`` must also
            be ``None``.
        running_var (array): Running average of the variance. This is a
            running average of the variance over several mini-batches using
            the decay parameter. If ``None``, the running average is not
            computed. If this is ``None``, then ``running_mean`` must also
            be ``None``.
        decay (float): Decay rate of moving average. It is used during
            training.

    See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
          Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_

    .. seealso:: :class:`links.BatchNormalization`

    """  # NOQA
    # Reject the removed v1-era ``train`` keyword with a helpful message.
    argument.check_unexpected_kwargs(
        kwargs, train='train argument is not supported anymore. '
        'Use chainer.using_config')
    eps, running_mean, running_var, decay = argument.parse_kwargs(
        kwargs, ('eps', 2e-5), ('running_mean', None),
        ('running_var', None), ('decay', 0.9))

    return BatchNormalizationFunction(eps, running_mean, running_var,
                                      decay)(x, gamma, beta)
def fixed_batch_normalization(x, gamma, beta, mean, var, eps=2e-5):
    """Batch normalization function with fixed statistics.

    A variant of batch normalization in which the caller provides the mean
    and variance as fixed variables instead of computing mini-batch
    statistics. It is used in the test mode of the batch normalization
    layer, where batch statistics cannot be used for prediction consistency.

    Args:
        x (Variable): Input variable.
        gamma (Variable): Scaling parameter of normalized data.
        beta (Variable): Shifting parameter of scaled normalized data.
        mean (Variable): Shifting parameter of input.
        var (Variable): Square of scaling parameter of input.
        eps (float): Epsilon value for numerical stability.

    .. seealso::
       :func:`functions.batch_normalization`,
       :class:`links.BatchNormalization`

    """
    # Force test-mode behavior regardless of the surrounding configuration.
    with configuration.using_config('train', False):
        func = BatchNormalizationFunction(eps, None, None, 0.0)
        return func(x, gamma, beta, mean, var)
| mit |
WillianPaiva/1flow | oneflow/core/tasks/reprocess.py | 1 | 4470 | # -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
from constance import config
from celery import task
# from django.utils.translation import ugettext_lazy as _
from ..models.reldb import (
Article,
article_post_create_task,
)
from oneflow.base.utils import RedisExpiringLock
from oneflow.base.utils.dateutils import naturaldelta, benchmark
LOGGER = logging.getLogger(__name__)
# Celery task expiry windows, in seconds, for the three reprocessing passes:
# just under one hour, 23 hours, and 6 days respectively.
EXPIRY_REPROCESS_PASS1 = 3500
EXPIRY_REPROCESS_PASS2 = 3600 * 23
EXPIRY_REPROCESS_PASS3 = 3600 * 24 * 6
@task(name='oneflow.core.tasks.reprocess_failed_articles',
      queue='check', expire=EXPIRY_REPROCESS_PASS1)
def reprocess_failed_articles(failed=None, expiry=None,
                              limit=None, force=False):
    u""" Reprocess articles that failed absolutization.

    In case there was a temporary error, this could lead to more good articles.

    :param failed: queryset of articles to retry; defaults to the articles
        with a URL error created during the previous hour.
    :param expiry: lifetime (seconds) of the de-duplication lock; defaults
        to ``EXPIRY_REPROCESS_PASS1`` when ``failed`` is not given.
    :param limit: currently unused — TODO confirm whether it should cap
        the number of reprocessed articles.
    :param force: steal the lock and run even if another instance holds it.
    """

    if config.ARTICLE_REPROCESSING_DISABLED:
        # Do not raise any .retry(), this is a scheduled task.
        LOGGER.warning(u'Articles reprocess disabled in configuration.')
        return

    if failed is None:
        failed = Article.objects.url_error().created_previous_hour()
        expiry = EXPIRY_REPROCESS_PASS1

    # One lock per expiry window, so the three passes don't block each other.
    # TODO: as the celery tasks expires,
    # the lock is probably not needed anymore.
    my_lock = RedisExpiringLock(
        'reprocess_failed_articles_' + str(expiry),
        expire_time=expiry
    )

    if not my_lock.acquire():
        if force:
            my_lock.release()
            my_lock.acquire()
            LOGGER.warning(u'Forcing failed articles reprocessing…')

        else:
            # Avoid running this task over and over again in the queue
            # if the previous instance did not yet terminate. Happens
            # when scheduled task runs too quickly.
            LOGGER.warning(u'reprocess_failed_articles() is already locked, '
                           u'aborting.')
            return

    failed_count = failed.count()

    with benchmark((u'Reprocess_failed_articles(expiry=%s): %s '
                    u'post_create() tasks chains relaunched.')
                   % (naturaldelta(expiry), failed_count)):

        try:
            for article in failed:
                # Clear the error, then replay the whole post-create chain
                # synchronously for this article.
                article.url_error = None
                article.save()

                article_post_create_task.apply(args=(article.id, ),
                                               kwargs={'apply_now': True})
        finally:
            # HEADS UP: in case the system is overloaded, we intentionaly
            #           don't release the lock to avoid over-re-launched
            #           global tasks to flood the queue with useless
            #           double-triple-Nble individual tasks.
            #
            # my_lock.release()
            pass
@task(name='oneflow.core.tasks.reprocess_failed_articles_pass2',
      queue='check', expire=EXPIRY_REPROCESS_PASS2)
def reprocess_failed_articles_pass2(limit=None, force=False):
    """ Run reprocess_failed_articles() on articles from yesterday. """

    reprocess_failed_articles(
        failed=Article.objects.url_error().created_previous_day(),
        expiry=EXPIRY_REPROCESS_PASS2,
        limit=limit, force=force)
@task(name='oneflow.core.tasks.reprocess_failed_articles_pass3',
      queue='check', expire=int(
          EXPIRY_REPROCESS_PASS3 - 0.05 * EXPIRY_REPROCESS_PASS3))
def reprocess_failed_articles_pass3(limit=None, force=False):
    """ Run reprocess_failed_articles() on articles from last week. """

    reprocess_failed_articles(
        failed=Article.objects.url_error().created_previous_week(),
        expiry=EXPIRY_REPROCESS_PASS3,
        limit=limit, force=force)
| agpl-3.0 |
beni55/augmented-traffic-control | atc/django-atc-api/setup.py | 16 | 2568 | #!/usr/bin/env python
#
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
#
import os
import re
import sys
from setuptools import setup
# Load the long description for PyPI from the README next to this script.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.

    Args:
        package: path to the package directory (relative or absolute).

    Returns:
        The version string captured from the `__version__` assignment.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
    """
    Return root package and all sub-packages.

    A directory counts as a package when it contains an `__init__.py`.
    """
    found = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath)
    return found
def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.

    Result maps the package name to a list of relative file paths,
    suitable for the `package_data` argument of `setup()`.
    """
    filepaths = []
    for dirpath, _dirnames, filenames in os.walk(package):
        # Skip directories that are packages; their files ship as modules.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, name) for name in filenames)
    return {package: filepaths}
version = get_version('atc_api')


# `python setup.py publish` shortcut: build sdist+wheel and upload with
# twine, then remind the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    # os.system returns non-zero when grep finds nothing, i.e. the tool
    # is not installed.
    if os.system("pip freeze | grep wheel"):
        print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
        sys.exit()
    if os.system("pip freeze | grep twine"):
        print("twine not installed.\nUse `pip install twine`.\nExiting.")
        sys.exit()
    os.system("python setup.py sdist bdist_wheel")
    os.system("twine upload dist/*")
    print("You probably want to also tag the version now:")
    print("  git tag -a %s -m 'version %s'" % (version, version))
    print("  git push --tags")
    sys.exit()


setup(
    name='django-atc-api',
    version=version,
    description='REST API for ATC',
    author='Emmanuel Bretelle',
    author_email='chantra@fb.com',
    url='https://github.com/facebook/augmented-traffic-control',
    packages=get_packages('atc_api'),
    package_data=get_package_data('atc_api'),
    classifiers=['Programming Language :: Python', ],
    long_description=README,
    install_requires=['atc_thrift', 'djangorestframework']
)
| bsd-3-clause |
uniteddiversity/mediadrop | mediadrop/lib/tests/xhtml_normalization_test.py | 7 | 2947 | # -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from mediadrop.lib.helpers import clean_xhtml, line_break_xhtml
from mediadrop.lib.xhtml import cleaner_settings
from mediadrop.lib.test.pythonic_testcase import *
class XHTMLNormalizationTest(PythonicTestCase):
    """Tests for clean_xhtml()/line_break_xhtml() normalization behavior."""

    def test_can_replace_linebreaks_with_p_tags(self):
        """Single newlines become spaces; blank lines split paragraphs."""
        htmlified_text = clean_xhtml('first\nline\n\nsecond line')
        assert_equals('<p>first line</p><p>second line</p>', htmlified_text)
        # Cleaning already-clean XHTML must be a no-op (idempotence).
        assert_equals(htmlified_text, clean_xhtml(htmlified_text))

    def test_trailing_newlines_are_removed_in_output(self):
        assert_equals(clean_xhtml('first\n'), clean_xhtml('first\n\n'))

    def test_text_do_not_change_after_a_clean_xhtml_and_line_break_xhtml_cycle(self):
        """Mimics the input -> clean -> display -> input... cycle of the
        XHTMLTextArea widget.
        """
        expected_html = '<p>first line</p><p>second line</p>'
        htmlified_text = clean_xhtml('first\nline\n\nsecond line')
        assert_equals(expected_html, htmlified_text)

        # Ensure that re-cleaning the XHTML provides the same result.
        display_text = line_break_xhtml(htmlified_text)
        assert_equals('<p>first line</p>\n<p>second line</p>', display_text)
        assert_equals(expected_html, clean_xhtml(display_text))

    def test_adds_nofollow_attribute_to_links(self):
        original = '<a href="http://example.com">link</a>'
        cleaned = clean_xhtml(original)
        assert_equals(cleaned, '<a href="http://example.com" rel="nofollow">link</a>')

    # NOTE(review): leading underscore keeps this test disabled — presumably
    # because rel="follow" rewriting is not implemented yet; confirm intent.
    def _test_removes_follow_attribute_from_links(self):
        original = '<a href="http://example.com" rel="follow">link</a>'
        cleaned = clean_xhtml(original)
        assert_equals(cleaned, '<a href="http://example.com" rel="nofollow">link</a>')

    def test_makes_automatic_links_nofollow(self):
        """Bare URLs are auto-linkified and also get rel="nofollow"."""
        original = 'http://example.com'
        cleaned = clean_xhtml(original)
        assert_equals(cleaned, '<a href="http://example.com" rel="nofollow">http://example.com</a>')

    def test_adds_target_blank_to_links(self):
        original = '<a href="http://example.com">link</a>'
        # Copy the module-level settings so the extra filter does not leak
        # into other tests.
        from copy import deepcopy
        settings = deepcopy(cleaner_settings)
        settings['filters'].append('add_target_blank')
        cleaned = clean_xhtml(original, _cleaner_settings=settings)
        assert_equals(cleaned, '<a href="http://example.com" rel="nofollow" target="_blank">link</a>')
import unittest


def suite():
    """Build and return the unittest suite for this module."""
    collected = unittest.TestSuite()
    collected.addTest(unittest.makeSuite(XHTMLNormalizationTest))
    return collected
| gpl-3.0 |
jkonecny12/anaconda | pyanaconda/modules/storage/partitioning/automatic/automatic_interface.py | 6 | 2643 | #
# DBus interface for the auto partitioning module.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from dasbus.server.property import emits_properties_changed
from dasbus.typing import * # pylint: disable=wildcard-import
from pyanaconda.modules.common.constants.objects import AUTO_PARTITIONING
from pyanaconda.modules.common.structures.partitioning import PartitioningRequest
from pyanaconda.modules.storage.partitioning.base_interface import PartitioningInterface
@dbus_interface(AUTO_PARTITIONING.interface_name)
class AutoPartitioningInterface(PartitioningInterface):
    """DBus interface for the auto partitioning module.

    Exposes the partitioning request and the default-passphrase handling
    of the backing implementation over DBus.
    """

    def connect_signals(self):
        """Connect the signals."""
        super().connect_signals()
        # Emit PropertiesChanged for Request whenever the backing module
        # reports a change.
        self.watch_property("Request", self.implementation.request_changed)

    @property
    def Request(self) -> Structure:
        """The partitioning request.

        :return: a DBus structure describing the current request
        """
        return PartitioningRequest.to_structure(self.implementation.request)

    @emits_properties_changed
    def SetRequest(self, request: Structure):
        """Set the partitioning request.

        :param request: a request
        """
        self.implementation.set_request(PartitioningRequest.from_structure(request))

    def RequiresPassphrase(self) -> Bool:
        """Is the default passphrase required?

        :return: True or False
        """
        return self.implementation.requires_passphrase()

    @emits_properties_changed
    def SetPassphrase(self, passphrase: Str):
        """Set a default passphrase for all encrypted devices.

        :param passphrase: a string with a passphrase
        """
        self.implementation.set_passphrase(passphrase)
| gpl-2.0 |
gaddman/ansible | lib/ansible/modules/network/avi/avi_networksecuritypolicy.py | 31 | 4317 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata: community-supported preview module,
# metadata format 1.1.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_networksecuritypolicy
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of NetworkSecurityPolicy Avi RESTful Object
description:
- This module is used to configure NetworkSecurityPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for network sec policy.
- Internally set by cloud connector.
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
name:
description:
- Name of the object.
rules:
description:
- List of networksecurityrule.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a network security policy to block clients represented by ip group known_attackers
avi_networksecuritypolicy:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
name: vs-gurutest-ns
rules:
- action: NETWORK_SECURITY_POLICY_ACTION_TYPE_DENY
age: 0
enable: true
index: 1
log: false
match:
client_ip:
group_refs:
- Demo:known_attackers
match_criteria: IS_IN
name: Rule 1
tenant_ref: Demo
"""
RETURN = '''
obj:
description: NetworkSecurityPolicy (api/networksecuritypolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: declare module arguments and drive the Avi API.

    Builds the argument spec for the networksecuritypolicy object, merges
    the common Avi connection arguments, and delegates the create/update/
    delete logic to avi_ansible_api().
    """
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        cloud_config_cksum=dict(type='str',),
        created_by=dict(type='str',),
        description=dict(type='str',),
        name=dict(type='str',),
        rules=dict(type='list',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail only after AnsibleModule is constructed so fail_json can report
    # the missing SDK back to Ansible properly.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'networksecuritypolicy',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
mlperf/training_results_v0.7 | Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-16/spatial_transform.py | 4 | 4912 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spatial transform functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def _padding(inputs, paddings, data_format):
  """Pads inputs w.r.t. data format."""
  # The channel axis never gets padded; only the two spatial axes do.
  if data_format == 'channels_first':
    pad_spec = [[0, 0], [0, 0], paddings, paddings]
  else:
    pad_spec = [[0, 0], paddings, paddings, [0, 0]]
  return tf.pad(inputs, pad_spec)
def fixed_padding(inputs, kernel_size, data_format='channels_last'):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]` or
        `[batch, height, width, channels]` depending on `data_format`.
    kernel_size: `int` kernel size for subsequent `conv2d` or `max_pool2d`
        operations. Must be a positive integer.
    data_format: `str`, either "channels_first" or "channels_last".

  Returns:
    A padded `Tensor` in the same `data_format`; unchanged when
    `kernel_size == 1`.
  """
  total_pad = kernel_size - 1
  front = total_pad // 2
  back = total_pad - front
  return _padding(inputs, (front, back), data_format)
def space_to_depth_fixed_padding(inputs, kernel_size,
                                 data_format='channels_last', block_size=2):
  """Pads the input along the spatial dimensions independently of input size.

  Variant of `fixed_padding` for inputs that have already been transformed
  by space-to-depth: the pad amounts are scaled down by `block_size`.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]` or
        `[batch, height, width, channels]` depending on `data_format`.
    kernel_size: `int` kernel size for subsequent `conv2d` or `max_pool2d`
        operations. Must be a positive integer.
    data_format: `str`, either "channels_first" or "channels_last".
    block_size: `int` block size for space-to-depth convolution.

  Returns:
    A padded `Tensor` in the same `data_format`; unchanged when
    `kernel_size == 1`.
  """
  half = (kernel_size - 1) // 2
  front = (half + 1) // block_size
  back = half // block_size
  return _padding(inputs, (front, back), data_format)
def fused_transpose_and_space_to_depth(
    images, image_size, block_size=2, transpose_input=True):
  """Fuses space-to-depth and transpose.

  Space-to-depth performs the following permutation, which is equivalent to
  tf.nn.space_to_depth.

  images = tf.reshape(images, [batch, h // block_size, block_size,
                               w // block_size, block_size, c])
  images = tf.transpose(images, [0, 1, 3, 2, 4, 5])
  images = tf.reshape(images, [batch, h // block_size, w // block_size,
                               c * (block_size ** 2)])

  Args:
    images: A tensor with a shape of [batch_size, h, w, c] as the images. The
      h and w can be dynamic sizes.
    image_size: A tuple either (short_size, long_size) or (long_size,
      short_size) that represents two shapes of images.
    block_size: A integer for space-to-depth block size.
    transpose_input: A boolean to indicate if the images tensor should be
      transposed.

  Returns:
    A transformed images tensor. With transpose_input=True the batch axis is
    moved after the spatial axes (and the result is flattened to
    [h*w/block_size^2, batch, c*block_size^2]); otherwise the batch axis
    stays first.
  """
  h, w = image_size
  batch_size, _, _, c = images.get_shape().as_list()
  # Split each spatial axis into (outer, block) pairs.
  images = tf.reshape(images,
                      [batch_size, h//block_size, block_size,
                       w//block_size, block_size, c])
  if transpose_input:
    # Move batch after the outer spatial axes, then fold the two block
    # axes into the channel dimension and flatten the spatial axes.
    images = tf.transpose(
        images, [1, 3, 0, 2, 4, 5])
    images = tf.reshape(
        images, [h // block_size, w // block_size, batch_size,
                 c * (block_size ** 2)])
    images = tf.reshape(
        images, [-1, batch_size, c * (block_size ** 2)])
  else:
    # Standard space-to-depth permutation; batch axis stays first.
    images = tf.transpose(
        images, [0, 1, 3, 2, 4, 5])
    images = tf.reshape(
        images, [batch_size, h // block_size, w // block_size,
                 c * (block_size ** 2)])
    images = tf.reshape(
        images, [batch_size, -1, c * (block_size ** 2)])
  return images
| apache-2.0 |
Yonjuni/true-or-false | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Number of analysed characters after which a conclusion may be drawn early.
ENOUGH_DATA_THRESHOLD = 1024
# Confidence ceiling/floor returned by get_confidence().
SURE_YES = 0.99
SURE_NO = 0.01
# Minimum count of frequent characters needed before scoring at all.
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
    """Base class scoring how typical a byte stream's character-frequency
    distribution is for a particular multi-byte encoding."""

    def __init__(self):
        # Mapping table to get frequency order from char order (see
        # get_order()); filled in by subclasses.
        self._mCharToFreqOrder = None
        self._mTableSize = None  # Size of above table
        # Language-specific constant used when computing confidence; see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        # Set once detection is done and a conclusion has been made.
        self._mDone = False
        self._mTotalChars = 0  # Total characters encountered
        # Characters whose frequency order is below 512.
        self._mFreqChars = 0

    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        # Only 2-byte characters take part in the distribution analysis.
        order = self.get_order(aBuf) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # Without characters in our consideration range, answer negatively.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO

        if self._mTotalChars != self._mFreqChars:
            rare_chars = self._mTotalChars - self._mFreqChars
            ratio = self._mFreqChars / (
                rare_chars * self._mTypicalDistributionRatio)
            if ratio < SURE_YES:
                return ratio

        # Normalize confidence; we never claim 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        # A conclusion may be drawn before all data has been received.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # Subclasses map the raw byte pair of one encoding onto a shared
        # frequency "order"; the base class knows no encoding.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW lead bytes of interest: 0xC4-0xFE; trail bytes: 0xA1-0xFE.
        # The state machine has already validated the byte sequence.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR lead bytes of interest: 0xB0-0xFE; trail bytes: 0xA1-0xFE.
        # The state machine has already validated the byte sequence.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312 lead bytes of interest: 0xB0-0xFE; trail bytes: 0xA1-0xFE.
        # The state machine has already validated the byte sequence.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead >= 0xB0 and trail >= 0xA1:
            return 94 * (lead - 0xB0) + trail - 0xA1
        return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency distribution analysis for the Big5 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Return the frequency-table index of a Big5 char, or -1.

        Lead byte range: 0xA4-0xFE; trail byte ranges: 0x40-0x7E and
        0xA1-0xFE (157 trail values per lead byte: 63 low + 94 high).
        The coding state machine has already validated the sequence.
        """
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        row = 157 * (lead - 0xA4)
        if trail >= 0xA1:
            return row + trail - 0xA1 + 63
        return row + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency distribution analysis for Shift-JIS (shares
    the JIS frequency table with the EUC-JP analyser)."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Return the frequency-table index of a Shift-JIS char, or -1.

        Lead byte ranges: 0x81-0x9F and 0xE0-0xEF; trail bytes above
        0x7F are rejected here.  The coding state machine has already
        validated the byte sequence.
        """
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            row = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            row = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        if trail > 0x7F:
            return -1
        return row + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency distribution analysis for the EUC-JP encoding
    (shares the JIS frequency table with the Shift-JIS analyser)."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Return the frequency-table index of an EUC-JP char, or -1."""
        # for euc-JP encoding, we are interested
        # first byte range: 0xa0 -- 0xfe
        # second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        char = wrap_ord(aBuf[0])
        if char >= 0xA0:
            # NOTE(review): the guard accepts 0xA0 but the formula offsets
            # from 0xA1, so a 0xA0 lead byte yields a negative row index --
            # confirm this asymmetry is intentional.
            return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
        else:
            return -1
| gpl-3.0 |
harlowja/networkx | networkx/algorithms/bipartite/redundancy.py | 45 | 3946 | # -*- coding: utf-8 -*-
"""Node redundancy for bipartite graphs."""
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
from __future__ import division
from itertools import combinations
from networkx import NetworkXError
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['node_redundancy']
def node_redundancy(G, nodes=None):
    """Compute the redundancy coefficient of nodes in the bipartite
    graph ``G``.

    The redundancy coefficient of a node ``v`` is the fraction of pairs
    of neighbors of ``v`` that are both linked to some other node as
    well; in a one-mode projection those pairs would remain connected
    even if ``v`` were removed.

    Parameters
    ----------
    G : graph
        A bipartite graph.
    nodes : list or iterable, optional
        Compute redundancy for these nodes only; defaults to every node
        in ``G``.

    Returns
    -------
    dict
        Dictionary keyed by node with the node redundancy value.

    Raises
    ------
    NetworkXError
        If any considered node has fewer than two neighbors (the
        coefficient would require division by zero).

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio
       (2008). Basic notions for the analysis of large two-mode
       networks. Social Networks 30(1), 31--48.
    """
    if nodes is None:
        nodes = G
    for v in nodes:
        if len(G[v]) < 2:
            raise NetworkXError('Cannot compute redundancy coefficient for a node'
                                ' that has fewer than two neighbors.')
    # TODO This can be trivially parallelized.
    return {v: _node_redundancy(G, v) for v in nodes}
def _node_redundancy(G, v):
"""Returns the redundancy of the node ``v`` in the bipartite graph ``G``.
If ``G`` is a graph with ``n`` nodes, the redundancy of a node is the ratio
of the "overlap" of ``v`` to the maximum possible overlap of ``v``
according to its degree. The overlap of ``v`` is the number of pairs of
neighbors that have mutual neighbors themselves, other than ``v``.
``v`` must have at least two neighbors in ``G``.
"""
n = len(G[v])
# TODO On Python 3, we could just use `G[u].keys() & G[w].keys()` instead
# of instantiating the entire sets.
overlap = sum(1 for (u, w) in combinations(G[v], 2)
if (set(G[u]) & set(G[w])) - {v})
return (2 * overlap) / (n * (n - 1))
| bsd-3-clause |
Linutux/Gourmet | gourmet/plugins/unit_converter/convertGui.py | 1 | 7209 | import gourmet.convert as convert
import gtk
from gourmet.gglobals import *
from gourmet.gdebug import *
from gourmet.gtk_extras.cb_extras import *
import gourmet.GourmetRecipeManager
from gettext import gettext as _
import os
try:
current_path = os.path.split(os.path.join(os.getcwd(),__file__))[0]
except:
current_path = ''
class ConvGui:
    """This is a simple interface for the converter.

    Wraps a GtkBuilder window that converts an amount between two
    units, optionally using a per-ingredient density.
    """

    def __init__(self, converter=convert.get_converter(),
                 unitModel=None,
                 amt1=None, unit1=None, item=None,
                 okcb=None
                 ):
        """Build the converter dialog.

        converter -- a gourmet.convert converter (defaults to the shared one).
        unitModel -- optional gtk model of units; created when None.
        amt1, unit1, item -- optional initial amount, source unit name and
            density item to preselect.
        okcb -- optional callback invoked on close with
            (destination unit name, result label text).
        """
        self.possible_conversions = None
        self.ui = gtk.Builder()
        self.ui.add_from_file(os.path.join(current_path, 'converter.ui'))
        self.conv = converter
        self.changing_item = False
        self.okcb = okcb
        self.widget_names = ['window', 'amt1Entry', 'amt2Label',
                             'unit1ComboBox', 'unit2ComboBox',
                             'itemComboBox', 'densitySpinButton',
                             'useDensityCheckButton', 'statusbar',
                             'expander1', 'messageLabel']
        # grab all our widgets
        for w in self.widget_names:
            setattr(self, w, self.ui.get_object(w))
        # HACK FOR READABILITY w/o glade change
        self.resultLabel = self.amt2Label
        self.resultLabel.set_use_markup(True)
        self.resultLabel.set_line_wrap(True)
        if unitModel:
            self.unitModel = unitModel
        else:
            self.unitModel = gourmet.GourmetRecipeManager.UnitModel(self.conv)
        self.unit1ComboBox.set_model(self.unitModel)
        self.unit1ComboBox.set_wrap_width(3)
        self.unit2ComboBox.set_wrap_width(3)
        self.unit2ComboBox.set_model(self.unitModel)
        for combobox in [self.unit1ComboBox, self.unit2ComboBox]:
            cell = gtk.CellRendererText()
            combobox.pack_start(cell, True)
            combobox.add_attribute(cell, 'text', 1)
            setup_typeahead(combobox)
        ikeys = self.conv.density_table.keys()
        ikeys.sort()
        for itm in ikeys:
            self.itemComboBox.append_text(itm)
        if len(ikeys) > 8:
            self.itemComboBox.set_wrap_width(3)
        setup_typeahead(self.itemComboBox)
        # BUGFIX: the original did "self.amt1Entry = float_to_frac(amt1)"
        # here, rebinding the widget attribute to a plain string and
        # breaking every later set_text()/get_text() call.  The entry
        # text is set once, below, after signal handlers are connected.
        self.ui.connect_signals({
            'amt1changed': self.changed,
            'unit1changed': self.changed,
            'unit2changed': self.changed,
            'itemChanged': self.density_itm_changed,
            'densitySpinChanged': self.density_spin_changed,
            'densityChecked': self.density_toggled,
            'close': self.close,
        })
        self.last_amt1 = None
        self.last_amt2 = None
        self.last_unit1 = None
        self.last_unit2 = None
        # BUGFIX: dict.has_key is a method; the original subscripted it
        # ("unit_dict.has_key[unit1]"), which raises TypeError at
        # runtime.  Use the "in" operator instead.
        if unit1 and unit1 in self.conv.unit_dict:
            u = self.conv.unit_dict[unit1]
            cb_set_active_text(self.unit1ComboBox, u)
        if amt1:
            self.amt1Entry.set_text("%s" % amt1)
        if item:
            cb_set_active_text(self.itemComboBox, item)

    def changed(self, *args):
        """Signal handler: re-run the conversion when any input changes."""
        amt1 = convert.frac_to_float(self.amt1Entry.get_text())
        unit1 = cb_get_active_text(self.unit1ComboBox)
        unit2 = cb_get_active_text(self.unit2ComboBox)
        if unit1 != self.last_unit1:
            # source unit changed -> refresh the reachable-units cache
            self.get_possible_conversions()
        if amt1 and unit2:
            self.convert(amt1, unit1, unit2)
        self.last_amt1 = amt1
        self.last_unit1 = unit1
        self.last_unit2 = unit2

    def convert(self, amt1, unit1, unit2):
        """Convert amt1 from unit1 to unit2 and display the result.

        Density from the spin button is used only when the density
        expander is open and the value is positive.  On failure the
        density pane is opened and the user is prompted for density.
        """
        density = self.densitySpinButton.get_value()
        if density <= 0 or not self.expander1.get_expanded():
            density = None
        conversion = self.conv.convert_fancy(unit1, unit2, density=density)
        message = ""
        if conversion:
            amt2 = amt1 * conversion
            if amt2 < (0.05):
                # very small results read better in scientific notation
                retAmt = "%1.3e" % amt2
            else:
                retAmt = convert.float_to_frac(amt2)
            result = "%s %s = <b>%s</b> %s" % (convert.float_to_frac(amt1),
                                               unit1,
                                               retAmt,
                                               unit2)
        else:
            result = _("Cannot convert %s to %s") % (unit1, unit2)
            if not density:
                message = _("Need density information.")
                if not self.expander1.get_expanded():
                    # open the density pane and retry once a density is set
                    self.expander1.set_expanded(True)
                    self.changed()
                    self.itemComboBox.activate()
        self.resultLabel.set_text(result)
        self.resultLabel.set_use_markup(True)
        self.resultLabel.set_line_wrap(True)
        self.messageLabel.set_text("<i>%s</i>" % message)
        self.messageLabel.set_use_markup(True)

    def message(self, msg):
        """Push msg onto the statusbar."""
        id = self.statusbar.get_context_id('main')
        self.statusbar.push(id, msg)

    def unit_filter(self, mod, iter):
        """TreeModel filter func: show only units reachable from unit 1."""
        u = mod.get_value(iter, 0)
        if not self.possible_conversions:
            self.get_possible_conversions()
        if u in self.possible_conversions:
            return True
        else:
            return False

    def get_possible_conversions(self):
        """Cache the set of units convertible from the current source unit."""
        density = self.densitySpinButton.get_value()
        if density <= 0 or not self.expander1.get_expanded():
            density = None
        u1 = cb_get_active_text(self.unit1ComboBox)
        self.possible_conversions = self.conv.get_all_conversions(u1, density=density)

    def density_toggled(self, *args):
        """Enable/disable the density widgets to match the checkbutton."""
        sens = self.useDensityCheckButton.get_active()
        self.densitySpinButton.set_sensitive(sens)
        self.itemComboBox.set_sensitive(sens)
        self.changed()

    def density_itm_changed(self, *args):
        """Selecting an ingredient loads its density into the spinner."""
        debug('density_itm_changed', 5)
        self.changing_item = True
        itm = cb_get_active_text(self.itemComboBox)
        if itm != _('None'):
            self.densitySpinButton.set_value(self.conv.density_table[itm])
        else:
            self.densitySpinButton.set_value(0)
        self.changed()
        self.changing_item = False

    def density_spin_changed(self, *args):
        """Manually editing the density deselects the ingredient."""
        debug('density_spin_changed', 5)
        if not self.changing_item:
            self.itemComboBox.set_active(0)
        self.changed()

    def close(self, *args):
        """Hide the window and report the result via okcb, if given."""
        self.window.hide()
        if self.okcb:
            # BUGFIX: the original referenced the bare name resultLabel
            # (NameError at runtime); read the label from self instead.
            self.okcb(cb_get_active_text(self.unit2ComboBox),
                      self.resultLabel.get_text())
        if __name__ == '__main__':
            gtk.main_quit()
if __name__ == '__main__':
    # Manual smoke test: open the converter dialog and run the GTK loop.
    uibase="/home/tom/Projects/gourmet/glade/"  # NOTE(review): unused -- predates the GtkBuilder port?
    cg=ConvGui()
    gtk.main()
| gpl-2.0 |
taktik/account-financial-tools | account_credit_control/wizard/credit_control_marker.py | 39 | 3469 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright 2012-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
class CreditControlMarker(models.TransientModel):
    """ Change the state of lines in mass.

    Transient wizard: launched on a selection of credit.control.line
    records, it writes the chosen state onto every selected line that is
    not already 'sent'.
    """
    _name = 'credit.control.marker'
    _description = 'Mass marker'

    @api.model
    def _get_line_ids(self):
        """Default for line_ids: the selected credit control lines from
        the launching context, minus those already sent."""
        context = self.env.context
        # Only meaningful when launched from the credit.control.line view
        if not (context.get('active_model') == 'credit.control.line' and
                context.get('active_ids')):
            return False
        line_obj = self.env['credit.control.line']
        lines = line_obj.browse(context['active_ids'])
        return self._filter_lines(lines)

    # Target state written onto the selected lines
    name = fields.Selection([('ignored', 'Ignored'),
                             ('to_be_sent', 'Ready To Send'),
                             ('sent', 'Done')],
                            string='Mark as',
                            default='to_be_sent',
                            required=True)
    line_ids = fields.Many2many('credit.control.line',
                                string='Credit Control Lines',
                                default=_get_line_ids,
                                domain="[('state', '!=', 'sent')]")

    @api.model
    @api.returns('credit.control.line')
    def _filter_lines(self, lines):
        """ get line to be marked filter done lines """
        line_obj = self.env['credit.control.line']
        domain = [('state', '!=', 'sent'), ('id', 'in', lines.ids)]
        return line_obj.search(domain)

    @api.model
    @api.returns('credit.control.line')
    def _mark_lines(self, filtered_lines, state):
        """ write hook """
        assert state
        filtered_lines.write({'state': state})
        return filtered_lines

    @api.multi
    def mark_lines(self):
        """ Write state of selected credit lines to the one in entry
        done credit line will be ignored """
        self.ensure_one()
        if not self.line_ids:
            raise api.Warning(_('No credit control lines selected.'))
        filtered_lines = self._filter_lines(self.line_ids)
        if not filtered_lines:
            raise api.Warning(_('No lines will be changed. '
                                'All the selected lines are already done.'))
        self._mark_lines(filtered_lines, self.name)
        # Reopen the line list restricted to the lines actually changed
        return {'domain': unicode([('id', 'in', filtered_lines.ids)]),
                'view_type': 'form',
                'view_mode': 'tree,form',
                'view_id': False,
                'res_model': 'credit.control.line',
                'type': 'ir.actions.act_window'}
| agpl-3.0 |
kivarun/libvirt_metadata_api | test/test_arp.py | 2 | 2723 | import unittest
import utils
import mock
class ArpTestCase(unittest.TestCase):
    """Tests for utils.arp using canned Linux /proc/net/arp contents and
    canned Darwin `arp` command output."""

    def setUp(self):
        # Canned contents of /proc/net/arp (Linux).
        self.linux_data = ('IP address HW type Flags HW address Mask Device\n'
                           '192.168.0.94 0x1 0x2 aa:aa:aa:aa:aa:aa * br0\n'
                           '192.168.0.92 0x1 0x2 bb:bb:bb:bb:bb:bb * br0\n'
                           '192.168.0.200 0x1 0x2 cc:cc:cc:cc:cc:cc * br0\n'
                           '192.168.0.254 0x1 0x2 ee:ee:ee:ee:ee:ee * br0\n')
        # Canned output of the Darwin "arp" command.
        self.darwin_data = ('? (192.168.0.1) at aa:aa:aa:aa:aa:aa on en0 ifscope [ethernet]\n'
                            '? (192.168.0.230) at cc:cc:cc:cc:cc:cc on en0 ifscope [ethernet]\n'
                            '? (192.168.0.250) at bb:bb:bb:bb:bb:bb on en0 ifscope [ethernet]\n')

    def test_get_arp_table_linux(self):
        # The Linux path reads /proc/net/arp; patch open() with canned data.
        with mock.patch('__builtin__.open', mock.mock_open(read_data=self.linux_data)):
            self.assertDictEqual(
                utils.arp.get_arp_table_linux(),
                {'192.168.0.94': 'aa:aa:aa:aa:aa:aa',
                 '192.168.0.200': 'cc:cc:cc:cc:cc:cc',
                 '192.168.0.92': 'bb:bb:bb:bb:bb:bb',
                 '192.168.0.254': 'ee:ee:ee:ee:ee:ee'}
            )

    def test_get_arp_table_darwin(self):
        # The Darwin path shells out to "arp"; patch subprocess instead.
        with mock.patch('subprocess.check_output', return_value=self.darwin_data):
            self.assertDictEqual(
                utils.arp.get_arp_table_darwin(),
                {'192.168.0.230': 'cc:cc:cc:cc:cc:cc',
                 '192.168.0.250': 'bb:bb:bb:bb:bb:bb',
                 '192.168.0.1': 'aa:aa:aa:aa:aa:aa'}
            )

    def test_get_mac_address_linux(self):
        # Both historical Linux platform strings must take the Linux path.
        with mock.patch('__builtin__.open', mock.mock_open(read_data=self.linux_data)):
            for linux_platform in ('linux', 'linux2'):
                with mock.patch('sys.platform', new=linux_platform):
                    self.assertEqual(utils.arp.get_mac_address('192.168.0.94'), 'aa:aa:aa:aa:aa:aa')
                    self.assertEqual(utils.arp.get_mac_address('1.1.1.1'), None)

    def test_get_mac_address_darwin(self):
        with mock.patch('sys.platform', new='darwin'), mock.patch('subprocess.check_output', return_value=self.darwin_data):
            self.assertEqual(utils.arp.get_mac_address('192.168.0.250'), 'bb:bb:bb:bb:bb:bb')
            self.assertEqual(utils.arp.get_mac_address('1.1.1.1'), None)

    def test_get_mac_address_unsupported(self):
        # Unknown platforms are expected to raise rather than guess.
        with mock.patch('sys.platform', new='unsupported'):
            self.assertRaises(Exception, utils.arp.get_mac_address, '1.1.1.1')
| gpl-3.0 |
capriele/crazyflie-clients-python-move | build/lib.linux-i686-2.7/cfclient/utils/singleton.py | 31 | 1499 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Singleton class.
"""
__author__ = 'Bitcraze AB'
__all__ = ['Singleton']
class Singleton(type):
    """Metaclass that caches a single instance per class.

    A class created with this metaclass returns the same object from
    every instantiation; constructor arguments take effect only on the
    first call.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Return the cached instance, constructing it on first use."""
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
| gpl-2.0 |
patrickm/chromium.src | chrome/common/extensions/docs/server2/patch_servlet_test.py | 3 | 6647 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from HTMLParser import HTMLParser
import unittest
from fake_fetchers import ConfigureFakeFetchers
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from patch_servlet import PatchServlet
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Request
from test_branch_utility import TestBranchUtility
from test_util import DisableLogging
_ALLOWED_HOST = 'https://chrome-apps-doc.appspot.com'
def _CheckURLsArePatched(content, patch_servlet_path):
errors = []
class LinkChecker(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag != 'a':
return
tag_description = '<a %s .../>' % ' '.join('%s="%s"' % (key, val)
for key, val in attrs)
attrs = dict(attrs)
if ('href' in attrs and
attrs['href'].startswith('/') and
not attrs['href'].startswith('/%s/' % patch_servlet_path)):
errors.append('%s has an unqualified href' % tag_description)
LinkChecker().feed(content)
return errors
class _RenderServletDelegate(RenderServlet.Delegate):
  '''Delegate that serves RenderServlet from a local ServerInstance.'''
  def CreateServerInstance(self):
    return ServerInstance.ForLocal()
class _PatchServletDelegate(RenderServlet.Delegate):
  '''Delegate that wires PatchServlet to canned branch data, the local
  host file system, and an empty GitHub file system.'''
  def CreateBranchUtility(self, object_store_creator):
    return TestBranchUtility.CreateWithCannedData()

  def CreateHostFileSystemProvider(self, object_store_creator, **optargs):
    return HostFileSystemProvider.ForLocal(object_store_creator, **optargs)

  def CreateGithubFileSystemProvider(self, object_store_creator):
    return GithubFileSystemProvider.ForEmpty()
class PatchServletTest(unittest.TestCase):
  '''Tests for PatchServlet: rendering docs with a Rietveld patch applied,
  and the anti-XSS host redirect behaviour.'''

  def setUp(self):
    ConfigureFakeFetchers()

  def _RenderWithPatch(self, path, issue):
    '''Renders |path| through PatchServlet with |issue| patched in.'''
    path_with_issue = '%s/%s' % (issue, path)
    return PatchServlet(Request.ForTest(path_with_issue, host=_ALLOWED_HOST),
                        _PatchServletDelegate()).Get()

  def _RenderWithoutPatch(self, path):
    '''Renders |path| through the plain RenderServlet (no patch).'''
    return RenderServlet(Request.ForTest(path, host=_ALLOWED_HOST),
                         _RenderServletDelegate()).Get()

  def _RenderAndCheck(self, path, issue, expected_equal):
    '''Renders |path| with |issue| patched in and asserts that the result is
    the same as |expected_equal| modulo any links that get rewritten to
    "_patch/issue".
    '''
    patched_response = self._RenderWithPatch(path, issue)
    unpatched_response = self._RenderWithoutPatch(path)
    # cache-control may legitimately differ between servlets; ignore it.
    patched_response.headers.pop('cache-control', None)
    unpatched_response.headers.pop('cache-control', None)
    unpatched_content = unpatched_response.content.ToString()

    # Check that all links in the patched content are qualified with
    # the patch URL, then strip them out for checking (in)equality.
    patched_content = patched_response.content.ToString()
    patch_servlet_path = '_patch/%s' % issue
    errors = _CheckURLsArePatched(patched_content, patch_servlet_path)
    self.assertFalse(errors,
        '%s\nFound errors:\n * %s' % (patched_content, '\n * '.join(errors)))
    patched_content = patched_content.replace('/%s' % patch_servlet_path, '')

    self.assertEqual(patched_response.status, unpatched_response.status)
    self.assertEqual(patched_response.headers, unpatched_response.headers)
    if expected_equal:
      self.assertEqual(patched_content, unpatched_content)
    else:
      self.assertNotEqual(patched_content, unpatched_content)

  def _RenderAndAssertEqual(self, path, issue):
    self._RenderAndCheck(path, issue, True)

  def _RenderAndAssertNotEqual(self, path, issue):
    self._RenderAndCheck(path, issue, False)

  @DisableLogging('warning')
  def _AssertNotFound(self, path, issue):
    response = self._RenderWithPatch(path, issue)
    self.assertEqual(response.status, 404,
        'Path %s with issue %s should have been removed for %s.' % (
            path, issue, response))

  def _AssertOk(self, path, issue):
    response = self._RenderWithPatch(path, issue)
    self.assertEqual(response.status, 200,
        'Failed to render path %s with issue %s.' % (path, issue))
    self.assertTrue(len(response.content.ToString()) > 0,
        'Rendered result for path %s with issue %s should not be empty.' %
        (path, issue))

  def _AssertRedirect(self, path, issue, redirect_path):
    response = self._RenderWithPatch(path, issue)
    self.assertEqual(302, response.status)
    self.assertEqual('/_patch/%s/%s' % (issue, redirect_path),
                     response.headers['Location'])

  def testRender(self):
    # '_patch' is not included in paths below because it's stripped by Handler.
    issue = '14096030'

    # TODO(kalman): Test with chrome_sidenav.json once the sidenav logic has
    # stabilised.

    # extensions/runtime.html is removed in the patch, should redirect to the
    # apps version.
    self._AssertRedirect('extensions/runtime', issue, 'apps/runtime')

    # apps/runtime.html is not removed.
    self._RenderAndAssertEqual('apps/runtime', issue)

    # test_foo.html is added in the patch.
    self._AssertOk('extensions/test_foo', issue)

    # Invalid issue number results in a 404.
    self._AssertNotFound('extensions/index', '11111')

  def testXssRedirect(self):
    # Requests arriving on non-allowed hosts must be redirected to the
    # sandboxed patch host (not served in place) to prevent XSS.
    def is_redirect(from_host, from_path, to_url):
      response = PatchServlet(Request.ForTest(from_path, host=from_host),
                              _PatchServletDelegate()).Get()
      redirect_url, _ = response.GetRedirect()
      if redirect_url is None:
        return (False, '%s/%s did not cause a redirect' % (
            from_host, from_path))
      if redirect_url != to_url:
        return (False, '%s/%s redirected to %s not %s' % (
            from_host, from_path, redirect_url, to_url))
      return (True, '%s/%s redirected to %s' % (
          from_host, from_path, redirect_url))

    self.assertTrue(*is_redirect('http://developer.chrome.com', '12345',
                                 '%s/_patch/12345' % _ALLOWED_HOST))
    self.assertTrue(*is_redirect('http://developers.google.com', '12345',
                                 '%s/_patch/12345' % _ALLOWED_HOST))
    self.assertFalse(*is_redirect('http://chrome-apps-doc.appspot.com', '12345',
                                  None))
    self.assertFalse(*is_redirect('http://some-other-app.appspot.com', '12345',
                                  None))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
tbonza/GatherNews | build/lib/gathernews/parse.py | 1 | 2192 | """ Handle parsing the RSS feed. """
from datetime import datetime
import logging
import re
import feedparser
from gathernews.template import ITEM
logger = logging.getLogger(__name__)
def get_source(fp) -> str:
    """Return the feed's base URL, or '' when unavailable.

    fp -- a parsed feedparser.FeedParserDict.
    (The original annotation instantiated FeedParserDict() at def time;
    annotations should name a type, not create an instance.)
    """
    try:
        return fp['feed']['title_detail']['base']
    except Exception as ex:
        logger.exception(ex)
        return ""
def left_pad(num: int) -> str:
    """Return ``num`` as a string zero-padded on the left to width 2."""
    return str(num).zfill(2)
def get_datetime(fp) -> str:
    """Return the feed's update time as a 'YYYYMMDDHHMM' string.

    fp -- a parsed feedparser.FeedParserDict.  Falls back to the current
    local time when the feed carries no usable update timestamp.
    """
    try:
        tm = fp['feed']['updated_parsed']
        # BUGFIX: feedparser's *_parsed values are time.struct_time
        # objects whose fields are tm_year/tm_mon/... -- the original
        # read .year/.month, which always raised AttributeError and
        # silently fell back to now().
        return '%04d%02d%02d%02d%02d' % (
            tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min)
    except Exception as exc:
        logger.exception(exc)
        tm = datetime.now()
        return str(tm.year) + left_pad(tm.month) + left_pad(tm.day) + \
            left_pad(tm.hour) + left_pad(tm.minute)
def get_title(entry) -> str:
    """Return the entry's title, or '' when unavailable.

    entry -- a feedparser entry mapping.
    (BUGFIX: the original annotation 'feedparser:FeedParserDict()' was a
    SyntaxError that prevented the module from importing at all.)
    """
    try:
        return entry['title']
    except Exception as exc:
        logger.exception(exc)
        return ""
def get_summary(entry) -> str:
    """Return the entry's summary, or '' when unavailable.

    entry -- a feedparser entry mapping.
    (Annotation fixed: the original instantiated FeedParserDict() as a
    type annotation.)
    """
    try:
        return entry['summary']
    except Exception as exc:
        logger.exception(exc)
        return ""
def get_rss_link(entry) -> str:
    """Return the entry's link URL, or '' when unavailable.

    entry -- a feedparser entry mapping.
    (Annotation fixed: the original instantiated FeedParserDict() as a
    type annotation.)
    """
    try:
        return entry['link']
    except Exception as exc:
        logger.exception(exc)
        return ""
def get_date_published(entry) -> str:
    """Return the entry's published date string, or '' when unavailable.

    entry -- a feedparser entry mapping.
    (Annotation fixed: the original instantiated FeedParserDict() as a
    type annotation.)
    """
    try:
        return entry['published']
    except Exception as exc:
        logger.exception(exc)
        return ""
def map_rss(fp) -> list:
    """Map rss entry items to the ITEM data template.

    fp -- a parsed feedparser.FeedParserDict.
    Returns a list of dicts (copies of ITEM), one per feed entry.

    BUGFIX: the original annotation referenced the misspelled module
    'feedpaser', which raised NameError as soon as the module loaded.
    """
    items = []
    for entry in fp['entries']:
        item = ITEM.copy()
        item['source'] = get_source(fp)
        item['extract_datetime'] = get_datetime(fp)
        item['title'] = get_title(entry)
        item['summary'] = get_summary(entry)
        item['rss_link'] = get_rss_link(entry)
        item['published'] = get_date_published(entry)
        items.append(item)
    return items
| mit |
martinjina/git-repo | subcmds/rebase.py | 48 | 4130 | #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import GitCommand
class Rebase(Command):
  """'repo rebase': rebase each project's current topic branch onto the
  upstream branch it tracks."""
  common = True
  helpSummary = "Rebase local branches on upstream branch"
  helpUsage = """
%prog {[<project>...] | -i <project>...}
"""
  helpDescription = """
'%prog' uses git rebase to move local changes in the current topic branch to
the HEAD of the upstream history, useful when you have made commits in a topic
branch but need to incorporate new upstream changes "underneath" them.
"""

  def _Options(self, p):
    # Most flags are forwarded verbatim to the underlying "git rebase".
    p.add_option('-i', '--interactive',
                 dest="interactive", action="store_true",
                 help="interactive rebase (single project only)")
    p.add_option('-f', '--force-rebase',
                 dest='force_rebase', action='store_true',
                 help='Pass --force-rebase to git rebase')
    p.add_option('--no-ff',
                 dest='no_ff', action='store_true',
                 help='Pass --no-ff to git rebase')
    p.add_option('-q', '--quiet',
                 dest='quiet', action='store_true',
                 help='Pass --quiet to git rebase')
    p.add_option('--autosquash',
                 dest='autosquash', action='store_true',
                 help='Pass --autosquash to git rebase')
    p.add_option('--whitespace',
                 dest='whitespace', action='store', metavar='WS',
                 help='Pass --whitespace to git rebase')
    p.add_option('--auto-stash',
                 dest='auto_stash', action='store_true',
                 help='Stash local modifications before starting')

  def Execute(self, opt, args):
    # Rebase every requested project's topic branch onto its tracked
    # upstream, optionally auto-stashing a dirty working tree first.
    all_projects = self.GetProjects(args)
    one_project = len(all_projects) == 1

    if opt.interactive and not one_project:
      print >>sys.stderr, 'error: interactive rebase not supported with multiple projects'
      return -1

    for project in all_projects:
      cb = project.CurrentBranch
      if not cb:
        # With a single project a detached HEAD is a hard error;
        # with many projects such projects are silently skipped.
        if one_project:
          print >>sys.stderr, "error: project %s has a detatched HEAD" % project.relpath
          return -1
        # ignore branches with detatched HEADs
        continue

      upbranch = project.GetBranch(cb)
      if not upbranch.LocalMerge:
        if one_project:
          print >>sys.stderr, "error: project %s does not track any remote branches" % project.relpath
          return -1
        # ignore branches without remotes
        continue

      # Build the "git rebase" argument list from the parsed options.
      args = ["rebase"]

      if opt.whitespace:
        args.append('--whitespace=%s' % opt.whitespace)

      if opt.quiet:
        args.append('--quiet')

      if opt.force_rebase:
        args.append('--force-rebase')

      if opt.no_ff:
        args.append('--no-ff')

      if opt.autosquash:
        args.append('--autosquash')

      if opt.interactive:
        args.append("-i")

      args.append(upbranch.LocalMerge)

      print >>sys.stderr, '# %s: rebasing %s -> %s' % \
        (project.relpath, cb, upbranch.LocalMerge)

      needs_stash = False
      if opt.auto_stash:
        # "git update-index --refresh -q" fails when the index is dirty;
        # in that case stash before rebasing and pop afterwards.
        stash_args = ["update-index", "--refresh", "-q"]

        if GitCommand(project, stash_args).Wait() != 0:
          needs_stash = True
          # Dirty index, requires stash...
          stash_args = ["stash"]

          if GitCommand(project, stash_args).Wait() != 0:
            return -1

      if GitCommand(project, args).Wait() != 0:
        return -1

      if needs_stash:
        # Restore the local modifications stashed above.
        stash_args.append('pop')
        stash_args.append('--quiet')
        if GitCommand(project, stash_args).Wait() != 0:
          return -1
| apache-2.0 |
rajashreer7/autotest-client-tests | linux-tools/nscd/nscd.py | 3 | 1545 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class nscd(test.test):
    """
    Autotest module for testing basic functionality
    of nscd
    @author
    """
    version = 1
    nfail = 0
    path = ''

    def initialize(self, test_path=''):
        """
        Sets the overall failure counter for the test.

        Also installs gcc if missing and builds the test binaries with
        "make all" inside <test_path>/nscd.
        """
        self.nfail = 0
        if not sm.check_installed('gcc'):
            logging.debug("gcc missing - trying to install")
            sm.install('gcc')
        ret_val = subprocess.Popen(['make', 'all'], cwd="%s/nscd" %(test_path))
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
        logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run

        Executes the nscd.sh test script with LTPBIN pointing at the
        shared helpers; any non-zero exit bumps the failure counter.
        """
        try:
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            ret_val = subprocess.Popen(['./nscd.sh'], cwd="%s/nscd" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        # NOTE(review): Python 2 except syntax -- this module is py2-only.
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        """Raise TestError when any stage recorded a failure."""
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
| gpl-2.0 |
syci/OCB | addons/subscription/subscription.py | 26 | 7939 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# TODO:
# Error treatment: exception, request, ... -> send request to user_id
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class subscription_document(osv.osv):
    """Template describing which model (and which of its fields) a
    recurring subscription should duplicate."""
    _name = "subscription.document"
    _description = "Subscription Document"
    _columns = {
        'name': fields.char('Name', required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription document without removing it."),
        'model': fields.many2one('ir.model', 'Object', required=True),
        'field_ids': fields.one2many('subscription.document.fields', 'document_id', 'Fields', copy=True)
    }
    _defaults = {
        # New documents are active unless explicitly archived.
        'active' : lambda *a: True,
    }
class subscription_document_fields(osv.osv):
    """Per-field override applied when a subscription document is
    generated (e.g. reset a field or stamp the current date)."""
    _name = "subscription.document.fields"
    _description = "Subscription Document Fields"
    _rec_name = 'field'
    _columns = {
        # Field of the parent document's model that gets a default value
        'field': fields.many2one('ir.model.fields', 'Field', domain="[('model_id', '=', parent.model)]", required=True),
        'value': fields.selection([('false','False'),('date','Current Date')], 'Default Value', size=40, help="Default value is considered for field when new document is generated."),
        'document_id': fields.many2one('subscription.document', 'Subscription Document', ondelete='cascade'),
    }
    _defaults = {}
def _get_document_types(self, cr, uid, context=None):
    """Return [(model_name, label), ...] for every configured
    subscription.document, used as the selection of reference fields below.
    """
    cr.execute('select m.model, s.name from subscription_document s, ir_model m WHERE s.model = m.id order by s.name')
    return cr.fetchall()
class subscription_subscription(osv.osv):
    """Recurring generation of documents from a source record.

    Each subscription references a source document (``doc_source``) and owns
    an ir.cron job which periodically calls :meth:`model_copy` to duplicate
    that document; each copy is logged in subscription.subscription.history.
    """
    _name = "subscription.subscription"
    _description = "Subscription"
    _columns = {
        'name': fields.char('Name', required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription without removing it."),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'notes': fields.text('Internal Notes'),
        'user_id': fields.many2one('res.users', 'User', required=True),
        'interval_number': fields.integer('Interval Qty'),
        'interval_type': fields.selection([('days', 'Days'), ('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
        'exec_init': fields.integer('Number of documents'),
        'date_init': fields.datetime('First Date'),
        'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', copy=False),
        'doc_source': fields.reference('Source Document', required=True, selection=_get_document_types, size=128, help="User can choose the source document on which he wants to create documents"),
        'doc_lines': fields.one2many('subscription.subscription.history', 'subscription_id', 'Documents created', readonly=True),
        'cron_id': fields.many2one('ir.cron', 'Cron Job', help="Scheduler which runs on subscription", states={'running':[('readonly',True)], 'done':[('readonly',True)]}),
        'note': fields.text('Notes', help="Description or Summary of Subscription"),
    }
    _defaults = {
        'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'user_id': lambda obj, cr, uid, context: uid,
        'active': lambda *a: True,
        'interval_number': lambda *a: 1,
        'interval_type': lambda *a: 'months',
        'doc_source': lambda *a: False,
        'state': lambda *a: 'draft'
    }

    def _auto_end(self, cr, context=None):
        """End-of-table-setup hook: drop the FK constraint towards ir.cron."""
        super(subscription_subscription, self)._auto_end(cr, context=context)
        # drop the FK from subscription to ir.cron, as it would cause deadlocks
        # during cron job execution. When model_copy() tries to write() on the
        # subscription, it has to wait for an ExclusiveLock on the cron job
        # record, but the latter is locked by the cron system for the duration
        # of the job!
        # FIXME: the subscription module should be reviewed to simplify the
        # scheduling process and to use a unique cron job for all
        # subscriptions, so that it never needs to be updated during its
        # execution.
        cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (self._table, '%s_cron_id_fkey' % self._table))

    def set_process(self, cr, uid, ids, context=None):
        """Create an ir.cron job per subscription and move it to 'running'."""
        for row in self.read(cr, uid, ids, context=context):
            # Map subscription fields onto the matching ir.cron fields.
            mapping = {'name': 'name', 'interval_number': 'interval_number', 'interval_type': 'interval_type', 'exec_init': 'numbercall', 'date_init': 'nextcall'}
            res = {'model': 'subscription.subscription', 'args': repr([[row['id']]]), 'function': 'model_copy', 'priority': 6, 'user_id': row['user_id'] and row['user_id'][0]}
            for key, value in mapping.items():
                res[value] = row[key]
            id = self.pool.get('ir.cron').create(cr, uid, res)
            self.write(cr, uid, [row['id']], {'cron_id': id, 'state': 'running'})
        return True

    def model_copy(self, cr, uid, ids, context=None):
        """Cron callback: duplicate each subscription's source document once.

        Records the new copy in subscription.subscription.history and marks
        the subscription 'done' when its cron job has one call left.
        """
        for row in self.read(cr, uid, ids, context=context):
            if not row.get('cron_id', False):
                continue
            cron_ids = [row['cron_id'][0]]
            remaining = self.pool.get('ir.cron').read(cr, uid, cron_ids, ['numbercall'])[0]['numbercall']
            try:
                # doc_source is stored as "model.name,id".
                (model_name, id) = row['doc_source'].split(',')
                id = int(id)
                model = self.pool[model_name]
            except:
                raise UserError(_('Please provide another source document.\nThis one does not exist!'))

            # Field overrides configured on the matching subscription.document.
            default = {'state': 'draft'}
            doc_obj = self.pool.get('subscription.document')
            document_ids = doc_obj.search(cr, uid, [('model.model', '=', model_name)])
            doc = doc_obj.browse(cr, uid, document_ids)[0]
            for f in doc.field_ids:
                if f.value == 'date':
                    value = time.strftime('%Y-%m-%d')
                else:
                    value = False
                default[f.field.name] = value

            state = 'running'
            # if there was only one remaining document to generate
            # the subscription is over and we mark it as being done
            if remaining == 1:
                state = 'done'
            id = self.pool[model_name].copy(cr, uid, id, default, context)
            self.pool.get('subscription.subscription.history').create(cr, uid, {'subscription_id': row['id'], 'date': time.strftime('%Y-%m-%d %H:%M:%S'), 'document_id': model_name + ',' + str(id)})
            self.write(cr, uid, [row['id']], {'state': state})
        return True

    def unlink(self, cr, uid, ids, context=None):
        """Forbid deletion of subscriptions that are still running."""
        for record in self.browse(cr, uid, ids, context or {}):
            if record.state == "running":
                raise UserError(_('You cannot delete an active subscription!'))
        return super(subscription_subscription, self).unlink(cr, uid, ids, context)

    def set_done(self, cr, uid, ids, context=None):
        """Deactivate the linked cron jobs and mark subscriptions 'done'."""
        res = self.read(cr, uid, ids, ['cron_id'])
        # BUGFIX: filter on 'cron_id' instead of 'id'. 'id' is always set, so
        # a subscription without a cron job previously raised a TypeError when
        # indexing the False value of x['cron_id'].
        ids2 = [x['cron_id'][0] for x in res if x['cron_id']]
        self.pool.get('ir.cron').write(cr, uid, ids2, {'active': False})
        self.write(cr, uid, ids, {'state': 'done'})
        return True

    def set_draft(self, cr, uid, ids, context=None):
        """Reset subscriptions back to the 'draft' state."""
        self.write(cr, uid, ids, {'state': 'draft'})
        return True
class subscription_subscription_history(osv.osv):
    """Audit line created by subscription_subscription.model_copy():
    one record per generated document, with a reference back to it.
    """
    _name = "subscription.subscription.history"
    _description = "Subscription history"
    _rec_name = 'date'
    _columns = {
        'date': fields.datetime('Date'),
        'subscription_id': fields.many2one('subscription.subscription', 'Subscription', ondelete='cascade'),
        # Stored as "model.name,id", pointing at the generated copy.
        'document_id': fields.reference('Source Document', required=True, selection=_get_document_types, size=128),
    }
| agpl-3.0 |
tensorflow/models | official/vision/beta/projects/deepmac_maskrcnn/configs/deep_mask_head_rcnn_config_test.py | 1 | 1045 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check that the config is set correctly."""
import tensorflow as tf
from official.vision.beta.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn
class DeepMaskHeadRcnnConfigTest(tf.test.TestCase):
  """Sanity check for the Deep Mask Head R-CNN experiment config factory."""

  def test_config(self):
    # The COCO config factory must yield the project's specialized task type,
    # not the generic Mask R-CNN task.
    config = deep_mask_head_rcnn.deep_mask_head_rcnn_resnetfpn_coco()
    self.assertIsInstance(config.task, deep_mask_head_rcnn.DeepMaskHeadRCNNTask)
# Run the test suite when invoked directly.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
gautam1858/tensorflow | tensorflow/python/kernel_tests/boosted_trees/quantile_ops_test.py | 4 | 9790 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking quantile related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as resource_handle_op
from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as resource_initialized
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver
@test_util.run_deprecated_v1
class QuantileOpsTest(test_util.TensorFlowTestCase):
  """Graph-mode (TF1) tests for the boosted-trees quantile stream ops."""

  def create_resource(self, name, eps, max_elements, num_streams=1):
    """Create and register a quantile stream resource; return its handle."""
    quantile_accumulator_handle = resource_handle_op(
        container="", shared_name=name, name=name)
    create_op = boosted_trees_ops.create_quantile_stream_resource(
        quantile_accumulator_handle,
        epsilon=eps,
        max_elements=max_elements,
        num_streams=num_streams)
    is_initialized_op = resource_initialized(quantile_accumulator_handle)
    resources.register_resource(quantile_accumulator_handle, create_op,
                                is_initialized_op)
    return quantile_accumulator_handle

  def setUp(self):
    """Sets up the quantile ops test as follows.

    Create a batch of 6 examples having 2 features
    The data looks like this
    | Instance | instance weights | Feature 0 | Feature 1
    | 0        |     10           |   1.2     |   2.3
    | 1        |     1            |   12.1    |   1.2
    | 2        |     1            |   0.3     |   1.1
    | 3        |     1            |   0.5     |   2.6
    | 4        |     1            |   0.6     |   3.2
    | 5        |     1            |   2.2     |   0.8
    """
    self._feature_0 = constant_op.constant([1.2, 12.1, 0.3, 0.5, 0.6, 2.2],
                                           dtype=dtypes.float32)
    self._feature_1 = constant_op.constant([2.3, 1.2, 1.1, 2.6, 3.2, 0.8],
                                           dtype=dtypes.float32)
    # Expected bucket boundaries and the bucket index of each instance.
    self._feature_0_boundaries = np.array([0.3, 0.6, 1.2, 12.1])
    self._feature_1_boundaries = np.array([0.8, 1.2, 2.3, 3.2])
    self._feature_0_quantiles = constant_op.constant([2, 3, 0, 1, 1, 3],
                                                     dtype=dtypes.int32)
    self._feature_1_quantiles = constant_op.constant([2, 1, 1, 3, 3, 0],
                                                     dtype=dtypes.int32)
    self._example_weights = constant_op.constant(
        [10, 1, 1, 1, 1, 1], dtype=dtypes.float32)
    self.eps = 0.01
    self.max_elements = 1 << 16
    self.num_quantiles = constant_op.constant(3, dtype=dtypes.int64)

  def testBasicQuantileBucketsSingleResource(self):
    """Both features share one resource with two streams."""
    with self.cached_session() as sess:
      quantile_accumulator_handle = self.create_resource("floats", self.eps,
                                                         self.max_elements, 2)
      resources.initialize_resources(resources.shared_resources()).run()
      summaries = boosted_trees_ops.make_quantile_summaries(
          [self._feature_0, self._feature_1], self._example_weights,
          epsilon=self.eps)
      summary_op = boosted_trees_ops.quantile_add_summaries(
          quantile_accumulator_handle, summaries)
      flush_op = boosted_trees_ops.quantile_flush(
          quantile_accumulator_handle, self.num_quantiles)
      buckets = boosted_trees_ops.get_bucket_boundaries(
          quantile_accumulator_handle, num_features=2)
      quantiles = boosted_trees_ops.boosted_trees_bucketize(
          [self._feature_0, self._feature_1], buckets)
      # Summaries must be added and flushed before boundaries are meaningful.
      self.evaluate(summary_op)
      self.evaluate(flush_op)
      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
      self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
      self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())

  def testBasicQuantileBucketsMultipleResources(self):
    """Each feature gets its own single-stream resource; results must match."""
    with self.cached_session() as sess:
      quantile_accumulator_handle_0 = self.create_resource("float_0", self.eps,
                                                           self.max_elements)
      quantile_accumulator_handle_1 = self.create_resource("float_1", self.eps,
                                                           self.max_elements)
      resources.initialize_resources(resources.shared_resources()).run()
      summaries = boosted_trees_ops.make_quantile_summaries(
          [self._feature_0, self._feature_1], self._example_weights,
          epsilon=self.eps)
      summary_op_0 = boosted_trees_ops.quantile_add_summaries(
          quantile_accumulator_handle_0,
          [summaries[0]])
      summary_op_1 = boosted_trees_ops.quantile_add_summaries(
          quantile_accumulator_handle_1,
          [summaries[1]])
      flush_op_0 = boosted_trees_ops.quantile_flush(
          quantile_accumulator_handle_0, self.num_quantiles)
      flush_op_1 = boosted_trees_ops.quantile_flush(
          quantile_accumulator_handle_1, self.num_quantiles)
      bucket_0 = boosted_trees_ops.get_bucket_boundaries(
          quantile_accumulator_handle_0, num_features=1)
      bucket_1 = boosted_trees_ops.get_bucket_boundaries(
          quantile_accumulator_handle_1, num_features=1)
      quantiles = boosted_trees_ops.boosted_trees_bucketize(
          [self._feature_0, self._feature_1], bucket_0 + bucket_1)
      self.evaluate([summary_op_0, summary_op_1])
      self.evaluate([flush_op_0, flush_op_1])
      self.assertAllClose(self._feature_0_boundaries, bucket_0[0].eval())
      self.assertAllClose(self._feature_1_boundaries, bucket_1[0].eval())
      self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
      self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())

  def testSaveRestoreAfterFlush(self):
    """Boundaries computed before saving must survive a save/restore cycle."""
    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
    with self.test_session() as sess:
      accumulator = boosted_trees_ops.QuantileAccumulator(
          num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
      save = saver.Saver()
      resources.initialize_resources(resources.shared_resources()).run()
      buckets = accumulator.get_bucket_boundaries()
      # Empty before any summaries are flushed.
      self.assertAllClose([], buckets[0].eval())
      self.assertAllClose([], buckets[1].eval())
      summaries = accumulator.add_summaries([self._feature_0, self._feature_1],
                                            self._example_weights)
      with ops.control_dependencies([summaries]):
        flush = accumulator.flush()
      self.evaluate(flush)
      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
      save.save(sess, save_path)
    with self.test_session(graph=ops.Graph()) as sess:
      accumulator = boosted_trees_ops.QuantileAccumulator(
          num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
      save = saver.Saver()
      save.restore(sess, save_path)
      buckets = accumulator.get_bucket_boundaries()
      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())

  def testSaveRestoreBeforeFlush(self):
    """Saving before flush restores an accumulator with empty boundaries."""
    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
    with self.test_session() as sess:
      accumulator = boosted_trees_ops.QuantileAccumulator(
          num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
      save = saver.Saver()
      resources.initialize_resources(resources.shared_resources()).run()
      summaries = accumulator.add_summaries([self._feature_0, self._feature_1],
                                            self._example_weights)
      self.evaluate(summaries)
      buckets = accumulator.get_bucket_boundaries()
      self.assertAllClose([], buckets[0].eval())
      self.assertAllClose([], buckets[1].eval())
      save.save(sess, save_path)
      # Flushing after the save must not leak into the restored state below.
      self.evaluate(accumulator.flush())
      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
    with self.test_session(graph=ops.Graph()) as sess:
      accumulator = boosted_trees_ops.QuantileAccumulator(
          num_streams=2, num_quantiles=3, epsilon=self.eps, name="q0")
      save = saver.Saver()
      save.restore(sess, save_path)
      buckets = accumulator.get_bucket_boundaries()
      self.assertAllClose([], buckets[0].eval())
      self.assertAllClose([], buckets[1].eval())
# Run the test suite when invoked directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
TomBaxter/osf.io | addons/figshare/tests/test_views.py | 23 | 2142 | #!/usr/bin/env python
# encoding: utf-8
import httplib as http
import mock
from nose.tools import assert_equal
import pytest
from addons.base.tests.views import (
OAuthAddonAuthViewsTestCaseMixin, OAuthAddonConfigViewsTestCaseMixin
)
from addons.figshare.tests.utils import FigshareAddonTestCase
from tests.base import OsfTestCase
from addons.figshare.client import FigshareClient
pytestmark = pytest.mark.django_db
class TestAuthViews(FigshareAddonTestCase, OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):
    """OAuth auth-view tests: everything is inherited from the shared mixin."""
    pass
class TestConfigViews(FigshareAddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):
    """Config-view tests, overriding the mixin where figshare needs mocks."""

    ## Overrides

    # NOTE: mock.patch.object decorators inject mocks bottom-up, so the
    # decorator closest to the def supplies the first mock argument.
    @mock.patch.object(FigshareClient, 'get_folders')
    @mock.patch.object(FigshareClient, 'get_linked_folder_info')
    def test_folder_list(self, mock_about, mock_folders):
        mock_folders.return_value = [{'path': 'fileset', 'name': 'Memes', 'id': '009001'}]
        mock_about.return_value = {'path': 'fileset', 'name': 'Memes', 'id': '009001'}
        super(TestConfigViews, self).test_folder_list()

    @mock.patch.object(FigshareClient, 'get_linked_folder_info')
    def test_set_config(self, mock_about):
        # Changed from super for mocking and log action name
        mock_about.return_value = {'path': 'fileset', 'name': 'Memes', 'id': '009001'}
        self.node_settings.set_auth(self.external_account, self.user)
        url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
        res = self.app.put_json(url, {
            'selected': self.folder
        }, auth=self.user.auth)
        assert_equal(res.status_code, http.OK)
        self.project.reload()
        # Selecting a folder must be recorded in the project log.
        assert_equal(
            self.project.logs.latest().action,
            '{0}_folder_selected'.format(self.ADDON_SHORT_NAME)
        )
        assert_equal(
            self.project.logs.latest().params['folder'],
            self.folder['path']
        )
        assert_equal(res.json['result']['folder']['path'], self.folder['path'])

    @mock.patch.object(FigshareClient, 'userinfo')
    def test_get_config(self, mock_about):
        super(TestConfigViews, self).test_get_config()
| apache-2.0 |
dwoods/gn-maps | geonode/search/templatetags/search_tags.py | 8 | 1188 | from django import template
register = template.Library()


def raw(parser, token):
    """Template tag that emits everything between {% raw %} and {% endraw %}
    verbatim, without Django rendering it (useful for client-side template
    syntax that would otherwise be interpreted).
    """
    # Whatever is between {% raw %} and {% endraw %} will be preserved as
    # raw, unrendered template code.
    text = []
    parse_until = 'endraw'
    # The lexer strips the delimiters from each token; this maps token type
    # back to the delimiters needed to reconstruct the original source.
    tag_mapping = {
        template.TOKEN_TEXT: ('', ''),
        template.TOKEN_VAR: ('{{', '}}'),
        template.TOKEN_BLOCK: ('{%', '%}'),
        template.TOKEN_COMMENT: ('{#', '#}'),
    }
    # By the time this template tag is called, the template system has already
    # lexed the template into tokens. Here, we loop over the tokens until
    # {% endraw %} and parse them to TextNodes. We have to add the start and
    # end bits (e.g. "{{" for variables) because those have already been
    # stripped off in a previous part of the template-parsing process.
    while parser.tokens:
        token = parser.next_token()
        if token.token_type == template.TOKEN_BLOCK and token.contents == parse_until:
            return template.TextNode(u''.join(text))
        start, end = tag_mapping[token.token_type]
        text.append(u'%s%s%s' % (start, token.contents, end))
    # BUGFIX: Parser.unclosed_block_tag() expects a *list* of tag names (it
    # ', '.join()s them for the error message); passing the bare string
    # produced a garbled "e, n, d, r, a, w" message.
    parser.unclosed_block_tag([parse_until])


raw = register.tag(raw)
| gpl-3.0 |
grimreaper/wolfbot-new | modules/common.py | 2 | 7663 | # The bot commands implemented in here are present no matter which module is loaded
import botconfig
from tools import decorators
import logging
import tools.moduleloader as ld
import traceback
from settings import common as var
from base64 import b64encode
import imp
import sys
import os
def on_privmsg(cli, rawnick, chan, msg, notice = False):
    """Dispatch an incoming PRIVMSG/NOTICE to registered command handlers.

    Channel messages go through COMMANDS (global) plus the current game
    module's COMMANDS; private messages go through PM_COMMANDS likewise.
    Handler exceptions are re-raised in debug mode, otherwise logged and
    reported in-channel.
    """
    currmod = ld.MODULES[ld.CURRENT_MODULE]

    # Messages addressed to hidden targets (STATUSMSG prefixes @#/+#).
    if botconfig.IGNORE_HIDDEN_COMMANDS and (chan.startswith("@#") or chan.startswith("+#")):
        return

    if (notice and ((chan != botconfig.NICK and not botconfig.ALLOW_NOTICE_COMMANDS) or
        (chan == botconfig.NICK and not botconfig.ALLOW_PRIVATE_NOTICE_COMMANDS))):
        return  # not allowed in settings

    if chan != botconfig.NICK:  #not a PM
        # "" is the catch-all key: handlers registered under it see every
        # channel message before named commands are considered.
        if currmod and "" in currmod.COMMANDS.keys():
            for fn in currmod.COMMANDS[""]:
                try:
                    fn(cli, rawnick, chan, msg)
                except Exception as e:
                    if botconfig.DEBUG_MODE:
                        raise e
                    else:
                        logging.error(traceback.format_exc())
                        cli.msg(chan, "An error has occurred and has been logged.")
            # Now that is always called first.
        # Channel commands require the CMD_CHAR prefix.
        for x in set(list(COMMANDS.keys()) + (list(currmod.COMMANDS.keys()) if currmod else list())):
            if x and msg.lower().startswith(botconfig.CMD_CHAR+x):
                h = msg[len(x)+1:]
                # Only fire on an exact command word (next char is a space
                # or end of message), not on a longer word sharing the prefix.
                if not h or h[0] == " " or not x:
                    for fn in COMMANDS.get(x,[])+(currmod.COMMANDS.get(x,[]) if currmod else []):
                        try:
                            fn(cli, rawnick, chan, h.lstrip())
                        except Exception as e:
                            if botconfig.DEBUG_MODE:
                                raise e
                            else:
                                logging.error(traceback.format_exc())
                                cli.msg(chan, "An error has occurred and has been logged.")
    else:
        # PMs accept the command either with or without the CMD_CHAR prefix.
        for x in set(list(PM_COMMANDS.keys()) + (list(currmod.PM_COMMANDS.keys()) if currmod else list())):
            if msg.lower().startswith(botconfig.CMD_CHAR+x):
                h = msg[len(x)+1:]
            elif not x or msg.lower().startswith(x):
                h = msg[len(x):]
            else:
                continue
            if not h or h[0] == " " or not x:
                for fn in PM_COMMANDS.get(x, [])+(currmod.PM_COMMANDS.get(x,[]) if currmod else []):
                    try:
                        fn(cli, rawnick, h.lstrip())
                    except Exception as e:
                        if botconfig.DEBUG_MODE:
                            raise e
                        else:
                            logging.error(traceback.format_exc())
                            cli.msg(chan, "An error has occurred and has been logged.")
def __unhandled__(cli, prefix, cmd, *args):
    """Fallback for IRC events with no dedicated handler: dispatch them to
    any registered HOOKS (global and current-module), else log them.
    """
    currmod = ld.MODULES[ld.CURRENT_MODULE]

    if cmd in set(list(HOOKS.keys())+(list(currmod.HOOKS.keys()) if currmod else list())):
        largs = list(args)
        # Hook callbacks expect str arguments; decode any raw bytes first.
        for i,arg in enumerate(largs):
            if isinstance(arg, bytes): largs[i] = arg.decode('ascii')
        for fn in HOOKS.get(cmd, [])+(currmod.HOOKS.get(cmd, []) if currmod else []):
            try:
                fn(cli, prefix, *largs)
            except Exception as e:
                if botconfig.DEBUG_MODE:
                    raise e
                else:
                    logging.error(traceback.format_exc())
                    cli.msg(botconfig.CHANNEL, "An error has occurred and has been logged.")
    else:
        logging.debug('Unhandled command {0}({1})'.format(cmd, [arg.decode('utf_8')
                                                                for arg in args
                                                                if isinstance(arg, bytes)]))
# Registries mapping command/event name -> list of handler callables.
COMMANDS = {}     # channel commands (CMD_CHAR-prefixed)
PM_COMMANDS = {}  # private-message commands
HOOKS = {}        # raw IRC event hooks

# Decorator factories used throughout this file to register handlers.
cmd = decorators.generate(COMMANDS)
pmcmd = decorators.generate(PM_COMMANDS)
hook = decorators.generate(HOOKS, raw_nick=True, permissions=False)
def connect_callback(cli):
    """Set up post-connect behavior: channel join, nick recovery and
    (optionally) SASL authentication, all via one-shot IRC event hooks.
    """
    def prepare_stuff(*args):
        # Runs on end-of-MOTD: join, request ops, and enable IRCv3 caps.
        cli.join(botconfig.CHANNEL)
        cli.msg("ChanServ", "op "+botconfig.CHANNEL)
        cli.cap("REQ", "extended-join")
        cli.cap("REQ", "account-notify")
        try:
            ld.MODULES[ld.CURRENT_MODULE].connect_callback(cli)
        except AttributeError:
            pass  # no connect_callback for this one
        cli.nick(botconfig.NICK)  # very important (for regain/release)

    prepare_stuff = hook("endofmotd", hookid=294)(prepare_stuff)

    def mustregain(cli, *blah):
        cli.ns_regain()

    def mustrelease(cli, *rest):
        cli.ns_release()
        cli.nick(botconfig.NICK)

    @hook("unavailresource", hookid=239)
    @hook("nicknameinuse", hookid=239)
    def must_use_temp_nick(cli, *etc):
        # First collision: fall back to NICK_, then swap these handlers for
        # the NickServ regain/release ones so the next attempt recovers NICK.
        cli.nick(botconfig.NICK+"_")
        cli.user(botconfig.NICK, "")

        decorators.unhook(HOOKS, 239)
        hook("unavailresource")(mustrelease)
        hook("nicknameinuse")(mustregain)

    if botconfig.SASL_AUTHENTICATION:
        @hook("authenticate")
        def auth_plus(cli, something, plus):
            if plus == "+":
                # SASL PLAIN payload: authzid NUL authcid NUL password, base64.
                nick_b = bytes(botconfig.USERNAME if botconfig.USERNAME else botconfig.NICK, "utf-8")
                pass_b = bytes(botconfig.PASS, "utf-8")
                secrt_msg = b'\0'.join((nick_b, nick_b, pass_b))
                cli.send("AUTHENTICATE " + b64encode(secrt_msg).decode("utf-8"))

        @hook("cap")
        def on_cap(cli, svr, mynick, ack, cap):
            if ack.upper() == "ACK" and "sasl" in cap:
                cli.send("AUTHENTICATE PLAIN")

        @hook("903")
        def on_successful_auth(cli, blah, blahh, blahhh):
            cli.cap("END")

        # 904-907 are the SASL failure/abort numerics.
        @hook("904")
        @hook("905")
        @hook("906")
        @hook("907")
        def on_failure_auth(cli, *etc):
            cli.quit()
            print("Authentication failed. Did you fill the account name "+
                  "in botconfig.USERNAME if it's different from the bot nick?")
@hook("ping")
def on_ping(cli, prefix, server):
cli.send('PONG', server)
# Debug-only admin command to hot-swap the active game module.
if botconfig.DEBUG_MODE:
    @cmd("module", admin_only = True)
    def ch_module(cli, nick, chan, rest):
        """Switch the bot to a different loaded module by name."""
        rest = rest.strip()
        if rest in ld.MODULES.keys():
            ld.CURRENT_MODULE = rest
            ld.MODULES[rest].connect_callback(cli)
            cli.msg(chan, "Module {0} is now active.".format(rest))
        else:
            cli.msg(chan, "Module {0} does not exist.".format(rest))
@pmcmd("frestart", admin_only=True)
@cmd("frestart", admin_only=True)
def restart_program(cli, nick, *rest):
"""Restarts the bot."""
try:
if var.PHASE in ("day", "night"):
stop_game(cli)
else:
reset(cli)
cli.quit("Forced restart from "+nick)
raise SystemExit
finally:
print("RESTARTING")
python = sys.executable
if rest[-1].strip().lower() == "debugmode":
os.execl(python, python, sys.argv[0], "--debug")
elif rest[-1].strip().lower() == "normalmode":
os.execl(python, python, sys.argv[0])
elif rest[-1].strip().lower() == "verbosemode":
os.execl(python, python, sys.argv[0], "--verbose")
else:
os.execl(python, python, *sys.argv)
| bsd-2-clause |
13W/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/encoder.py | 484 | 25695 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a cStringIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity".  We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
# NOTE(review): not referenced in this chunk; presumably used by the
# float/double encoders later in the file -- confirm.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
  """Returns the number of bytes required to serialize a tag with this field
  number."""
  # Just pass in type 0, since the type won't affect the tag+type size.
  return _VarintSize(wire_format.PackTag(field_number, 0))
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
  """A sizer factory which uses compute_value_size to size each value.

  Typically compute_value_size is _VarintSize.  The returned SpecificSizer
  is then specialized per field; the closures deliberately bind tag size and
  globals up front (see the module docstring's performance notes).
  """

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # Packed: one tag + varint byte-length + concatenated payloads.
        result = 0
        for element in value:
          result += compute_value_size(element)
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      def RepeatedFieldSize(value):
        # Repeated (unpacked): one tag per element.
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(element)
        return result
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(value)
      return FieldSize

  return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
  """Like _SimpleSizer, but modify_value is invoked on each value before it
  is passed to compute_value_size.  modify_value is typically ZigZagEncode
  (used below for the sint32/sint64 sizers)."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # Packed: one tag + varint byte-length + concatenated payloads.
        result = 0
        for element in value:
          result += compute_value_size(modify_value(element))
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      def RepeatedFieldSize(value):
        # Repeated (unpacked): one tag per element.
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(modify_value(element))
        return result
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(modify_value(value))
      return FieldSize

  return SpecificSizer
def _FixedSizer(value_size):
  """Like _SimpleSizer except for a fixed-size field.  The input is the size
  of one value (e.g. 4 for fixed32/float, 8 for fixed64/double)."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        result = len(value) * value_size
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Every element costs the same, so precompute tag + payload size.
      element_size = value_size + tag_size
      def RepeatedFieldSize(value):
        return len(value) * element_size
      return RepeatedFieldSize
    else:
      field_size = value_size + tag_size
      def FieldSize(value):
        return field_size
      return FieldSize

  return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
# One sizer constructor per scalar field type.  int32/int64/enum use the
# signed varint size (negatives take ten bytes); uint32/uint64 the unsigned
# one; sint32/sint64 are ZigZag-encoded first.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)

UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)

SInt32Sizer = SInt64Sizer = _ModifiedSizer(
    _SignedVarintSize, wire_format.ZigZagEncode)

# Fixed-width wire types: 4 bytes (fixed32/sfixed32/float), 8 bytes
# (fixed64/sfixed64/double), and 1 byte for bool.
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)

BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a string field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  # Strings are length-delimited and can never be packed.
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Size is measured on the UTF-8 encoded bytes, not code points.
        l = local_len(element.encode('utf-8'))
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value.encode('utf-8'))
      return tag_size + local_VarintSize(l) + l
    return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a bytes field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  # bytes fields are length-delimited and can never be packed.
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        l = local_len(element)
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value)
      return tag_size + local_VarintSize(l) + l
    return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field."""
  # Groups are bracketed by START_GROUP/END_GROUP tags, hence the doubling.
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        result += element.ByteSize()
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      return tag_size + value.ByteSize()
    return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a message field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  # Embedded messages are length-delimited: tag + varint size + payload.
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        l = element.ByteSize()
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = value.ByteSize()
      return tag_size + local_VarintSize(l) + l
    return FieldSize
# --------------------------------------------------------------------
# MessageSet is special.
def MessageSetItemSizer(field_number):
  """Returns a sizer for extensions of MessageSet.
  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything except the payload length varint is fixed per extension:
  # group start/end tags, the type_id tag+value, and the message tag.
  static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
                 _TagSize(3))
  local_VarintSize = _VarintSize
  def FieldSize(value):
    l = value.ByteSize()
    return static_size + local_VarintSize(l) + l
  return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
local_chr = chr
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
local_chr = chr
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeSignedVarint
# Shared module-level encoder instances used by all the helpers below.
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes. This is only
  called at startup time so it doesn't need to be fast."""
  pieces = []
  # Collect the emitted characters via list.append, then join once.
  _EncodeVarint(pieces.append, value)
  return "".join(pieces)
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes. Only called at startup."""
  # A tag is (field_number << 3) | wire_type, varint-encoded.
  return _VarintBytes(wire_format.PackTag(field_number, wire_type))
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
  """Return a constructor for an encoder for fields of a particular type.
  Args:
      wire_type: The field's wire type, for encoding tags.
      encode_value: A function which encodes an individual value, e.g.
        _EncodeVarint().
      compute_value_size: A function which computes the size of an individual
        value, e.g. _VarintSize().
  """
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      # Packed form: one tag, varint byte-length, then raw values.
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # First pass computes the payload length, second pass writes it.
        size = 0
        for element in value:
          size += compute_value_size(element)
        local_EncodeVarint(write, size)
        for element in value:
          encode_value(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          encode_value(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return encode_value(write, value)
      return EncodeField
  return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
  """Like SimpleEncoder but additionally invokes modify_value on every value
  before passing it to encode_value. Usually modify_value is ZigZagEncode."""
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # modify_value is applied twice per element (sizing + writing); it
        # must therefore be a pure function.
        size = 0
        for element in value:
          size += compute_value_size(modify_value(element))
        local_EncodeVarint(write, size)
        for element in value:
          encode_value(write, modify_value(element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          encode_value(write, modify_value(element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return encode_value(write, modify_value(value))
      return EncodeField
  return SpecificEncoder
def _StructPackEncoder(wire_type, format):
  """Return a constructor for an encoder for a fixed-width field.
  Args:
      wire_type: The field's wire type, for encoding tags.
      format: The format string to pass to struct.pack().
  """
  value_size = struct.calcsize(format)
  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # Fixed element width means the payload length is known up front.
        local_EncodeVarint(write, len(value) * value_size)
        for element in value:
          write(local_struct_pack(format, element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          write(local_struct_pack(format, element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return write(local_struct_pack(format, value))
      return EncodeField
  return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
  """Return a constructor for an encoder for float fields.
  This is like StructPackEncoder, but catches errors that may be due to
  passing non-finite floating-point values to struct.pack, and makes a
  second attempt to encode those values.
  Args:
      wire_type: The field's wire type, for encoding tags.
      format: The format string to pass to struct.pack().
  """
  value_size = struct.calcsize(format)
  if value_size == 4:
    def EncodeNonFiniteOrRaise(write, value):
      # Remember that the serialized form uses little-endian byte order.
      if value == _POS_INF:
        write('\x00\x00\x80\x7F')
      elif value == _NEG_INF:
        write('\x00\x00\x80\xFF')
      elif value != value:          # NaN
        write('\x00\x00\xC0\x7F')
      else:
        # Not a known non-finite value: re-raise the original pack error.
        raise
  elif value_size == 8:
    def EncodeNonFiniteOrRaise(write, value):
      if value == _POS_INF:
        write('\x00\x00\x00\x00\x00\x00\xF0\x7F')
      elif value == _NEG_INF:
        write('\x00\x00\x00\x00\x00\x00\xF0\xFF')
      elif value != value:                         # NaN
        write('\x00\x00\x00\x00\x00\x00\xF8\x7F')
      else:
        raise
  else:
    raise ValueError('Can\'t encode floating-point values that are '
                     '%d bytes long (only 4 or 8)' % value_size)
  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        local_EncodeVarint(write, len(value) * value_size)
        for element in value:
          # This try/except block is going to be faster than any code that
          # we could write to check whether element is finite.
          # NOTE(review): assumes struct.pack raises SystemError for
          # non-finite values -- true only on some Python 2.x builds;
          # confirm for the targeted interpreter.
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        try:
          write(local_struct_pack(format, value))
        except SystemError:
          EncodeNonFiniteOrRaise(write, value)
      return EncodeField
  return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
# One encoder constructor per scalar field type; pairs with the sizers above.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
    wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder  = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder  = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder    = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder   = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a boolean field."""
  # Pre-build the two possible payload bytes once.
  false_byte = chr(0)
  true_byte = chr(1)
  if is_packed:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    local_EncodeVarint = _EncodeVarint
    def EncodePackedField(write, value):
      write(tag_bytes)
      # Each bool is exactly one byte, so the payload length is len(value).
      local_EncodeVarint(write, len(value))
      for element in value:
        if element:
          write(true_byte)
        else:
          write(false_byte)
    return EncodePackedField
  elif is_repeated:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    def EncodeRepeatedField(write, value):
      for element in value:
        write(tag_bytes)
        if element:
          write(true_byte)
        else:
          write(false_byte)
    return EncodeRepeatedField
  else:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    def EncodeField(write, value):
      write(tag_bytes)
      if value:
        return write(true_byte)
      return write(false_byte)
    return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a string field."""
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  local_EncodeVarint = _EncodeVarint
  local_len = len
  # Strings are length-delimited and can never be packed.
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        # Encode once, then reuse for both length and payload.
        encoded = element.encode('utf-8')
        write(tag)
        local_EncodeVarint(write, local_len(encoded))
        write(encoded)
    return EncodeRepeatedField
  else:
    def EncodeField(write, value):
      encoded = value.encode('utf-8')
      write(tag)
      local_EncodeVarint(write, local_len(encoded))
      return write(encoded)
    return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a bytes field."""
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  local_EncodeVarint = _EncodeVarint
  local_len = len
  # bytes fields are length-delimited and can never be packed.
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        write(tag)
        local_EncodeVarint(write, local_len(element))
        write(element)
    return EncodeRepeatedField
  else:
    def EncodeField(write, value):
      write(tag)
      local_EncodeVarint(write, local_len(value))
      return write(value)
    return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a group field."""
  # Groups are delimited by start/end tags instead of a length prefix.
  start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
  end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        write(start_tag)
        element._InternalSerialize(write)
        write(end_tag)
    return EncodeRepeatedField
  else:
    def EncodeField(write, value):
      write(start_tag)
      value._InternalSerialize(write)
      return write(end_tag)
    return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a message field."""
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  local_EncodeVarint = _EncodeVarint
  # Embedded messages are length-delimited and can never be packed.
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        write(tag)
        local_EncodeVarint(write, element.ByteSize())
        element._InternalSerialize(write)
    return EncodeRepeatedField
  else:
    def EncodeField(write, value):
      write(tag)
      local_EncodeVarint(write, value.ByteSize())
      return value._InternalSerialize(write)
    return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
  """Encoder for extensions of MessageSet.
  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything before the payload is constant for a given field_number, so
  # it is pre-serialized once at construction time.
  start_bytes = "".join([
      TagBytes(1, wire_format.WIRETYPE_START_GROUP),
      TagBytes(2, wire_format.WIRETYPE_VARINT),
      _VarintBytes(field_number),
      TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
  end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
  local_EncodeVarint = _EncodeVarint
  def EncodeField(write, value):
    write(start_bytes)
    local_EncodeVarint(write, value.ByteSize())
    value._InternalSerialize(write)
    return write(end_bytes)
  return EncodeField
| bsd-3-clause |
jessstrap/servotk | tests/wpt/web-platform-tests/old-tests/webdriver/navigation/auth_tests.py | 141 | 1389 | import os
import sys
import unittest
import ConfigParser
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
from wptserve import server
from wptserve.router import any_method
from wptserve.handlers import basic_auth_handler
class WebDriverAuthTest(unittest.TestCase):
    """Checks that WebDriver still answers when a page responds HTTP 401."""
    # Set up class to start HTTP Server that responds to
    # test URLs with various 401 responses
    @classmethod
    def setUpClass(cls):
        cls.driver = base_test.create_driver()
        # Every route on this server answers with a Basic-auth challenge.
        cls.webserver = server.WebTestHttpd(routes=[(any_method, "*", basic_auth_handler)])
        cls.webserver.start()
    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        cls.webserver.stop()
    # Test that when 401 is seen by browser, a WebDriver response is still sent
    def test_response_401_auth_basic(self):
        page = self.webserver.get_url('navigation/res/authenticated.html')
        self.driver.set_page_load_timeout(5)
        try:
            self.driver.get( page )
            # if we got a responses instead of timeout, that's success
            self.assertTrue(True)
        except exceptions.TimeoutException:
            self.fail("Did not get response from browser.")
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            self.fail("Unexpected failure. Please investigate.")
if __name__ == "__main__":
    unittest.main()
| mpl-2.0 |
SavageLearning/Machete | Machete.Util/python/employer_combined.py | 2 | 8342 | import collections
import ConfigParser
import smtplib
import string
from datetime import datetime
import MySQLdb
import requests
# Toggle verbose dumps of intermediate data structures.
DEBUG = True
if DEBUG:
    import pprint
# Sentinel key that sorts before every real component id so that 'sid'
# always comes first in field_names.
NOT_A_FIELD = -1000
# Maps Drupal webform component ids (cid) to Machete field names.
fields = {
    NOT_A_FIELD: 'sid',
    2: 'business',
    3: 'name',
    4: 'address1',
    5: 'address2',
    6: 'city',
    7: 'state',
    8: 'zipcode',
    9: 'phone',
    10: 'cellphone',
    11: 'email',
    12: 'referredBy',
    13: 'referredbyOther',
    14: 'blogparticipate',
    15: 'returnCustomer',
    16: 'notes',
    18: 'contactName',
    19: 'workSiteAddress1',
    20: 'workSiteAddress2',
    21: 'wo_city',
    22: 'wo_state',
    23: 'wo_zipcode',
    24: 'wo_phone',
    25: 'typeOfWorkID',
    26: 'englishRequired',
    27: 'englishRequiredNote',
    28: 'lunchSupplied',
    29: 'description',
    30: 'date_needed',
    31: 'time_needed',
    32: 'timeFlexible',
    33: 'transportMethodID',
    43: 'receiveUpdates'
}
# Field names ordered by component id; 'sid' is first (key -1000).
field_names = list(fields[key] for key in sorted(fields))
# Get settings
try:
    _settings = ConfigParser.ConfigParser()
    _settings.read('employer_form.ini')
except ConfigParser.Error as err:
    raise
db_config = dict(_settings.items('db'))
machete_config = dict(_settings.items('machete'))
smtp_config = dict(_settings.items('smtp'))
webform_config = dict(_settings.items('webform'))
# Open connection to Database
db = MySQLdb.connect(db_config['host'],
                     db_config['user'],
                     db_config['pw'],
                     db_config['db_name'])
cursor = db.cursor()
# only grab webform entries without success = true in webform_machete table
cursor.execute("SELECT * from \
    webform_submitted_data LEFT OUTER JOIN \
    webform_machete ON webform_machete.sid = webform_submitted_data.sid \
    WHERE (webform_machete.success <> 1 OR webform_machete.success IS NULL) \
    AND webform_submitted_data.nid=%s", webform_config['id'])
form_data = cursor.fetchall()
# Python 2 integer division: rows-per-submission is len(field_names)-1
# because 'sid' is not a stored webform component.
entry_count = len(form_data) / (len(field_names) -1)
print "=================> entry_count: ", entry_count
if DEBUG:
    print "------------------- form_data: ", pprint.pformat(form_data,
                                                            indent=4)
def cluster_form_submission_data(data):
    '''group form fields by their sid (submission id)'''
    # NOTE(review): assumes each row's index 1 is the sid column and the
    # remainder (field[2:]) is the per-field payload -- confirm against
    # the SELECT * column order of webform_submitted_data.
    form_submission_data = collections.defaultdict(list)
    for field in data:
        form_submission_data[field[1]].append(field[2:])
    if DEBUG:
        print "----------- form_submission_data: ", pprint.pformat(
            form_submission_data, indent=4)
    return form_submission_data
def convert_single_form_submission(sid, form_data):
    '''convert a single submission to the format we need'''
    # NOTE(review): assumes sorting the row tuples orders them by component
    # id and that index 2 of each tuple is the submitted value -- verify
    # against the webform schema before changing.
    return dict(zip(field_names, [sid, ] + [x[2] for x in sorted(form_data)]))
# One dict per submission, keyed by Machete field name.
all_submissions = [convert_single_form_submission(sid, x) for sid, x in
                   cluster_form_submission_data(form_data).items()]
if DEBUG:
    print "------------------- all_submissions: ", pprint.pformat(
        all_submissions, indent=4)
# Logging
log_file = open('employer_combined.log', 'a')
# ``now`` is captured once at import time.
now = datetime.now()
def log_entry(entry):
    """Append a timestamped line to the script log file.

    Falls back to e-mailing the admins (via ``mail``) when the log file
    cannot be written.
    """
    try:
        # Timestamp is taken at call time; the module-level ``now`` is frozen
        # at import and would mis-date entries written later in the run.
        stamp = datetime.now().strftime("%Y-%m-%d %I:%M%p :: ")
        log_file.write(stamp + entry + "\n")
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intended best-effort
        # behaviour without masking interpreter-level signals.
        mail('Machete log error', 'Not able to write to log file.')
# Send e-mail
def mail(subject='error', message='Error occurred'):
    """Send a plain-text notification e-mail through the local SMTP server.

    Sender and recipients come from the ``[smtp]`` section of
    employer_form.ini; ``smtp_config['to']`` may be a comma-separated list.
    """
    # str.join replaces the long-deprecated string.join module function.
    body = "\r\n".join((
        "From: %s" % smtp_config['user'],
        "To: %s" % smtp_config['to'],
        "Subject: %s" % subject,
        "",
        message
    ))
    mail_server = smtplib.SMTP('localhost')
    try:
        mail_server.sendmail(smtp_config['user'], smtp_config['to'].split(','), body)
    finally:
        # Always close the SMTP connection, even when sendmail raises;
        # the original leaked the socket on failure.
        mail_server.quit()
# Main flow: login to Machete, post each pending submission, and record
# the outcome per sid in webform_machete.
# NOTE(review): Python 2 script (print statements, old ``requests`` session
# config API).  The "'%s'" placeholders below are quoted twice once MySQLdb
# quotes the parameter as well; MySQL's adjacent-string-literal
# concatenation makes ''val'' parse as 'val', but plain %s (unquoted) is
# the correct form -- verify before reuse.
if entry_count > 0:
    log_entry("Script started")
    log_entry("Entry Count: %s" % entry_count)
# login to machete
s = requests.session()
s.config['keep_alive'] = True
try:
    login_response = s.post(url=machete_config['base_url'] + '/Account/Logon',
        data={'UserName': machete_config['user'],
        'Password': machete_config['pw']}, verify=False,
        cert=(machete_config['cert'], machete_config['key']))
except:
    print "!-------------- machete connection rejected"
    mail("Machete connection rejected", "Exception raised while trying to connect to machete")
    log_entry("Machete connection rejected")
    exit()
if ('Login was unsuccessful' in login_response.text):
    print "!-------------- login failed"
    log_entry("Failed to login to Machete")
    mail("Machete login failed", "Script could not login to machete")
    exit()
# Process submissions
for send_data in all_submissions:
    send_data['ID'] = '0'
    # Machete expects a single US-formatted datetime field.
    dt_str = send_data['date_needed'] + " " + send_data['time_needed']
    dt_obj = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')
    send_data['dateTimeofWork'] = dt_obj.strftime("%m-%d-%Y %I:%M:%S %p")
    try:
        post_response = s.post(
            url=machete_config['base_url'] + '/Employer/CreateCombined',
            data=send_data, verify=False,
            cert=(machete_config['cert'], machete_config['key']))
    except:
        print "!-------------- Update failed (MAIL SEND)"
        log_entry("Failed post to Machete")
        mail("Machete post failed",
             "Posting the form to the Machete URL failed")
    print "-------------- post_response: ", post_response.text
    cursor.execute("SELECT sid FROM webform_machete WHERE sid='%s'",
                   send_data['sid'])
    existing_entry = cursor.rowcount
    print "send_data[sid]:", send_data['sid']
    print "existing_entry: ", existing_entry
    if (not 'an error occurred' in post_response.text and
            post_response.json['jobSuccess']):
        print "JobSuccess!"
        if (existing_entry > 0):
            try:
                cursor.execute("UPDATE webform_machete SET success='1', \
                    tries=tries+1, last_attempt=NOW() WHERE sid='%s'",
                               (send_data['sid'],))
                db.commit()
            except:
                print "!-------------- Update webform_machete table failed"
                log_entry("Failed to update webform_machete table")
                mail("update DB failed", "Updating webform_machete \
                    table failed")
        else:
            try:
                cursor.execute("INSERT INTO webform_machete (sid, success, \
                    tries, last_attempt) VALUES('%s', '%s', 1, NOW())",
                               (send_data['sid'],
                                post_response.json['jobSuccess']))
                db.commit()
            except:
                print "!-------------- Insert into webform_machete table failed"
                log_entry("Failed to insert into webform_machete table")
                mail("Insert DB failed", "Inserting into webform_machete \
                    table failed")
    else: # jobSuccess failed
        print "JobFailed!"
        if (existing_entry > 0):
            print "existing entry"
            try:
                cursor.execute("UPDATE webform_machete SET success=0, \
                    tries=tries+1, last_attempt=NOW() WHERE sid='%s'",
                               (send_data['sid'],))
                db.commit()
            except:
                print "-------------- Update webform_machete table failed"
                log_entry("Failed to update webform_machete table")
                mail("update DB failed", "Updating webform_machete \
                    table failed")
        else:
            print "no existing entry"
            try:
                cursor.execute("INSERT INTO webform_machete (sid, success, \
                    tries, last_attempt) VALUES('%s', 0, 1, NOW())",
                               (send_data['sid'],))
                db.commit()
            except:
                print "!-------------- Insert into webform_machete table failed"
                log_entry("Failed to insert into webform_machete table")
                mail("Insert DB failed", "Inserting into webform_machete \
                    table failed")
db.close()
if entry_count > 0:
    log_entry("Script finished")
log_file.close()
mitya57/django | tests/admin_docs/models.py | 244 | 1941 | """
Models for testing various aspects of the djang.contrib.admindocs app
"""
from django.db import models
# Minimal related models used as FK/M2M targets by Person below.
class Company(models.Model):
    # Employer name; referenced by Person.company.
    name = models.CharField(max_length=200)
class Group(models.Model):
    # Group name; referenced by Person.groups.
    name = models.CharField(max_length=200)
class Family(models.Model):
    # Family surname; referenced by Person.family.
    last_name = models.CharField(max_length=200)
class Person(models.Model):
    """
    Stores information about a person, related to :model:`myapp.Company`.
    **Notes**
    Use ``save_changes()`` when saving this object.
    ``company``
        Field storing :model:`myapp.Company` where the person works.
    (DESCRIPTION)
    .. raw:: html
        :file: admin_docs/evilfile.txt
    .. include:: admin_docs/evilfile.txt
    """
    # NOTE(review): the docstring above (including the raw/include
    # directives) is a deliberate admindocs test fixture; keep it verbatim.
    first_name = models.CharField(max_length=200, help_text="The person's first name")
    last_name = models.CharField(max_length=200, help_text="The person's last name")
    company = models.ForeignKey(Company, models.CASCADE, help_text="place of work")
    family = models.ForeignKey(Family, models.SET_NULL, related_name='+', null=True)
    groups = models.ManyToManyField(Group, help_text="has membership")
    def _get_full_name(self):
        # Internal helper; exposed publicly via get_full_name().
        return "%s %s" % (self.first_name, self.last_name)
    def rename_company(self, new_name):
        # Renames and persists the related Company, returning the new name.
        self.company.name = new_name
        self.company.save()
        return new_name
    def dummy_function(self, baz, rox, *some_args, **some_kwargs):
        # Fixture method exercising *args/**kwargs introspection.
        return some_kwargs
    def suffix_company_name(self, suffix='ltd'):
        return self.company.name + suffix
    # The stub methods below exist so admindocs has methods to introspect.
    def add_image(self):
        pass
    def delete_image(self):
        pass
    def save_changes(self):
        pass
    def set_status(self):
        pass
    def get_full_name(self):
        """
        Get the full name of the person
        """
        return self._get_full_name()
    def get_status_count(self):
        return 0
    def get_groups_list(self):
        return []
| bsd-3-clause |
TeamExodus/external_chromium_org | third_party/markupsafe/__init__.py | 371 | 8205 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from markupsafe._compat import text_type, string_types, int_types, \
unichr, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped. This implements the `__html__` interface a couple
    of frameworks and web applications use. :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.
    The `escape` function returns markup objects so that double escaping can't
    happen.
    The constructor of the :class:`Markup` class can be used for three
    different things: When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:
    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...  def __html__(self):
    ...   return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')
    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:
    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')
    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:
    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    __slots__ = ()
    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # An object exposing __html__ supplies its own safe representation.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)
    def __html__(self):
        return self
    def __add__(self, other):
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented
    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented
    def __mul__(self, num):
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__
    def __mod__(self, arg):
        # Wrap every interpolated value so it is escaped on conversion.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )
    def join(self, seq):
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__
    def split(self, *args, **kwargs):
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__
    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__
    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__
    def unescape(self):
        r"""Unescape markup again into an text_type string. This also resolves
        known HTML4 and XHTML entities:
        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            # Unknown entities are dropped rather than passed through.
            return u''
        return _entity_re.sub(handle_match, text_type(self))
    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags. This
        also resolves known HTML4 and XHTML entities. Whitespace is
        normalized to one:
        >>> Markup("Main &raquo; <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()
    @classmethod
    def escape(cls, s):
        """Escape the string. Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv
    def make_wrapper(name):
        # Builds a Markup-returning version of a unicode method that escapes
        # its positional arguments first.
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            #_escape_argspec(kwargs, kwargs.iteritems(), None)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func
    # Regenerate the listed unicode methods as escaping wrappers; assigning
    # into locals() works here because this is a class body, not a function.
    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_wrapper(method)
    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))
    # new in python 2.6
    if hasattr(text_type, 'format'):
        format = make_wrapper('format')
    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_wrapper('__getslice__')
    del method, make_wrapper
def _escape_argspec(obj, iterable, escape):
    """Helper for various string-wrapped functions."""
    for key, value in iterable:
        # Escape anything string-like or HTML-aware; leave other values alone.
        needs_escaping = isinstance(value, string_types) or hasattr(value, '__html__')
        if needs_escaping:
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Helper for Markup.__mod__"""
    def __init__(self, obj, escape):
        # obj: the interpolated value; escape: the Markup class's escaper.
        self.obj = obj
        self.escape = escape
    # Escape lazily, only when %-formatting actually converts the value.
    __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
    __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
    __repr__ = lambda s: str(s.escape(repr(s.obj)))
    # Numeric conversions (%d, %f) need no escaping.
    __int__ = lambda s: int(s.obj)
    __float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append('soft_str')
| bsd-3-clause |
tuhangdi/django | django/db/models/manager.py | 301 | 10722 | import copy
import inspect
from importlib import import_module
from django.db import router
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
def ensure_default_manager(cls):
    """
    Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also sets up the _base_manager
    points to a plain Manager instance (which could be the same as
    _default_manager if it's not a subclass of Manager).
    """
    if cls._meta.swapped:
        # Swapped-out models never get a usable manager; accessing
        # `objects` raises an informative error instead.
        setattr(cls, 'objects', SwappedManagerDescriptor(cls))
        return
    if not getattr(cls, '_default_manager', None):
        # A field named 'objects' would collide with the auto-added manager.
        if any(f.name == 'objects' for f in cls._meta.fields):
            raise ValueError(
                "Model %s must specify a custom Manager, because it has a "
                "field named 'objects'" % cls.__name__
            )
        # Create the default manager, if needed.
        cls.add_to_class('objects', Manager())
        cls._base_manager = cls.objects
    elif not getattr(cls, '_base_manager', None):
        default_mgr = cls._default_manager.__class__
        if (default_mgr is Manager or
                getattr(default_mgr, "use_for_related_fields", False)):
            cls._base_manager = cls._default_manager
        else:
            # Default manager isn't a plain Manager class, or a suitable
            # replacement, so we walk up the base class hierarchy until we hit
            # something appropriate.
            for base_class in default_mgr.mro()[1:]:
                if (base_class is Manager or
                        getattr(base_class, "use_for_related_fields", False)):
                    cls.add_to_class('_base_manager', base_class())
                    return
            raise AssertionError(
                "Should never get here. Please report a bug, including your "
                "model and model manager setup."
            )
@python_2_unicode_compatible
class BaseManager(object):
    """Common behavior for all model managers: creation-order tracking,
    queryset-method proxying, and database routing."""

    # Tracks each time a Manager instance is created. Used to retain order.
    creation_counter = 0

    #: If set to True the manager will be serialized into migrations and will
    #: thus be available in e.g. RunPython operations
    use_in_migrations = False

    def __new__(cls, *args, **kwargs):
        # We capture the arguments to make returning them trivial
        obj = super(BaseManager, cls).__new__(cls)
        obj._constructor_args = (args, kwargs)
        return obj

    def __init__(self):
        super(BaseManager, self).__init__()
        self._set_creation_counter()
        self.model = None
        self.name = None
        self._inherited = False
        self._db = None
        self._hints = {}

    def __str__(self):
        """ Return "app_label.model_label.manager_name". """
        return '%s.%s' % (self.model._meta.label, self.name)

    def deconstruct(self):
        """
        Returns a 5-tuple of the form (as_manager (True), manager_class,
        queryset_class, args, kwargs).

        Raises a ValueError if the manager is dynamically generated.
        """
        qs_class = self._queryset_class
        if getattr(self, '_built_with_as_manager', False):
            # using MyQuerySet.as_manager()
            return (
                True,  # as_manager
                None,  # manager_class
                '%s.%s' % (qs_class.__module__, qs_class.__name__),  # qs_class
                None,  # args
                None,  # kwargs
            )
        else:
            module_name = self.__module__
            name = self.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find manager %s in %s.\n"
                    "Please note that you need to inherit from managers you "
                    "dynamically generated with 'from_queryset()'."
                    % (name, module_name)
                )
            return (
                False,  # as_manager
                '%s.%s' % (module_name, name),  # manager_class
                None,  # qs_class
                self._constructor_args[0],  # args
                self._constructor_args[1],  # kwargs
            )

    def check(self, **kwargs):
        # Hook for the system-check framework; the base manager has no checks.
        return []

    @classmethod
    def _get_queryset_methods(cls, queryset_class):
        # Build proxy methods so the manager mirrors the queryset's public API.
        def create_method(name, method):
            def manager_method(self, *args, **kwargs):
                return getattr(self.get_queryset(), name)(*args, **kwargs)
            manager_method.__name__ = method.__name__
            manager_method.__doc__ = method.__doc__
            return manager_method

        new_methods = {}
        # Refs http://bugs.python.org/issue1785.
        predicate = inspect.isfunction if six.PY3 else inspect.ismethod
        for name, method in inspect.getmembers(queryset_class, predicate=predicate):
            # Only copy missing methods.
            if hasattr(cls, name):
                continue
            # Only copy public methods or methods with the attribute `queryset_only=False`.
            queryset_only = getattr(method, 'queryset_only', None)
            if queryset_only or (queryset_only is None and name.startswith('_')):
                continue
            # Copy the method onto the manager.
            new_methods[name] = create_method(name, method)
        return new_methods

    @classmethod
    def from_queryset(cls, queryset_class, class_name=None):
        # Dynamically create a manager subclass proxying queryset_class.
        if class_name is None:
            class_name = '%sFrom%s' % (cls.__name__, queryset_class.__name__)
        class_dict = {
            '_queryset_class': queryset_class,
        }
        class_dict.update(cls._get_queryset_methods(queryset_class))
        return type(class_name, (cls,), class_dict)

    def contribute_to_class(self, model, name):
        # TODO: Use weakref because of possible memory leak / circular reference.
        self.model = model
        if not self.name:
            self.name = name
        # Only contribute the manager if the model is concrete
        if model._meta.abstract:
            setattr(model, name, AbstractManagerDescriptor(model))
        elif model._meta.swapped:
            setattr(model, name, SwappedManagerDescriptor(model))
        else:
            # if not model._meta.abstract and not model._meta.swapped:
            setattr(model, name, ManagerDescriptor(self))
        # The earliest-created manager wins as the default.
        if (not getattr(model, '_default_manager', None) or
                self.creation_counter < model._default_manager.creation_counter):
            model._default_manager = self

        abstract = False
        if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
            abstract = True
        model._meta.managers.append((self.creation_counter, self, abstract))

    def _set_creation_counter(self):
        """
        Sets the creation counter value for this instance and increments the
        class-level copy.
        """
        self.creation_counter = BaseManager.creation_counter
        BaseManager.creation_counter += 1

    def _copy_to_model(self, model):
        """
        Makes a copy of the manager and assigns it to 'model', which should be
        a child of the existing model (used when inheriting a manager from an
        abstract base class).
        """
        assert issubclass(model, self.model)
        mgr = copy.copy(self)
        mgr._set_creation_counter()
        mgr.model = model
        mgr._inherited = True
        return mgr

    def db_manager(self, using=None, hints=None):
        # Return a copy of this manager bound to another db alias / hints.
        obj = copy.copy(self)
        obj._db = using or self._db
        obj._hints = hints or self._hints
        return obj

    @property
    def db(self):
        # Explicit alias wins; otherwise ask the router for a read database.
        return self._db or router.db_for_read(self.model, **self._hints)

    #######################
    # PROXIES TO QUERYSET #
    #######################

    def get_queryset(self):
        """
        Returns a new QuerySet object. Subclasses can override this method to
        easily customize the behavior of the Manager.
        """
        return self._queryset_class(model=self.model, using=self._db, hints=self._hints)

    def all(self):
        # We can't proxy this method through the `QuerySet` like we do for the
        # rest of the `QuerySet` methods. This is because `QuerySet.all()`
        # works by creating a "copy" of the current queryset and in making said
        # copy, all the cached `prefetch_related` lookups are lost. See the
        # implementation of `RelatedManager.get_queryset()` for a better
        # understanding of how this comes into play.
        return self.get_queryset()

    def __eq__(self, other):
        # Equal when same class constructed with the same arguments.
        return (
            isinstance(other, self.__class__) and
            self._constructor_args == other._constructor_args
        )

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Identity hash: equality above is looser, but managers are used as
        # identity-keyed values in practice.
        return id(self)
class Manager(BaseManager.from_queryset(QuerySet)):
    # The concrete default manager: BaseManager plus every public QuerySet
    # method copied onto it by from_queryset (filter, exclude, get, ...).
    pass
class ManagerDescriptor(object):
    # This class ensures managers aren't accessible via model instances.
    # For example, Poll.objects works, but poll_obj.objects raises AttributeError.
    def __init__(self, manager):
        self.manager = manager

    def __get__(self, instance, type=None):
        # Managers are class-level attributes; hand the manager back only
        # for class access and reject instance access.
        if instance is None:
            return self.manager
        raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
class AbstractManagerDescriptor(object):
    # This class provides a better error message when you try to access a
    # manager on an abstract model.
    def __init__(self, model):
        self.model = model

    def __get__(self, instance, type=None):
        # Abstract models have no table, so a manager can never be used.
        message = "Manager isn't available; %s is abstract" % (
            self.model._meta.object_name,
        )
        raise AttributeError(message)
class SwappedManagerDescriptor(object):
    # This class provides a better error message when you try to access a
    # manager on a swapped model.
    def __init__(self, model):
        self.model = model

    def __get__(self, instance, type=None):
        # Report which model replaced this one via the swappable setting.
        meta = self.model._meta
        message = "Manager isn't available; '%s.%s' has been swapped for '%s'" % (
            meta.app_label,
            meta.object_name,
            meta.swapped,
        )
        raise AttributeError(message)
class EmptyManager(Manager):
    # A manager that always yields an empty queryset for the given model;
    # used where a relation exists but no rows should ever be returned.
    def __init__(self, model):
        super(EmptyManager, self).__init__()
        self.model = model

    def get_queryset(self):
        # Delegate to the parent queryset and immediately empty it.
        return super(EmptyManager, self).get_queryset().none()
| bsd-3-clause |
replicatorg/ReplicatorG | skein_engines/skeinforge-50/fabmetheus_utilities/svg_reader.py | 12 | 38714 | """
Svg reader.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.xml_simple_reader import DocumentNode
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from fabmetheus_utilities import svg_writer
import math
import os
import sys
import traceback
# Module metadata.
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'

# Polygonal approximation densities: points per rounded corner, per bezier
# span (two corners' worth) and per full circle (four corners' worth).
globalNumberOfCornerPoints = 11
globalNumberOfBezierPoints = globalNumberOfCornerPoints + globalNumberOfCornerPoints
globalNumberOfCirclePoints = 4 * globalNumberOfCornerPoints
def addFunctionsToDictionary( dictionary, functions, prefix ):
    "Add functions to dictionary, keyed by name with the prefix removed."
    prefixLength = len(prefix)
    for function in functions:
        # Strip the shared prefix from the function name to form the key.
        dictionary[function.__name__[prefixLength :]] = function
def getArcComplexes(begin, end, largeArcFlag, radius, sweepFlag, xAxisRotation):
    'Get the arc complexes, procedure at http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes'
    # Degenerate arc: identical endpoints produce no points.
    if begin == end:
        print('Warning, begin equals end in getArcComplexes in svgReader')
        print(begin)
        print(end)
        return []
    # Per the SVG implementation notes, negative radii are replaced by
    # their absolute values.
    if radius.imag < 0.0:
        print('Warning, radius.imag is less than zero in getArcComplexes in svgReader')
        print(radius)
        radius = complex(radius.real, abs(radius.imag))
    if radius.real < 0.0:
        print('Warning, radius.real is less than zero in getArcComplexes in svgReader')
        print(radius)
        radius = complex(abs(radius.real), radius.imag)
    # A zero radius degenerates the arc to a straight segment to the end.
    if radius.imag <= 0.0:
        print('Warning, radius.imag is too small for getArcComplexes in svgReader')
        print(radius)
        return [end]
    if radius.real <= 0.0:
        print('Warning, radius.real is too small for getArcComplexes in svgReader')
        print(radius)
        return [end]
    # Rotate into the ellipse's axis-aligned frame and scale to a unit circle.
    xAxisRotationComplex = euclidean.getWiddershinsUnitPolar(xAxisRotation)
    reverseXAxisRotationComplex = complex(xAxisRotationComplex.real, -xAxisRotationComplex.imag)
    beginRotated = begin * reverseXAxisRotationComplex
    endRotated = end * reverseXAxisRotationComplex
    beginTransformed = complex(beginRotated.real / radius.real, beginRotated.imag / radius.imag)
    endTransformed = complex(endRotated.real / radius.real, endRotated.imag / radius.imag)
    midpointTransformed = 0.5 * (beginTransformed + endTransformed)
    midMinusBeginTransformed = midpointTransformed - beginTransformed
    midMinusBeginTransformedLength = abs(midMinusBeginTransformed)
    # "Step 3: Ensure radii are large enough": scale the ellipse up when the
    # endpoints are too far apart for the requested radii.
    if midMinusBeginTransformedLength > 1.0:
        print('The ellipse radius is too small for getArcComplexes in svgReader.')
        print('So the ellipse will be scaled to fit, according to the formulas in "Step 3: Ensure radii are large enough" of:')
        print('http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii')
        print('')
        radius *= midMinusBeginTransformedLength
        beginTransformed /= midMinusBeginTransformedLength
        endTransformed /= midMinusBeginTransformedLength
        midpointTransformed /= midMinusBeginTransformedLength
        midMinusBeginTransformed /= midMinusBeginTransformedLength
        midMinusBeginTransformedLength = 1.0
    # The center lies on the perpendicular bisector of the transformed chord.
    midWiddershinsTransformed = complex(-midMinusBeginTransformed.imag, midMinusBeginTransformed.real)
    midWiddershinsLengthSquared = 1.0 - midMinusBeginTransformedLength * midMinusBeginTransformedLength
    if midWiddershinsLengthSquared < 0.0:
        # Clamp tiny negative values caused by floating point rounding.
        midWiddershinsLengthSquared = 0.0
    midWiddershinsLength = math.sqrt(midWiddershinsLengthSquared)
    midWiddershinsTransformed *= midWiddershinsLength / abs(midWiddershinsTransformed)
    centerTransformed = midpointTransformed
    # The two flags pick which of the two candidate centers is used.
    if largeArcFlag == sweepFlag:
        centerTransformed -= midWiddershinsTransformed
    else:
        centerTransformed += midWiddershinsTransformed
    beginMinusCenterTransformed = beginTransformed - centerTransformed
    beginMinusCenterTransformedLength = abs(beginMinusCenterTransformed)
    if beginMinusCenterTransformedLength <= 0.0:
        # NOTE(review): returns a bare complex here, unlike the lists
        # returned everywhere else - looks like a latent bug; confirm callers.
        return end
    beginAngle = math.atan2(beginMinusCenterTransformed.imag, beginMinusCenterTransformed.real)
    endMinusCenterTransformed = endTransformed - centerTransformed
    angleDifference = euclidean.getAngleDifferenceByComplex(endMinusCenterTransformed, beginMinusCenterTransformed)
    # The sweep flag fixes the direction of travel around the ellipse.
    if sweepFlag:
        if angleDifference < 0.0:
            angleDifference += 2.0 * math.pi
    else:
        if angleDifference > 0.0:
            angleDifference -= 2.0 * math.pi
    # globalSideAngle is defined elsewhere in this module (not in this view).
    global globalSideAngle
    sides = int(math.ceil(abs(angleDifference) / globalSideAngle))
    sideAngle = angleDifference / float(sides)
    arcComplexes = []
    # Sample the arc and map each point back through scale and rotation.
    center = complex(centerTransformed.real * radius.real, centerTransformed.imag * radius.imag) * xAxisRotationComplex
    for side in xrange(1, sides):
        unitPolar = euclidean.getWiddershinsUnitPolar(beginAngle + float(side) * sideAngle)
        circumferential = complex(unitPolar.real * radius.real, unitPolar.imag * radius.imag) * beginMinusCenterTransformedLength
        point = center + circumferential * xAxisRotationComplex
        arcComplexes.append(point)
    arcComplexes.append(end)
    return arcComplexes
def getChainMatrixSVG(elementNode, matrixSVG):
    "Get chain matrixSVG by svgElement."
    # Walk up the ancestor chain, folding each node's transform into the
    # accumulated matrix (self first, then each parent in turn).
    node = elementNode
    while node != None:
        matrixSVG = matrixSVG.getOtherTimesSelf(getMatrixSVG(node).tricomplex)
        node = node.parentNode
    return matrixSVG
def getChainMatrixSVGIfNecessary(elementNode, yAxisPointingUpward):
    "Get chain matrixSVG by svgElement and yAxisPointingUpward."
    # An upward-pointing y axis needs no coordinate chain: return identity.
    if yAxisPointingUpward:
        return MatrixSVG()
    return getChainMatrixSVG(elementNode, MatrixSVG())
def getCubicPoint( along, begin, controlPoints, end ):
    'Get the cubic point.'
    # De Casteljau step: evaluate the two quadratic sub-curves of the hull,
    # then interpolate between them.
    firstQuadratic = getQuadraticPoint(along, begin, controlPoints[0], controlPoints[1])
    secondQuadratic = getQuadraticPoint(along, controlPoints[0], controlPoints[1], end)
    complement = 1.0 - along
    return complement * firstQuadratic + along * secondQuadratic
def getCubicPoints( begin, controlPoints, end, numberOfBezierPoints=globalNumberOfBezierPoints):
    'Get the cubic points.'
    # Sample the curve at evenly spaced parameters (1/n, 2/n, ..., 1.0).
    step = 1.0 / float(numberOfBezierPoints)
    return [getCubicPoint(step * pointIndex, begin, controlPoints, end)
        for pointIndex in range(1, numberOfBezierPoints + 1)]
def getFontReader(fontFamily):
    'Get the font reader for the fontFamily.'
    # Font files are named with lowercase, underscore-separated family names.
    fontLower = fontFamily.lower().replace(' ', '_')
    global globalFontReaderDictionary
    if fontLower in globalFontReaderDictionary:
        return globalFontReaderDictionary[fontLower]
    global globalFontFileNames
    if globalFontFileNames == None:
        # Lazily list the available font files once per process.
        globalFontFileNames = archive.getFileNamesByFilePaths(archive.getFilePathsByDirectory(getFontsDirectoryPath()))
    if fontLower not in globalFontFileNames:
        # Unknown fonts fall back to the bundled Gentium Basic Regular.
        print('Warning, the %s font was not found in the fabmetheus_utilities/fonts folder, so Gentium Basic Regular will be substituted.' % fontFamily)
        print('The available fonts are:')
        globalFontFileNames.sort()
        print(globalFontFileNames)
        print('')
        fontLower = 'gentium_basic_regular'
    # Cache the reader so each font file is parsed at most once.
    fontReader = FontReader(fontLower)
    globalFontReaderDictionary[fontLower] = fontReader
    return fontReader
def getFontsDirectoryPath():
    "Get the fonts directory path."
    # Fonts live in the fabmetheus_utilities/fonts folder of the install.
    return archive.getFabmetheusUtilitiesPath('fonts')
def getLabelString(dictionary):
    "Get the label string for the dictionary."
    # Return the value of the first key containing 'label' (e.g. the
    # inkscape:label attribute); empty string when none matches.
    for key, value in dictionary.items():
        if 'label' in key:
            return value
    return ''
def getMatrixSVG(elementNode):
    "Get matrixSVG by svgElement."
    matrixSVG = MatrixSVG()
    if 'transform' not in elementNode.attributes:
        return matrixSVG
    # Tokenize "op(args) op(args)..." into alternating op / argument words.
    transformWords = []
    for transformWord in elementNode.attributes['transform'].replace(')', '(').split('('):
        transformWordStrip = transformWord.strip()
        if transformWordStrip != '':  # workaround for split(character) bug which leaves an extra empty element
            transformWords.append(transformWordStrip)
    # globalGetTricomplexDictionary maps operation names (matrix, rotate,
    # scale, skewX, skewY, translate) to the getTricomplex* builders; it is
    # populated elsewhere in this module.
    global globalGetTricomplexDictionary
    getTricomplexDictionaryKeys = globalGetTricomplexDictionary.keys()
    for transformWordIndex, transformWord in enumerate(transformWords):
        if transformWord in getTricomplexDictionaryKeys:
            # The token after the operation name carries its arguments.
            transformString = transformWords[transformWordIndex + 1].replace(',', ' ')
            matrixSVG = matrixSVG.getSelfTimesOther(globalGetTricomplexDictionary[ transformWord ](transformString.split()))
    return matrixSVG
def getQuadraticPoint( along, begin, controlPoint, end ):
    'Get the quadratic point.'
    # De Casteljau: interpolate each leg, then interpolate between the legs.
    complement = 1.0 - along
    legBegin = complement * begin + along * controlPoint
    legEnd = complement * controlPoint + along * end
    return complement * legBegin + along * legEnd
def getQuadraticPoints(begin, controlPoint, end, numberOfBezierPoints=globalNumberOfBezierPoints):
    'Get the quadratic points.'
    # Sample the curve at evenly spaced parameters (1/n, 2/n, ..., 1.0).
    step = 1.0 / float(numberOfBezierPoints)
    return [getQuadraticPoint(step * pointIndex, begin, controlPoint, end)
        for pointIndex in range(1, numberOfBezierPoints + 1)]
def getRightStripAlphabetPercent(word):
    "Get word with alphabet characters and the percent sign stripped from the right."
    word = word.strip()
    # Scan from the right; the first character that is neither a letter nor
    # '%' marks the end of the numeric part.
    for characterIndex in range(len(word) - 1, -1, -1):
        character = word[characterIndex]
        if character.isalpha() or character == '%':
            continue
        return float(word[: characterIndex + 1])
    # Entirely letters/percent signs (or empty): no numeric part.
    return None
def getRightStripMinusSplit(lineString):
    """Get the words of lineString, with whitespace after minus signs removed.

    Bug fix: the original loop compared the pre-replace length with '<',
    which can never be true after a shrinking replace, so only one
    replacement pass ever ran and inputs like 'x -  5' split into
    ['x', '-', '5'].  Iterating until no '- ' remains collapses any run of
    spaces after the sign, yielding ['x', '-5'] as intended.
    """
    # Repeat until a fixed point: each pass may expose a new '- ' pair.
    while '- ' in lineString:
        lineString = lineString.replace('- ', '-')
    return lineString.split()
def getStrokeRadius(elementNode):
    "Get the stroke radius, half the element's stroke-width (default 1.0)."
    strokeWidth = getStyleValue('1.0', elementNode, 'stroke-width')
    return 0.5 * getRightStripAlphabetPercent(strokeWidth)
def getStyleValue(defaultValue, elementNode, key):
    "Get the value for key from the style attribute, the plain attribute, or the ancestors; defaultValue if absent everywhere."
    attributes = elementNode.attributes
    # First look inside the inline 'style' attribute.
    if 'style' in attributes:
        styleLine = attributes['style']
        keyIndex = styleLine.find(key)
        if keyIndex > -1:
            styleWords = styleLine[keyIndex :].replace(':', ' ').replace(';', ' ').split()
            if len(styleWords) > 1:
                return styleWords[1]
    # Next, a plain attribute of the same name.
    if key in attributes:
        return attributes[key]
    # Finally recurse up the ancestor chain.
    if elementNode.parentNode == None:
        return defaultValue
    return getStyleValue(defaultValue, elementNode.parentNode, key)
def getTextComplexLoops(fontFamily, fontSize, text, yAxisPointingUpward=True):
    "Get text as complex loops."
    textComplexLoops = []
    fontReader = getFontReader(fontFamily)
    # Lay out glyphs left to right, advancing a cursor by each glyph's width
    # (in font units; scaling happens inside getSizedAdvancedLoops).
    horizontalAdvanceX = 0.0
    for character in text:
        glyph = fontReader.getGlyph(character, yAxisPointingUpward)
        textComplexLoops += glyph.getSizedAdvancedLoops(fontSize, horizontalAdvanceX, yAxisPointingUpward)
        horizontalAdvanceX += glyph.horizontalAdvanceX
    return textComplexLoops
def getTransformedFillOutline(elementNode, loop, yAxisPointingUpward):
    "Get the loops if fill is on, otherwise get the outlines."
    # Without a fill, the stroke itself is outlined; with a fill, the loop
    # is used directly.
    if getStyleValue('none', elementNode, 'fill').lower() == 'none':
        fillOutlineLoops = intercircle.getAroundsFromLoop(loop, getStrokeRadius(elementNode))
    else:
        fillOutlineLoops = [loop]
    chainMatrix = getChainMatrixSVGIfNecessary(elementNode, yAxisPointingUpward)
    return chainMatrix.getTransformedPaths(fillOutlineLoops)
def getTransformedOutlineByPath(elementNode, path, yAxisPointingUpward):
    "Get the outline from the path."
    # Outline the open path at the stroke radius, then apply the transforms.
    outlineLoops = intercircle.getAroundsFromPath(path, getStrokeRadius(elementNode))
    chainMatrix = getChainMatrixSVGIfNecessary(elementNode, yAxisPointingUpward)
    return chainMatrix.getTransformedPaths(outlineLoops)
def getTransformedOutlineByPaths(elementNode, paths, yAxisPointingUpward):
    "Get the outline from the paths."
    # Outline every path at the stroke radius, then apply the transforms.
    outlineLoops = intercircle.getAroundsFromPaths(paths, getStrokeRadius(elementNode))
    chainMatrix = getChainMatrixSVGIfNecessary(elementNode, yAxisPointingUpward)
    return chainMatrix.getTransformedPaths(outlineLoops)
def getTricomplexmatrix(transformWords):
    "Get matrixSVG by transformWords."
    # The six 'matrix(a b c d e f)' entries become three complex columns:
    # x column (a, b), y column (c, d) and translation (e, f).
    return [
        euclidean.getComplexByWords(transformWords),
        euclidean.getComplexByWords(transformWords, 2),
        euclidean.getComplexByWords(transformWords, 4)]
def getTricomplexrotate(transformWords):
    "Get matrixSVG by transformWords."
    angle = math.radians(float(transformWords[0]))
    rotationColumn = euclidean.getWiddershinsUnitPolar(angle)
    # The y column is the x column rotated a quarter turn; no translation.
    return [rotationColumn, complex(-rotationColumn.imag, rotationColumn.real), complex()]
def getTricomplexscale(transformWords):
    "Get matrixSVG by transformWords."
    scaleComplex = euclidean.getComplexByWords(transformWords)
    # Diagonal matrix: x scale in the first column, y scale in the second.
    return [complex(scaleComplex.real, 0.0), complex(0.0, scaleComplex.imag), complex()]
def getTricomplexskewX(transformWords):
    "Get matrixSVG by transformWords."
    # Shear along x: the tangent of the angle goes into the y column.
    skewTangent = math.tan(math.radians(float(transformWords[0])))
    return [complex(1.0, 0.0), complex(skewTangent, 1.0), complex()]
def getTricomplexskewY(transformWords):
    "Get matrixSVG by transformWords."
    # Shear along y: the tangent of the angle goes into the x column.
    skewTangent = math.tan(math.radians(float(transformWords[0])))
    return [complex(1.0, skewTangent), complex(0.0, 1.0), complex()]
def getTricomplexTimesColumn(firstTricomplex, otherColumn):
    "Get this matrix multiplied by the otherColumn."
    # Two-by-two matrix times column vector, in complex-column form.
    columnX = firstTricomplex[0]
    columnY = firstTricomplex[1]
    return complex(
        columnX.real * otherColumn.real + columnY.real * otherColumn.imag,
        columnX.imag * otherColumn.real + columnY.imag * otherColumn.imag)
def getTricomplexTimesOther(firstTricomplex, otherTricomplex):
    "Get the first tricomplex multiplied by the other tricomplex."
    # A down, B right from http://en.wikipedia.org/wiki/Matrix_multiplication
    product = [getTricomplexTimesColumn(firstTricomplex, column) for column in otherTricomplex[: 2]]
    # The translation column also picks up the first matrix's translation.
    product.append(getTricomplexTimesColumn(firstTricomplex, otherTricomplex[2]) + firstTricomplex[2])
    return product
def getTricomplextranslate(transformWords):
    "Get matrixSVG by transformWords."
    offset = euclidean.getComplexByWords(transformWords)
    # Identity columns with the parsed offset as the translation column.
    return [complex(1.0, 0.0), complex(0.0, 1.0), offset]
def processSVGElementcircle( elementNode, svgReader ):
    "Process elementNode by svgReader."
    attributes = elementNode.attributes
    center = euclidean.getComplexDefaultByDictionaryKeys( complex(), attributes, 'cx', 'cy')
    radius = euclidean.getFloatDefaultByDictionary( 0.0, attributes, 'r')
    if radius == 0.0:
        print('Warning, in processSVGElementcircle in svgReader radius is zero in:')
        print(attributes)
        return
    global globalNumberOfCirclePoints
    global globalSideAngle
    loop = []
    loopLayer = svgReader.getLoopLayer()
    # Approximate the circle by a regular polygon with a fixed side count.
    for side in xrange( globalNumberOfCirclePoints ):
        unitPolar = euclidean.getWiddershinsUnitPolar( float(side) * globalSideAngle )
        loop.append( center + radius * unitPolar )
    loopLayer.loops += getTransformedFillOutline(elementNode, loop, svgReader.yAxisPointingUpward)
def processSVGElementellipse( elementNode, svgReader ):
    "Process elementNode by svgReader."
    attributes = elementNode.attributes
    center = euclidean.getComplexDefaultByDictionaryKeys( complex(), attributes, 'cx', 'cy')
    radius = euclidean.getComplexDefaultByDictionaryKeys( complex(), attributes, 'rx', 'ry')
    if radius.real == 0.0 or radius.imag == 0.0:
        print('Warning, in processSVGElementellipse in svgReader radius is zero in:')
        print(attributes)
        return
    global globalNumberOfCirclePoints
    global globalSideAngle
    loop = []
    loopLayer = svgReader.getLoopLayer()
    # Approximate the ellipse by scaling each unit-circle sample per axis.
    for side in xrange( globalNumberOfCirclePoints ):
        unitPolar = euclidean.getWiddershinsUnitPolar( float(side) * globalSideAngle )
        loop.append( center + complex( unitPolar.real * radius.real, unitPolar.imag * radius.imag ) )
    loopLayer.loops += getTransformedFillOutline(elementNode, loop, svgReader.yAxisPointingUpward)
def processSVGElementg(elementNode, svgReader):
    'Process elementNode by svgReader.'
    if 'id' not in elementNode.attributes:
        return
    idString = elementNode.attributes['id']
    # A group marked as the control section stops further processing of the
    # document (the rest of the svg is settings, not geometry).
    if 'beginningOfControlSection' in elementNode.attributes:
        if elementNode.attributes['beginningOfControlSection'].lower()[: 1] == 't':
            svgReader.stopProcessing = True
        return
    # Look for a 'z:<height>' marker in the id, falling back to the label.
    idStringLower = idString.lower()
    zIndex = idStringLower.find('z:')
    if zIndex < 0:
        idStringLower = getLabelString(elementNode.attributes)
        zIndex = idStringLower.find('z:')
        if zIndex < 0:
            return
    floatFromValue = euclidean.getFloatFromValue(idStringLower[zIndex + len('z:') :].strip())
    if floatFromValue != None:
        # Record the layer height for subsequent loop layers.
        svgReader.z = floatFromValue
def processSVGElementline(elementNode, svgReader):
    "Process elementNode by svgReader."
    attributes = elementNode.attributes
    # A line is an open two-point path outlined at the stroke radius.
    lineBegin = euclidean.getComplexDefaultByDictionaryKeys(complex(), attributes, 'x1', 'y1')
    lineEnd = euclidean.getComplexDefaultByDictionaryKeys(complex(), attributes, 'x2', 'y2')
    loopLayer = svgReader.getLoopLayer()
    loopLayer.loops += getTransformedOutlineByPath(elementNode, [lineBegin, lineEnd], svgReader.yAxisPointingUpward)
def processSVGElementpath( elementNode, svgReader ):
    "Process elementNode by svgReader."
    # The 'd' attribute holds the path data; without it there is nothing to read.
    if 'd' not in elementNode.attributes:
        print('Warning, in processSVGElementpath in svgReader can not get a value for d in:')
        print(elementNode.attributes)
        return
    PathReader(elementNode, svgReader.getLoopLayer().loops, svgReader.yAxisPointingUpward)
def processSVGElementpolygon( elementNode, svgReader ):
    "Process elementNode by svgReader."
    if 'points' not in elementNode.attributes:
        # Fixed warning text: this handler reads 'points', not 'd' (the
        # original message was copy-pasted from the path handler).
        print('Warning, in processSVGElementpolygon in svgReader can not get a value for points in:')
        print(elementNode.attributes)
        return
    loopLayer = svgReader.getLoopLayer()
    # Pairs of coordinate words become the closed polygon loop.
    words = getRightStripMinusSplit(elementNode.attributes['points'].replace(',', ' '))
    loop = []
    for wordIndex in xrange( 0, len(words), 2 ):
        loop.append(euclidean.getComplexByWords(words[wordIndex :]))
    loopLayer.loops += getTransformedFillOutline(elementNode, loop, svgReader.yAxisPointingUpward)
def processSVGElementpolyline(elementNode, svgReader):
    "Process elementNode by svgReader."
    if 'points' not in elementNode.attributes:
        # Fixed warning text: this handler reads 'points', not 'd' (the
        # original message was copy-pasted from the path handler).
        print('Warning, in processSVGElementpolyline in svgReader can not get a value for points in:')
        print(elementNode.attributes)
        return
    loopLayer = svgReader.getLoopLayer()
    # Pairs of coordinate words become the open polyline path.
    words = getRightStripMinusSplit(elementNode.attributes['points'].replace(',', ' '))
    path = []
    for wordIndex in xrange(0, len(words), 2):
        path.append(euclidean.getComplexByWords(words[wordIndex :]))
    loopLayer.loops += getTransformedOutlineByPath(elementNode, path, svgReader.yAxisPointingUpward)
def processSVGElementrect( elementNode, svgReader ):
    "Process elementNode by svgReader."
    attributes = elementNode.attributes
    height = euclidean.getFloatDefaultByDictionary( 0.0, attributes, 'height')
    if height == 0.0:
        print('Warning, in processSVGElementrect in svgReader height is zero in:')
        print(attributes)
        return
    width = euclidean.getFloatDefaultByDictionary( 0.0, attributes, 'width')
    if width == 0.0:
        print('Warning, in processSVGElementrect in svgReader width is zero in:')
        print(attributes)
        return
    center = euclidean.getComplexDefaultByDictionaryKeys(complex(), attributes, 'x', 'y')
    inradius = 0.5 * complex( width, height )
    cornerRadius = euclidean.getComplexDefaultByDictionaryKeys( complex(), attributes, 'rx', 'ry')
    loopLayer = svgReader.getLoopLayer()
    if cornerRadius.real == 0.0 and cornerRadius.imag == 0.0:
        # Sharp corners: the rectangle is just its four corner points.
        inradiusMinusX = complex( - inradius.real, inradius.imag )
        loop = [center + inradius, center + inradiusMinusX, center - inradius, center - inradiusMinusX]
        loopLayer.loops += getTransformedFillOutline(elementNode, loop, svgReader.yAxisPointingUpward)
        return
    # Per the SVG spec, one missing corner radius copies the other axis.
    if cornerRadius.real == 0.0:
        cornerRadius = complex( cornerRadius.imag, cornerRadius.imag )
    elif cornerRadius.imag == 0.0:
        cornerRadius = complex( cornerRadius.real, cornerRadius.real )
    # Corner radii may not exceed half of the rectangle's extent.
    cornerRadius = complex( min( cornerRadius.real, inradius.real ), min( cornerRadius.imag, inradius.imag ) )
    ellipsePath = [ complex( cornerRadius.real, 0.0 ) ]
    inradiusMinusCorner = inradius - cornerRadius
    loop = []
    global globalNumberOfCornerPoints
    global globalSideAngle
    # Sample a quarter ellipse for one rounded corner.
    for side in xrange( 1, globalNumberOfCornerPoints ):
        unitPolar = euclidean.getWiddershinsUnitPolar( float(side) * globalSideAngle )
        ellipsePath.append( complex( unitPolar.real * cornerRadius.real, unitPolar.imag * cornerRadius.imag ) )
    ellipsePath.append( complex( 0.0, cornerRadius.imag ) )
    cornerPoints = []
    for point in ellipsePath:
        cornerPoints.append( point + inradiusMinusCorner )
    cornerPointsReversed = cornerPoints[: : -1]
    # Mirror the sampled corner into all four quadrants, alternating the
    # traversal direction to keep a consistent winding around the loop.
    for cornerPoint in cornerPoints:
        loop.append( center + cornerPoint )
    for cornerPoint in cornerPointsReversed:
        loop.append( center + complex( - cornerPoint.real, cornerPoint.imag ) )
    for cornerPoint in cornerPoints:
        loop.append( center - cornerPoint )
    for cornerPoint in cornerPointsReversed:
        loop.append( center + complex( cornerPoint.real, - cornerPoint.imag ) )
    # Drop near-duplicate points created where quadrants meet.
    loop = euclidean.getLoopWithoutCloseSequentialPoints( 0.0001 * abs(inradius), loop )
    loopLayer.loops += getTransformedFillOutline(elementNode, loop, svgReader.yAxisPointingUpward)
def processSVGElementtext(elementNode, svgReader):
    "Process elementNode by svgReader."
    # Text is only handled for the standard downward-pointing svg y axis.
    if svgReader.yAxisPointingUpward:
        return
    fontFamily = getStyleValue('Gentium Basic Regular', elementNode, 'font-family')
    fontSize = getRightStripAlphabetPercent(getStyleValue('12.0', elementNode, 'font-size'))
    matrixSVG = getChainMatrixSVGIfNecessary(elementNode, svgReader.yAxisPointingUpward)
    loopLayer = svgReader.getLoopLayer()
    # The x/y attributes position the text block before the chain transform.
    translate = euclidean.getComplexDefaultByDictionaryKeys(complex(), elementNode.attributes, 'x', 'y')
    for textComplexLoop in getTextComplexLoops(fontFamily, fontSize, elementNode.getTextContent(), svgReader.yAxisPointingUpward):
        translatedLoop = []
        for textComplexPoint in textComplexLoop:
            translatedLoop.append(textComplexPoint + translate )
        loopLayer.loops.append(matrixSVG.getTransformedPath(translatedLoop))
class FontReader:
    "Class to read a font in the fonts folder."

    def __init__(self, fontFamily):
        "Initialize by parsing the svg font file for fontFamily."
        self.fontFamily = fontFamily
        # Parsed Glyph objects, keyed by unicode character.
        self.glyphDictionary = {}
        # Raw glyph xml element nodes, keyed by unicode character.
        self.glyphElementNodeDictionary = {}
        self.missingGlyph = None
        fileName = os.path.join(getFontsDirectoryPath(), fontFamily + '.svg')
        documentElement = DocumentNode(fileName, archive.getFileText(fileName)).getDocumentElement()
        self.fontElementNode = documentElement.getFirstChildByLocalName('defs').getFirstChildByLocalName('font')
        self.fontFaceElementNode = self.fontElementNode.getFirstChildByLocalName('font-face')
        # units-per-em is the design grid size used to scale glyph outlines.
        self.unitsPerEM = float(self.fontFaceElementNode.attributes['units-per-em'])
        glyphElementNodes = self.fontElementNode.getChildElementsByLocalName('glyph')
        for glyphElementNode in glyphElementNodes:
            self.glyphElementNodeDictionary[glyphElementNode.attributes['unicode']] = glyphElementNode

    def getGlyph(self, character, yAxisPointingUpward):
        "Get the glyph for the character."
        if character not in self.glyphElementNodeDictionary:
            # Fall back to the font's missing-glyph element, parsed lazily.
            if self.missingGlyph == None:
                missingGlyphElementNode = self.fontElementNode.getFirstChildByLocalName('missing-glyph')
                self.missingGlyph = Glyph(missingGlyphElementNode, self.unitsPerEM, yAxisPointingUpward)
            return self.missingGlyph
        if character not in self.glyphDictionary:
            # Parse each glyph at most once and cache the result.
            self.glyphDictionary[character] = Glyph(self.glyphElementNodeDictionary[character], self.unitsPerEM, yAxisPointingUpward)
        return self.glyphDictionary[character]
class Glyph:
    "Class to handle a glyph."

    def __init__(self, elementNode, unitsPerEM, yAxisPointingUpward):
        "Initialize, reading the outline loops from the glyph's path data."
        self.horizontalAdvanceX = float(elementNode.attributes['horiz-adv-x'])
        self.loops = []
        self.unitsPerEM = unitsPerEM
        # Clear the fill so PathReader treats the outline as plain loops.
        elementNode.attributes['fill'] = ''
        if 'd' not in elementNode.attributes:
            # Glyphs without path data (e.g. the space) have no loops.
            return
        PathReader(elementNode, self.loops, yAxisPointingUpward)

    def getSizedAdvancedLoops(self, fontSize, horizontalAdvanceX, yAxisPointingUpward=True):
        "Get loops for font size, advanced horizontally."
        # Scale from font design units to the requested point size; flip y
        # when the coordinate system points downward.
        multiplierX = fontSize / self.unitsPerEM
        multiplierY = multiplierX
        if not yAxisPointingUpward:
            multiplierY = -multiplierY
        sizedLoops = []
        for loop in self.loops:
            sizedLoop = []
            sizedLoops.append(sizedLoop)
            for point in loop:
                sizedLoop.append( complex(multiplierX * (point.real + horizontalAdvanceX), multiplierY * point.imag))
        return sizedLoops
class MatrixSVG:
	"""Affine transform stored as a tricomplex: (x column, y column, translation).

	A tricomplex of None means the identity transform, in which case inputs
	are returned untouched.
	"""
	def __init__(self, tricomplex=None):
		"""Store the tricomplex, defaulting to the identity (None)."""
		self.tricomplex = tricomplex
	def __repr__(self):
		"""Represent the matrix by its tricomplex."""
		return str(self.tricomplex)
	def getOtherTimesSelf(self, otherTricomplex):
		"""Return other * self as a new MatrixSVG; identities short-circuit."""
		if otherTricomplex is None:
			return MatrixSVG(self.tricomplex)
		if self.tricomplex is None:
			return MatrixSVG(otherTricomplex)
		return MatrixSVG(getTricomplexTimesOther(otherTricomplex, self.tricomplex))
	def getSelfTimesOther(self, otherTricomplex):
		"""Return self * other as a new MatrixSVG; identities short-circuit."""
		if otherTricomplex is None:
			return MatrixSVG(self.tricomplex)
		if self.tricomplex is None:
			return MatrixSVG(otherTricomplex)
		return MatrixSVG(getTricomplexTimesOther(self.tricomplex, otherTricomplex))
	def getTransformedPath(self, path):
		"""Return the path transformed by this matrix; identity returns it unchanged."""
		if self.tricomplex is None:
			return path
		columnX, columnY, translation = self.tricomplex
		return [
			complex(columnX.real * point.real + columnY.real * point.imag,
				columnX.imag * point.real + columnY.imag * point.imag) + translation
			for point in path]
	def getTransformedPaths(self, paths):
		"""Return each path transformed; identity returns the original list object."""
		if self.tricomplex is None:
			return paths
		return [self.getTransformedPath(path) for path in paths]
class PathReader:
	"Class to read svg path."
	# Parses the 'd' attribute of an svg path element into outline paths and
	# closed loops.  Each processPathWord* method handles one svg path command
	# letter (uppercase = absolute coordinates, lowercase = relative) and
	# consumes the parameter words that follow it in self.words.
	def __init__(self, elementNode, loops, yAxisPointingUpward):
		"Add to path string to loops."
		self.controlPoints = None
		self.elementNode = elementNode
		self.loops = loops
		self.oldPoint = None
		self.outlinePaths = []
		self.path = []
		self.yAxisPointingUpward = yAxisPointingUpward
		# Normalize separators, then pad every command letter with spaces so the
		# split below yields one word per command letter or coordinate value.
		pathString = elementNode.attributes['d'].replace(',', ' ')
		global globalProcessPathWordDictionary
		processPathWordDictionaryKeys = globalProcessPathWordDictionary.keys()
		for processPathWordDictionaryKey in processPathWordDictionaryKeys:
			pathString = pathString.replace( processPathWordDictionaryKey, ' %s ' % processPathWordDictionaryKey )
		self.words = getRightStripMinusSplit(pathString)
		# NOTE: the for statement reassigns self.wordIndex on every iteration,
		# so the wordIndex increments inside the handlers only matter for the
		# lookahead loops within a single command dispatch; parameter words are
		# simply skipped here because they are not dictionary keys.
		for self.wordIndex in xrange( len( self.words ) ):
			word = self.words[ self.wordIndex ]
			if word in processPathWordDictionaryKeys:
				globalProcessPathWordDictionary[word](self)
		# Flush any unclosed subpath, then transform everything into loops.
		if len(self.path) > 0:
			self.outlinePaths.append(self.path)
		self.loops += getTransformedOutlineByPaths(elementNode, self.outlinePaths, yAxisPointingUpward)
	def addPathArc( self, end ):
		"Add an arc to the path."
		begin = self.getOldPoint()
		self.controlPoints = None
		# Arc parameters: rx ry x-axis-rotation large-arc-flag sweep-flag x y.
		radius = self.getComplexByExtraIndex(1)
		xAxisRotation = math.radians(float(self.words[self.wordIndex + 3]))
		largeArcFlag = euclidean.getBooleanFromValue(self.words[ self.wordIndex + 4 ])
		sweepFlag = euclidean.getBooleanFromValue(self.words[ self.wordIndex + 5 ])
		self.path += getArcComplexes(begin, end, largeArcFlag, radius, sweepFlag, xAxisRotation)
		self.wordIndex += 8
	def addPathCubic( self, controlPoints, end ):
		"Add a cubic curve to the path."
		begin = self.getOldPoint()
		# Remember the control points so a following S/s command can reflect them.
		self.controlPoints = controlPoints
		self.path += getCubicPoints( begin, controlPoints, end )
		self.wordIndex += 7
	def addPathCubicReflected( self, controlPoint, end ):
		"Add a cubic curve to the path from a reflected control point."
		begin = self.getOldPoint()
		controlPointBegin = begin
		# Reflect the previous cubic's second control point about the current
		# point; if the previous command was not a cubic, use the current point.
		if self.controlPoints != None:
			if len(self.controlPoints) == 2:
				controlPointBegin = begin + begin - self.controlPoints[-1]
		self.controlPoints = [controlPointBegin, controlPoint]
		self.path += getCubicPoints(begin, self.controlPoints, end)
		self.wordIndex += 5
	def addPathLine(self, lineFunction, point):
		"Add a line to the path."
		self.controlPoints = None
		self.path.append(point)
		self.wordIndex += 3
		# Keep consuming coordinate pairs until the next command letter.
		self.addPathLineByFunction(lineFunction)
	def addPathLineAxis(self, point):
		"Add an axis line to the path."
		self.controlPoints = None
		self.path.append(point)
		self.wordIndex += 2
	def addPathLineByFunction( self, lineFunction ):
		"Add a line to the path by line function."
		# Implicit repeated lineto: loop while the next word is still numeric.
		while 1:
			if self.getFloatByExtraIndex() == None:
				return
			self.path.append(lineFunction())
			self.wordIndex += 2
	def addPathMove( self, lineFunction, point ):
		"Add an axis line to the path."
		self.controlPoints = None
		# A move starts a new subpath; stash the finished one and remember its
		# last point so relative commands still have an anchor.
		if len(self.path) > 0:
			self.outlinePaths.append(self.path)
			self.oldPoint = self.path[-1]
		self.path = [point]
		self.wordIndex += 3
		self.addPathLineByFunction(lineFunction)
	def addPathQuadratic( self, controlPoint, end ):
		"Add a quadratic curve to the path."
		begin = self.getOldPoint()
		# Remember the control point so a following T/t command can reflect it.
		self.controlPoints = [controlPoint]
		self.path += getQuadraticPoints(begin, controlPoint, end)
		self.wordIndex += 5
	def addPathQuadraticReflected( self, end ):
		"Add a quadratic curve to the path from a reflected control point."
		begin = self.getOldPoint()
		controlPoint = begin
		# Reflect the previous quadratic's control point about the current point.
		if self.controlPoints != None:
			if len( self.controlPoints ) == 1:
				controlPoint = begin + begin - self.controlPoints[-1]
		self.controlPoints = [ controlPoint ]
		self.path += getQuadraticPoints(begin, controlPoint, end)
		self.wordIndex += 3
	def getComplexByExtraIndex( self, extraIndex=0 ):
		'Get complex from the extraIndex.'
		return euclidean.getComplexByWords(self.words, self.wordIndex + extraIndex)
	def getComplexRelative(self):
		"Get relative complex."
		return self.getComplexByExtraIndex() + self.getOldPoint()
	def getFloatByExtraIndex( self, extraIndex=0 ):
		'Get float from the extraIndex.'
		# Returns None past the end of the words or at a command letter, which
		# terminates the implicit-repeat loops above.
		totalIndex = self.wordIndex + extraIndex
		if totalIndex >= len(self.words):
			return None
		word = self.words[totalIndex]
		if word[: 1].isalpha():
			return None
		return euclidean.getFloatFromValue(word)
	def getOldPoint(self):
		'Get the old point.'
		# Current pen position: the end of the open subpath, or the last point
		# of the previous subpath right after a move/close.
		if len(self.path) > 0:
			return self.path[-1]
		return self.oldPoint
	def processPathWordA(self):
		'Process path word A.'
		self.addPathArc( self.getComplexByExtraIndex( 6 ) )
	def processPathWorda(self):
		'Process path word a.'
		self.addPathArc(self.getComplexByExtraIndex(6) + self.getOldPoint())
	def processPathWordC(self):
		'Process path word C.'
		end = self.getComplexByExtraIndex( 5 )
		self.addPathCubic( [ self.getComplexByExtraIndex( 1 ), self.getComplexByExtraIndex(3) ], end )
	def processPathWordc(self):
		'Process path word C.'
		begin = self.getOldPoint()
		end = self.getComplexByExtraIndex( 5 )
		self.addPathCubic( [ self.getComplexByExtraIndex( 1 ) + begin, self.getComplexByExtraIndex(3) + begin ], end + begin )
	def processPathWordH(self):
		"Process path word H."
		# Horizontal lineto: only x values follow; y stays at the pen position.
		beginY = self.getOldPoint().imag
		self.addPathLineAxis(complex(float(self.words[self.wordIndex + 1]), beginY))
		while 1:
			floatByExtraIndex = self.getFloatByExtraIndex()
			if floatByExtraIndex == None:
				return
			self.path.append(complex(floatByExtraIndex, beginY))
			self.wordIndex += 1
	def processPathWordh(self):
		"Process path word h."
		begin = self.getOldPoint()
		self.addPathLineAxis(complex(float(self.words[self.wordIndex + 1]) + begin.real, begin.imag))
		while 1:
			floatByExtraIndex = self.getFloatByExtraIndex()
			if floatByExtraIndex == None:
				return
			self.path.append(complex(floatByExtraIndex + self.getOldPoint().real, begin.imag))
			self.wordIndex += 1
	def processPathWordL(self):
		"Process path word L."
		self.addPathLine(self.getComplexByExtraIndex, self.getComplexByExtraIndex( 1 ))
	def processPathWordl(self):
		"Process path word l."
		self.addPathLine(self.getComplexRelative, self.getComplexByExtraIndex(1) + self.getOldPoint())
	def processPathWordM(self):
		"Process path word M."
		self.addPathMove(self.getComplexByExtraIndex, self.getComplexByExtraIndex(1))
	def processPathWordm(self):
		"Process path word m."
		self.addPathMove(self.getComplexRelative, self.getComplexByExtraIndex(1) + self.getOldPoint())
	def processPathWordQ(self):
		'Process path word Q.'
		self.addPathQuadratic( self.getComplexByExtraIndex( 1 ), self.getComplexByExtraIndex(3) )
	def processPathWordq(self):
		'Process path word q.'
		begin = self.getOldPoint()
		self.addPathQuadratic(self.getComplexByExtraIndex(1) + begin, self.getComplexByExtraIndex(3) + begin)
	def processPathWordS(self):
		'Process path word S.'
		self.addPathCubicReflected( self.getComplexByExtraIndex( 1 ), self.getComplexByExtraIndex(3) )
	def processPathWords(self):
		'Process path word s.'
		begin = self.getOldPoint()
		self.addPathCubicReflected(self.getComplexByExtraIndex(1) + begin, self.getComplexByExtraIndex(3) + begin)
	def processPathWordT(self):
		'Process path word T.'
		self.addPathQuadraticReflected( self.getComplexByExtraIndex( 1 ) )
	def processPathWordt(self):
		'Process path word t.'
		self.addPathQuadraticReflected(self.getComplexByExtraIndex(1) + self.getOldPoint())
	def processPathWordV(self):
		"Process path word V."
		# Vertical lineto: only y values follow; x stays at the pen position.
		beginX = self.getOldPoint().real
		self.addPathLineAxis(complex(beginX, float(self.words[self.wordIndex + 1])))
		while 1:
			floatByExtraIndex = self.getFloatByExtraIndex()
			if floatByExtraIndex == None:
				return
			self.path.append(complex(beginX, floatByExtraIndex))
			self.wordIndex += 1
	def processPathWordv(self):
		"Process path word v."
		begin = self.getOldPoint()
		self.addPathLineAxis(complex(begin.real, float(self.words[self.wordIndex + 1]) + begin.imag))
		while 1:
			floatByExtraIndex = self.getFloatByExtraIndex()
			if floatByExtraIndex == None:
				return
			self.path.append(complex(begin.real, floatByExtraIndex + self.getOldPoint().imag))
			self.wordIndex += 1
	def processPathWordZ(self):
		"Process path word Z."
		self.controlPoints = None
		if len(self.path) < 1:
			return
		# Closepath: transform the finished subpath into a loop and restart
		# the pen at the subpath's first point.
		self.loops.append(getChainMatrixSVGIfNecessary(self.elementNode, self.yAxisPointingUpward).getTransformedPath(self.path))
		self.oldPoint = self.path[0]
		self.path = []
	def processPathWordz(self):
		"Process path word z."
		self.processPathWordZ()
class SVGReader:
	"""An svg carving.

	parseSVG walks the document tree, dispatching each element to a handler in
	globalProcessSVGElementDictionary; handlers append loops to self.loopLayers.
	"""
	def __init__(self):
		"Add empty lists."
		self.loopLayers = []
		self.sliceDictionary = None
		self.stopProcessing = False
		self.z = 0.0
	def flipDirectLayer(self, loopLayer):
		"Flip the y coordinate of the layer and direct the loops."
		for loop in loopLayer.loops:
			for pointIndex, point in enumerate(loop):
				loop[pointIndex] = complex(point.real, -point.imag)
		triangle_mesh.sortLoopsInOrderOfArea(True, loopLayer.loops)
		for loopIndex, loop in enumerate(loopLayer.loops):
			# A loop inside already-filled regions is a hole, so wind it the other way.
			isInsideLoops = euclidean.getIsInFilledRegion(loopLayer.loops[: loopIndex], euclidean.getLeftPoint(loop))
			intercircle.directLoop((not isInsideLoops), loop)
	def getLoopLayer(self):
		"Return the rotated loop layer, creating it on first access for the current z."
		# z is set once per layer by the caller; consume it so repeated calls
		# within the same layer reuse the last created LoopLayer.
		if self.z is not None:
			loopLayer = euclidean.LoopLayer(self.z)
			self.loopLayers.append(loopLayer)
			self.z = None
		return self.loopLayers[-1]
	def parseSVG(self, fileName, svgText):
		"Parse SVG text and store the layers."
		self.fileName = fileName
		xmlParser = DocumentNode(fileName, svgText)
		self.documentElement = xmlParser.getDocumentElement()
		if self.documentElement is None:
			print('Warning, documentElement was None in parseSVG in SVGReader, so nothing will be done for:')
			print(fileName)
			return
		self.parseSVGByElementNode(self.documentElement)
	def parseSVGByElementNode(self, elementNode):
		"Parse SVG by elementNode."
		self.sliceDictionary = svg_writer.getSliceDictionary(elementNode)
		self.yAxisPointingUpward = euclidean.getBooleanFromDictionary(False, self.sliceDictionary, 'yAxisPointingUpward')
		self.processElementNode(elementNode)
		# SVG's y axis points down by default; mirror all layers in that case.
		if not self.yAxisPointingUpward:
			for loopLayer in self.loopLayers:
				self.flipDirectLayer(loopLayer)
	def processElementNode(self, elementNode):
		'Process the xml element.'
		if self.stopProcessing:
			return
		lowerLocalName = elementNode.getNodeName().lower()
		global globalProcessSVGElementDictionary
		if lowerLocalName in globalProcessSVGElementDictionary:
			# Deliberately best-effort: a broken element is reported and skipped
			# so the rest of the document still gets processed.  Catch Exception
			# rather than a bare except so KeyboardInterrupt/SystemExit escape.
			try:
				globalProcessSVGElementDictionary[lowerLocalName](elementNode, self)
			except Exception:
				print('Warning, in processElementNode in svg_reader, could not process:')
				print(elementNode)
				traceback.print_exc(file=sys.stdout)
		for childNode in elementNode.childNodes:
			self.processElementNode(childNode)
# Module-level caches and dispatch tables; the dictionaries are populated by
# the addFunctionsToDictionary calls at the bottom of this section.
globalFontFileNames = None
globalFontReaderDictionary = {}
globalGetTricomplexDictionary = {}
# Parsers for the svg 'transform' attribute operations.
globalGetTricomplexFunctions = [
	getTricomplexmatrix,
	getTricomplexrotate,
	getTricomplexscale,
	getTricomplexskewX,
	getTricomplexskewY,
	getTricomplextranslate ]
# One handler per svg path command letter (uppercase absolute, lowercase relative).
globalProcessPathWordFunctions = [
	PathReader.processPathWordA,
	PathReader.processPathWorda,
	PathReader.processPathWordC,
	PathReader.processPathWordc,
	PathReader.processPathWordH,
	PathReader.processPathWordh,
	PathReader.processPathWordL,
	PathReader.processPathWordl,
	PathReader.processPathWordM,
	PathReader.processPathWordm,
	PathReader.processPathWordQ,
	PathReader.processPathWordq,
	PathReader.processPathWordS,
	PathReader.processPathWords,
	PathReader.processPathWordT,
	PathReader.processPathWordt,
	PathReader.processPathWordV,
	PathReader.processPathWordv,
	PathReader.processPathWordZ,
	PathReader.processPathWordz ]
globalProcessPathWordDictionary = {}
globalProcessSVGElementDictionary = {}
# One handler per supported svg shape element.
globalProcessSVGElementFunctions = [
	processSVGElementcircle,
	processSVGElementellipse,
	processSVGElementg,
	processSVGElementline,
	processSVGElementpath,
	processSVGElementpolygon,
	processSVGElementpolyline,
	processSVGElementrect,
	processSVGElementtext ]
globalSideAngle = 0.5 * math.pi / float( globalNumberOfCornerPoints )
# Register each function list in its dictionary; presumably keyed by the part
# of the function name after the given prefix -- see addFunctionsToDictionary.
addFunctionsToDictionary( globalGetTricomplexDictionary, globalGetTricomplexFunctions, 'getTricomplex')
addFunctionsToDictionary( globalProcessPathWordDictionary, globalProcessPathWordFunctions, 'processPathWord')
addFunctionsToDictionary( globalProcessSVGElementDictionary, globalProcessSVGElementFunctions, 'processSVGElement')
| gpl-2.0 |
raqqun/tweetcommander | packages/oauthlib/oauth2/ext/django.py | 1 | 5068 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
import functools
import logging
from oauthlib.common import urlencode
from oauthlib.oauth2.draft25 import errors
log = logging.getLogger('oauthlib')
class OAuth2ProviderDecorator(object):
    """View decorators wiring Django views to oauthlib OAuth2 endpoints.

    Each *_view method wraps a Django view: request parameters are extracted,
    handed to the configured oauthlib endpoint, and the endpoint's result is
    converted back into a Django HTTP response.
    """

    def __init__(self, error_uri, server=None, authorization_endpoint=None,
                 token_endpoint=None, resource_endpoint=None):
        # A single 'server' object may serve as all three endpoints.
        self._authorization_endpoint = authorization_endpoint or server
        self._token_endpoint = token_endpoint or server
        self._resource_endpoint = resource_endpoint or server
        self._error_uri = error_uri

    def _extract_params(self, request):
        """Return (uri, http_method, body, headers) for oauthlib from a Django request."""
        log.debug('Extracting parameters from request.')
        uri = request.build_absolute_uri()
        http_method = request.method
        headers = request.META
        # NOTE(review): headers aliases request.META, so the deletes below
        # mutate the request in place -- confirm nothing downstream needs the
        # wsgi.* keys (they are removed because they are not serializable headers).
        del headers['wsgi.input']
        del headers['wsgi.errors']
        if 'HTTP_AUTHORIZATION' in headers:
            headers['Authorization'] = headers['HTTP_AUTHORIZATION']
        body = urlencode(request.POST.items())
        return uri, http_method, body, headers

    def pre_authorization_view(self, f):
        """Decorator for the view that renders the authorization page.

        Validates the authorization request, saves the oauth credentials in the
        session for the post-authorization step, and passes scopes/credentials
        to the wrapped view as keyword arguments.
        """
        @functools.wraps(f)
        def wrapper(request, *args, **kwargs):
            uri, http_method, body, headers = self._extract_params(request)
            redirect_uri = request.GET.get('redirect_uri', None)
            log.debug('Found redirect uri %s.', redirect_uri)
            try:
                scopes, credentials = self._authorization_endpoint.validate_authorization_request(
                    uri, http_method, body, headers)
                log.debug('Saving credentials to session, %r.', credentials)
                request.session['oauth2_credentials'] = credentials
                kwargs['scopes'] = scopes
                kwargs.update(credentials)
                log.debug('Invoking view method, %r.', f)
                return f(request, *args, **kwargs)
            except errors.FatalClientError as e:
                # Fatal errors must not be redirected back to the client.
                log.debug('Fatal client error, redirecting to error page.')
                return HttpResponseRedirect(e.in_uri(self._error_uri))
        return wrapper

    def post_authorization_view(self, f):
        """Decorator for the view handling the user's authorization decision.

        The wrapped view must return (scopes, credentials); the credentials
        saved in the session by pre_authorization_view are merged back in.
        """
        @functools.wraps(f)
        def wrapper(request, *args, **kwargs):
            uri, http_method, body, headers = self._extract_params(request)
            scopes, credentials = f(request, *args, **kwargs)
            log.debug('Fetched credentials view, %r.', credentials)
            credentials.update(request.session.get('oauth2_credentials', {}))
            log.debug('Fetched credentials from session, %r.', credentials)
            redirect_uri = credentials.get('redirect_uri')
            log.debug('Found redirect uri %s.', redirect_uri)
            try:
                url, headers, body, status = self._authorization_endpoint.create_authorization_response(
                    uri, http_method, body, headers, scopes, credentials)
                log.debug('Authorization successful, redirecting to client.')
                return HttpResponseRedirect(url)
            except errors.FatalClientError as e:
                log.debug('Fatal client error, redirecting to error page.')
                return HttpResponseRedirect(e.in_uri(self._error_uri))
            except errors.OAuth2Error as e:
                # Non-fatal errors are reported back to the client's redirect uri.
                log.debug('Client error, redirecting back to client.')
                return HttpResponseRedirect(e.in_uri(redirect_uri))
        return wrapper

    def access_token_view(self, f):
        """Decorator for the token endpoint view.

        The wrapped view returns extra credentials (or None); the token
        endpoint builds the token response, which is relayed verbatim.
        """
        @csrf_exempt
        @functools.wraps(f)
        def wrapper(request, *args, **kwargs):
            uri, http_method, body, headers = self._extract_params(request)
            credentials = f(request, *args, **kwargs)
            log.debug('Fetched credentials view, %r.', credentials)
            url, headers, body, status = self._token_endpoint.create_token_response(
                uri, http_method, body, headers, credentials)
            response = HttpResponse(content=body, status=status)
            for k, v in headers:
                response[k] = v
            return response
        return wrapper

    def protected_resource_view(self, scopes=None):
        """Decorator factory guarding a resource view with token verification.

        On success the wrapped view receives client, user and scopes as
        keyword arguments; otherwise a 403 is returned.
        """
        def decorator(f):
            @csrf_exempt
            @functools.wraps(f)
            def wrapper(request, *args, **kwargs):
                uri, http_method, body, headers = self._extract_params(request)
                valid, r = self._resource_endpoint.verify_request(
                    uri, http_method, body, headers, scopes)
                kwargs.update({
                    'client': r.client,
                    'user': r.user,
                    'scopes': r.scopes
                })
                if valid:
                    return f(request, *args, **kwargs)
                else:
                    return HttpResponseForbidden()
            return wrapper
        return decorator
| gpl-3.0 |
MziRintu/kitsune | kitsune/kbforums/feeds.py | 17 | 2176 | from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Atom1Feed
from django.utils.html import strip_tags, escape
from tower import ugettext as _
from kitsune import forums as constants
from kitsune.kbforums.models import Thread
from kitsune.wiki.models import Document
class ThreadsFeed(Feed):
    """Atom feed of the most recently updated discussion threads of a document."""
    feed_type = Atom1Feed

    def get_object(self, request, document_slug):
        # Only documents that allow discussion have threads to list.
        return get_object_or_404(Document,
                                 slug=document_slug,
                                 locale=request.LANGUAGE_CODE,
                                 allow_discussion=True)

    def title(self, document):
        return _('Recently updated threads about %s') % document.title

    def link(self, document):
        return document.get_absolute_url()

    def items(self, document):
        # Most recently active threads first, capped at one feed page.
        return document.thread_set.order_by(
            '-last_post__created')[:constants.THREADS_PER_PAGE]

    def item_title(self, item):
        return item.title

    def item_author_name(self, item):
        return item.creator

    def item_pubdate(self, item):
        return item.created
class PostsFeed(Feed):
    """Atom feed of the posts in one discussion thread of a document."""
    feed_type = Atom1Feed

    def get_object(self, request, document_slug, thread_id):
        doc = get_object_or_404(Document,
                                slug=document_slug,
                                locale=request.LANGUAGE_CODE,
                                allow_discussion=True)
        # The thread must belong to the requested document.
        return get_object_or_404(Thread, pk=thread_id, document=doc)

    def title(self, thread):
        return _('Recent posts in %s') % thread.title

    def link(self, thread):
        return thread.get_absolute_url()

    def description(self, thread):
        return self.title(thread)

    def items(self, thread):
        return thread.post_set.order_by('-created')

    def item_title(self, item):
        # Plain-text excerpt of the rendered post.
        return strip_tags(item.content_parsed)[:100]

    def item_description(self, item):
        return escape(item.content_parsed)

    def item_author_name(self, item):
        return item.creator

    def item_pubdate(self, item):
        return item.created
| bsd-3-clause |
pgmillon/ansible | lib/ansible/module_utils/network/checkpoint/checkpoint.py | 2 | 9275 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
import time
from ansible.module_utils.connection import Connection
# Shared Ansible argument specs for Check Point modules managing API objects.
checkpoint_argument_spec_for_objects = dict(
    auto_publish_session=dict(type='bool'),
    wait_for_task=dict(type='bool', default=True),
    state=dict(type='str', required=True, choices=['present', 'absent']),
    version=dict(type='str')
)

# Argument spec for fact-gathering modules (read-only, version selection only).
checkpoint_argument_spec_for_facts = dict(
    version=dict(type='str')
)

# Argument spec for modules that run one-off API commands.
checkpoint_argument_spec_for_commands = dict(
    wait_for_task=dict(type='bool', default=True),
    version=dict(type='str')
)
# send the request to checkpoint
def send_request(connection, version, url, payload=None):
    """Issue one Check Point Web API call and return its (code, response) pair.

    version is either '' or a 'vX.Y/' prefix inserted before the endpoint url.
    """
    endpoint = '/web_api/' + version + url
    return connection.send_request(endpoint, payload)
# get the payload from the user parameters
def is_checkpoint_param(parameter):
    """Return True when the module parameter belongs in the Check Point API payload.

    The Ansible-side control parameters below configure module behavior and
    must not be forwarded to the management server.
    """
    # Membership test replaces the original chained-or comparison.
    return parameter not in ('auto_publish_session', 'state', 'wait_for_task', 'version')
# build the payload from the parameters which has value (not None), and they are parameter of checkpoint API as well
def get_payload_from_parameters(module):
    """Build the API payload from the module's parameters.

    Only truthy values that are real API parameters are kept, and underscores
    in parameter names are converted to the API's hyphenated form.
    """
    return {
        name.replace("_", "-"): value
        for name, value in module.params.items()
        if value and is_checkpoint_param(name)
    }
# wait for task
def wait_for_task(module, version, connection, task_id):
    """Poll show-task until every sub-task leaves 'in progress', failing on error.

    module.fail_json is expected to raise/exit, so code after a fail_json call
    is never reached.  Polls every 2 seconds for at most 300 iterations.
    """
    task_id_payload = {'task-id': task_id}
    task_complete = False
    current_iteration = 0
    max_num_iterations = 300

    # As long as there is a task in progress
    while not task_complete and current_iteration < max_num_iterations:
        current_iteration += 1
        # Check the status of the task
        code, response = send_request(connection, version, 'show-task', task_id_payload)

        # Retry transient show-task failures up to 5 times before giving up.
        attempts_counter = 0
        while code != 200:
            if attempts_counter < 5:
                attempts_counter += 1
                time.sleep(2)
                code, response = send_request(connection, version, 'show-task', task_id_payload)
            else:
                response['message'] = "ERROR: Failed to handle asynchronous tasks as synchronous, tasks result is" \
                                      " undefined.\n" + response['message']
                module.fail_json(msg=response)

        # Count the number of tasks that are not in-progress
        completed_tasks = 0
        for task in response['tasks']:
            if task['status'] == 'failed':
                module.fail_json(msg='Task {0} with task id {1} failed. Look at the logs for more details'
                                     .format(task['task-name'], task['task-id']))
            if task['status'] == 'in progress':
                break
            completed_tasks += 1

        # Are we done? check if all tasks are completed
        if completed_tasks == len(response["tasks"]):
            task_complete = True
        else:
            time.sleep(2)  # Wait for two seconds
    if not task_complete:
        module.fail_json(msg="ERROR: Timeout.\nTask-id: {0}.".format(task_id_payload['task-id']))
# handle publish command, and wait for it to end if the user asked so
def handle_publish(module, connection, version):
    """Publish the session when auto_publish_session is set, optionally waiting.

    module.fail_json is expected to raise/exit on a non-200 publish, so the
    wait below only runs after a successful publish.
    """
    if module.params['auto_publish_session']:
        publish_code, publish_response = send_request(connection, version, 'publish')
        if publish_code != 200:
            module.fail_json(msg=publish_response)
        if module.params['wait_for_task']:
            wait_for_task(module, version, connection, publish_response['task-id'])
# handle a command
def api_command(module, command):
    """Run a single Check Point API command and return an Ansible result dict.

    A command is always considered a change.  When wait_for_task is set, any
    task id(s) in the response are waited on before returning.
    """
    payload = get_payload_from_parameters(module)
    connection = Connection(module._socket_path)
    # If the user supplied a specific version, add it to the url.
    version = ('v' + module.params['version'] + '/') if module.params['version'] else ''

    code, response = send_request(connection, version, command, payload)
    result = {'changed': True}

    if code == 200:
        if module.params['wait_for_task']:
            # A response may carry one 'task-id' or a list of 'tasks'.
            if 'task-id' in response:
                wait_for_task(module, version, connection, response['task-id'])
            elif 'tasks' in response:
                for task_id in response['tasks']:
                    wait_for_task(module, version, connection, task_id)

        result[command] = response
    else:
        module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))

    return result
# handle api call
def api_call(module, api_call_object):
    """Ensure the given object type matches the desired state (present/absent).

    The 'equals' endpoint compares the desired parameters with the server's
    object: 200 means the object exists (response['equals'] says whether it
    already matches), 404 means it does not exist.  module.fail_json is
    expected to raise/exit.
    """
    payload = get_payload_from_parameters(module)
    connection = Connection(module._socket_path)
    # If the user supplied a specific version, add it to the url.
    version = ('v' + module.params['version'] + '/') if module.params['version'] else ''

    payload_for_equals = {'type': api_call_object, 'params': payload}
    equals_code, equals_response = send_request(connection, version, 'equals', payload_for_equals)
    # if code is 400 (bad request) or 500 (internal error) - fail
    if equals_code == 400 or equals_code == 500:
        module.fail_json(msg=equals_response)
    result = {'changed': False}
    if module.params['state'] == 'present':
        if equals_code == 200:
            if not equals_response['equals']:
                # Object exists but differs: update it.
                code, response = send_request(connection, version, 'set-' + api_call_object, payload)
                if code != 200:
                    module.fail_json(msg=response)

                handle_publish(module, connection, version)

                result['changed'] = True
                result[api_call_object] = response
            else:
                # objects are equals and there is no need for set request
                pass
        elif equals_code == 404:
            # Object missing: create it.
            code, response = send_request(connection, version, 'add-' + api_call_object, payload)
            if code != 200:
                module.fail_json(msg=response)

            handle_publish(module, connection, version)

            result['changed'] = True
            result[api_call_object] = response
    else:
        # state == absent
        if equals_code == 200:
            code, response = send_request(connection, version, 'delete-' + api_call_object, payload)
            if code != 200:
                module.fail_json(msg=response)

            handle_publish(module, connection, version)

            result['changed'] = True
        elif equals_code == 404:
            # no need to delete because object does not exist
            pass

    result['checkpoint_session_uid'] = connection.get_session_uid()
    return result
# Legacy combined argument spec: session publishing plus policy installation.
checkpoint_argument_spec = dict(auto_publish_session=dict(type='bool', default=True),
                                policy_package=dict(type='str', default='standard'),
                                auto_install_policy=dict(type='bool', default=True),
                                targets=dict(type='list')
                                )
def publish(connection, uid=None):
    """Publish the current session, or the session identified by uid if given."""
    payload = {'uid': uid} if uid else None
    connection.send_request('/web_api/publish', payload)
def discard(connection, uid=None):
    """Discard the current session, or the session identified by uid if given."""
    payload = {'uid': uid} if uid else None
    connection.send_request('/web_api/discard', payload)
def install_policy(connection, policy_package, targets):
    """Install the named policy package on the given gateway targets."""
    connection.send_request('/web_api/install-policy',
                            {'policy-package': policy_package,
                             'targets': targets})
| gpl-3.0 |
asadziach/tensorflow | tensorflow/python/framework/test_util_test.py | 10 | 9697 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node 'seven"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
    """A checkedThread whose target runs cleanly joins without error."""

    def noop(ev):
      ev.set()

    event_arg = threading.Event()

    self.assertFalse(event_arg.is_set())
    t = self.checkedThread(target=noop, args=(event_arg,))
    t.start()
    t.join()
    # The thread body ran: it set the event.
    self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
    """An exception raised in a checkedThread is re-raised on join()."""

    def err_func():
      return 1 // 0

    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    # NOTE(review): this is the Python 2 ZeroDivisionError message text.
    self.assertTrue("integer division or modulo by zero" in str(fe.exception))
def testCheckedThreadWithWrongAssertionFails(self):
    """A failed test assertion inside a checkedThread surfaces on join()."""
    x = 37

    def err_func():
      # 37 < 10 is False, so this assertion fails inside the thread.
      self.assertTrue(x < 10)

    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("False is not true" in str(fe.exception))
def testMultipleThreadsWithOneFailure(self):
    """Only the thread whose assertion failed raises on join()."""

    def err_func(i):
      # Fails only for i == 7.
      self.assertTrue(i != 7)

    threads = [
        self.checkedThread(
            target=err_func, args=(i,)) for i in range(10)
    ]
    for t in threads:
      t.start()
    for i, t in enumerate(threads):
      if i == 7:
        with self.assertRaises(self.failureException):
          t.join()
      else:
        t.join()
def _WeMustGoDeeper(self, msg):
    """Raise an op error from a node with an original_op chain and check
    that assertRaisesOpError matches `msg` against the full error text
    (which includes node names and the original op)."""
    with self.assertRaisesOpError(msg):
      node_def = ops._NodeDef("op_type", "name")
      node_def_orig = ops._NodeDef("op_type_orig", "orig")
      op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
      op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
      raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
    """assertRaisesOpError matches message, node name and original op name,
    but not arbitrary unrelated strings."""
    with self.assertRaises(AssertionError):
      self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")

    # These all appear somewhere in the raised error's text.
    self._WeMustGoDeeper("true_err")
    self._WeMustGoDeeper("name")
    self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
    """assertAllClose accepts plain scalars, honoring the tolerance."""
    self.assertAllClose(7, 7 + 1e-8)
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(7, 8)
def testArrayNear(self):
    """assertArrayNear rejects length mismatches and nested inputs, and
    accepts equal flat arrays."""
    # Different lengths fail the assertion outright.
    with self.assertRaises(AssertionError):
        self.assertArrayNear([1, 2], [1, 2, 5], 0.001)
    # A nested list cannot be compared element-wise with a flat one.
    with self.assertRaises(TypeError):
        self.assertArrayNear([1, 2], [[1, 2], [3, 4]], 0.001)
    # Identical flat arrays are near each other.
    self.assertArrayNear([1, 2], [1, 2], 0.001)
def testForceGPU(self):
    """force_gpu=True fails placement for ops with no GPU kernel."""
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Cannot assign a device to node"):
      with self.test_session(force_gpu=True):
        # this relies on us not having a GPU implementation for assert, which
        # seems sensible
        x = constant_op.constant(True)
        y = [15]
        control_flow_ops.Assert(x, y).run()
def testAssertAllCloseAccordingToType(self):
    """Per-dtype tolerances: float64 uses rtol/atol, float32 falls back to
    float_rtol/float_atol, float16 to half_rtol/half_atol.  For each dtype,
    one comparison inside the tolerance passes and one outside fails."""
    # test float64
    self.assertAllCloseAccordingToType(
        np.asarray([1e-8], dtype=np.float64),
        np.asarray([2e-8], dtype=np.float64),
        rtol=1e-8, atol=1e-8
    )

    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-7], dtype=np.float64),
          np.asarray([2e-7], dtype=np.float64),
          rtol=1e-8, atol=1e-8
      )

    # test float32
    self.assertAllCloseAccordingToType(
        np.asarray([1e-7], dtype=np.float32),
        np.asarray([2e-7], dtype=np.float32),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7
    )

    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-6], dtype=np.float32),
          np.asarray([2e-6], dtype=np.float32),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7
      )

    # test float16
    self.assertAllCloseAccordingToType(
        np.asarray([1e-4], dtype=np.float16),
        np.asarray([2e-4], dtype=np.float16),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7,
        half_rtol=1e-4, half_atol=1e-4
    )

    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-3], dtype=np.float16),
          np.asarray([2e-3], dtype=np.float16),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7,
          half_rtol=1e-4, half_atol=1e-4
      )
def testRandomSeed(self):
    """setUp() reseeds Python, NumPy and TF RNGs, so per-testCase
    randomness is deterministic across runs."""
    a = random.randint(1, 1000)
    a_np_rand = np.random.rand(1)
    with self.test_session():
      a_rand = random_ops.random_normal([1]).eval()
    # ensure that randomness in multiple testCases is deterministic.
    self.setUp()
    b = random.randint(1, 1000)
    b_np_rand = np.random.rand(1)
    with self.test_session():
      b_rand = random_ops.random_normal([1]).eval()
    self.assertEqual(a, b)
    self.assertEqual(a_np_rand, b_np_rand)
    self.assertEqual(a_rand, b_rand)
# Run all tests in this module when executed as a script.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
ken-saka/docker-registry | scripts/diff-worker.py | 35 | 3389 | #!/usr/bin/env python
import argparse # noqa
import logging
import os
import redis
from docker_registry.lib import layers
from docker_registry.lib import rlock
from docker_registry.lib import rqueue
import docker_registry.storage as storage
# Storage backend shared by the layer-diff helpers.
store = storage.load()

# Default Redis endpoint, taken from docker-compose/link style environment
# variables when present; otherwise fall back to a local default.
redis_default_host = os.environ.get(
    'DOCKER_REDIS_1_PORT_6379_TCP_ADDR',
    '0.0.0.0')
redis_default_port = int(os.environ.get(
    'DOCKER_REDIS_1_PORT_6379_TCP_PORT',
    '6379'))

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def get_parser():
    """Build the command-line parser for the layer-diff daemon.

    Returns:
        argparse.ArgumentParser configured with the Redis connection
        options (--rhost, --rport, -d/--database, -p/--password).
    """
    parser = argparse.ArgumentParser(
        description="Daemon for computing layer diffs"
    )
    # (flags, add_argument keyword arguments) pairs, registered in order.
    option_specs = [
        (("--rhost",),
         dict(default=redis_default_host, dest="redis_host",
              help="Host of redis instance to listen to")),
        (("--rport",),
         dict(default=redis_default_port, dest="redis_port", type=int,
              help="Port of redis instance to listen to")),
        (("-d", "--database"),
         dict(default=0, dest="redis_db", type=int, metavar="redis_db",
              help="Redis database to connect to")),
        (("-p", "--password"),
         dict(default=None, metavar="redis_pw", dest="redis_pw",
              help="Redis database password")),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser
def get_redis_connection(options):
    """Open a StrictRedis connection from parsed command-line options.

    :param options: argparse.Namespace with redis_host, redis_port,
        redis_db and redis_pw attributes.
    :return: a redis.StrictRedis client.
    """
    return redis.StrictRedis(
        host=options.redis_host,
        port=options.redis_port,
        db=options.redis_db,
        password=options.redis_pw,
    )
def handle_request(layer_id, redis_conn):
    '''Handler for any item pulled from the worker job queue.

    This handler is called every time the worker is able to pop a message
    from the job queue filled by the registry. The worker blocks until a
    message is available. This handler will then attempt to acquire a lock
    for the provided layer_id and, if successful, process a diff for the
    layer.

    If the lock for this layer_id has already been acquired (by another
    worker) the lock attempt times out immediately and this worker goes
    back to blocking on the queue for another request.
    '''
    try:
        # this with-context will attempt to establish a 5 minute lock
        # on the key for this layer, immediately passing on LockTimeout
        # if one isn't available
        with rlock.Lock(redis_conn,
                        "diff-worker-lock",
                        layer_id,
                        expires=60 * 5):
            # first check if a cached result is already available. The registry
            # already does this, but hey.
            diff_data = layers.get_image_diff_cache(layer_id)
            if not diff_data:
                log.info("Processing diff for %s" % layer_id)
                # presumably get_image_diff_json caches its result for the
                # registry to pick up -- verify in docker_registry.lib.layers
                layers.get_image_diff_json(layer_id)
    except rlock.LockTimeout:
        log.info("Another worker is processing %s. Skipping." % layer_id)
if __name__ == '__main__':
    parser = get_parser()
    options = parser.parse_args()
    redis_conn = get_redis_connection(options)
    # create a bounded queue holding registry requests for diff calculations
    queue = rqueue.CappedCollection(redis_conn, "diff-worker", 1024)
    # initialize worker factory with the queue and redis connection
    worker_factory = rqueue.worker(queue, redis_conn)
    # create worker instance with our handler
    worker = worker_factory(handle_request)
    log.info("Starting worker...")
    # NOTE(review): worker() presumably blocks forever consuming the queue
    # -- see docker_registry.lib.rqueue.worker.
    worker()
| apache-2.0 |
SEMAFORInformatik/femagtools | examples/model-creation/stator1-magnetIronV.py | 1 | 1583 | import femagtools
def create_fsl():
    """Build the FSL model description for the "PM 130 L4" machine.

    Defines a 4-pole permanent-magnet machine with a stator1 slot geometry
    and a magnetIronV magnet arrangement, then hands the model to
    femagtools.create_fsl.

    Returns:
        The FSL statements produced by femagtools.create_fsl (an iterable
        of strings; joined with newlines by the caller below).
    """
    # Dimensions appear to be SI units (metres); e.g. 0.13 m outer
    # diameter matches the "130" in the model name -- TODO confirm
    # against femagtools' model conventions.
    machine = dict(
        name="PM 130 L4",
        lfe=0.1,
        poles=4,
        outer_diam=0.13,
        bore_diam=0.07,
        inner_diam=0.015,
        airgap=0.001,

        stator=dict(
            num_slots=12,
            stator1=dict(
                slot_rf1=0.057,
                tip_rh1=0.037,
                tip_rh2=0.037,
                tooth_width=0.009,
                slot_width=0.003)
        ),

        magnet=dict(
            magnetIronV=dict(
                magn_height=4e-3,
                magn_width=18e-3,
                magn_angle=130,
                iron_hs=1e-3,
                iron_height=2e-3,
                gap_ma_iron=1e-3,
                air_triangle=1,
                condshaft_r=12e-3,
                magn_rem=1.2,
                magn_num=2,
                iron_shape=0)
        ),

        windings=dict(
            num_phases=3,
            num_wires=100,
            coil_span=3.0,
            num_layers=1)
    )
    return femagtools.create_fsl(machine)
if __name__ == '__main__':
    import os
    import logging
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(message)s')
    # Derive the model name from this script's file name (without suffix).
    modelname = os.path.split(__file__)[-1].split('.')[0]
    logger = logging.getLogger(modelname)

    # NOTE(review): assumes ~/femag already exists; open() raises otherwise.
    workdir = os.path.join(os.path.expanduser('~'), 'femag')
    with open(os.path.join(workdir, modelname+'.fsl'), 'w') as f:
        f.write('\n'.join(create_fsl()))
    logger.info("FSL %s created",
                os.path.join(workdir, modelname+'.fsl'))
| bsd-2-clause |
Kalyzee/edx-platform | lms/djangoapps/edxnotes/views.py | 72 | 3809 | """
Views related to EdxNotes.
"""
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.conf import settings
from django.core.urlresolvers import reverse
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.json_request import JsonResponse, JsonResponseBadRequest
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.helpers import (
get_notes,
get_id_token,
is_feature_enabled,
search,
get_course_position,
)
log = logging.getLogger(__name__)
@login_required
def edxnotes(request, course_id):
    """
    Displays the EdxNotes page.

    Raises Http404 if the EdxNotes feature is disabled for the course or
    the notes service is unavailable.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)

    if not is_feature_enabled(course):
        raise Http404

    try:
        notes = get_notes(request.user, course)
    except EdxNotesServiceUnavailable:
        raise Http404

    context = {
        "course": course,
        "search_endpoint": reverse("search_notes", kwargs={"course_id": course_id}),
        "notes": notes,
        "debug": json.dumps(settings.DEBUG),
        'position': None,
    }

    if not notes:
        # No notes yet: load the course module so the page can link the
        # student to a position in the courseware instead (presumably the
        # student's current position -- see get_course_position).
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2
        )
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course_key, course=course
        )

        position = get_course_position(course_module)
        if position:
            context.update({
                'position': position,
            })

    return render_to_response("edxnotes/edxnotes.html", context)
@login_required
def search_notes(request, course_id):
    """
    Handle an AJAX note-search request for the given course.

    Requires a "text" query parameter; returns the raw search response,
    404 when the feature is disabled, 400 for a missing query, and a JSON
    500 when the notes service fails.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)

    # Guard clauses: feature must be on and a query must be present.
    if not is_feature_enabled(course):
        raise Http404
    if "text" not in request.GET:
        return HttpResponseBadRequest()

    try:
        results = search(request.user, course, request.GET["text"])
    except (EdxNotesParseError, EdxNotesServiceUnavailable) as err:
        return JsonResponseBadRequest({"error": err.message}, status=500)
    return HttpResponse(results)
# pylint: disable=unused-argument
@login_required
def get_token(request, course_id):
    """Return a fresh JWT ID-Token for the requesting user as plain text."""
    token = get_id_token(request.user)
    return HttpResponse(token, content_type='text/plain')
@login_required
def edxnotes_visibility(request, course_id):
    """
    Handle ajax call from "Show notes" checkbox.

    Expects a JSON request body with a boolean "visibility" field; stores
    it on the course module and returns 200, or 400 when the body is
    malformed or the field is missing.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)
    field_data_cache = FieldDataCache([course], course_key, request.user)
    course_module = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course_key, course=course
    )

    if not is_feature_enabled(course):
        raise Http404

    try:
        visibility = json.loads(request.body)["visibility"]
        course_module.edxnotes_visibility = visibility
        course_module.save()
        return JsonResponse(status=200)
    except (ValueError, KeyError):
        # ValueError: body is not valid JSON; KeyError: no "visibility".
        log.warning(
            "Could not decode request body as JSON and find a boolean visibility field: '%s'", request.body
        )
        return JsonResponseBadRequest()
| agpl-3.0 |
lseyesl/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py | 119 | 29255 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import logging
import unittest2 as unittest
from webkitpy.common.net import bugzilla
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.commitqueuetask import *
from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.mocktool import MockTool
_log = logging.getLogger(__name__)
class MockCommitQueue(CommitQueueTaskDelegate):
    """Scripted delegate used to drive CommitQueueTask in these tests.

    error_plan is consumed one entry per run_command() call: a None entry
    makes the command succeed; a ScriptError entry is raised to simulate
    that command failing.  All delegate callbacks log what they were
    called with, so tests can assert against the captured log output.
    """

    def __init__(self, error_plan):
        self._error_plan = error_plan
        self._failure_status_id = 0

    def run_command(self, command):
        _log.info("run_webkit_patch: %s" % command)
        if self._error_plan:
            error = self._error_plan.pop(0)
            if error:
                raise error

    def command_passed(self, success_message, patch):
        _log.info("command_passed: success_message='%s' patch='%s'" % (
            success_message, patch.id()))

    def command_failed(self, failure_message, script_error, patch):
        _log.info("command_failed: failure_message='%s' script_error='%s' patch='%s'" % (
            failure_message, script_error, patch.id()))
        # Hand back a fresh fake status-server id for each failure.
        self._failure_status_id += 1
        return self._failure_status_id

    def refetch_patch(self, patch):
        return patch

    def expected_failures(self):
        return ExpectedFailures()

    def test_results(self):
        return None

    def report_flaky_tests(self, patch, flaky_results, results_archive):
        flaky_tests = [result.filename for result in flaky_results]
        _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename))

    def archive_last_test_results(self, patch):
        _log.info("archive_last_test_results: patch='%s'" % patch.id())
        archive = Mock()
        archive.filename = "mock-archive-%s.zip" % patch.id()
        return archive

    def build_style(self):
        return "both"

    def did_pass_testing_ews(self, patch):
        return False
class FailingTestCommitQueue(MockCommitQueue):
    """MockCommitQueue that also scripts layout-test results.

    test_failure_plan holds one entry per build-and-test run: the list of
    test names that should be reported as failing for that run.
    """

    def __init__(self, error_plan, test_failure_plan):
        MockCommitQueue.__init__(self, error_plan)
        self._test_run_counter = -1  # Special value to indicate tests have never been run.
        self._test_failure_plan = test_failure_plan

    def run_command(self, command):
        # Advance to the next scripted result each time tests are run.
        if command[0] == "build-and-test":
            self._test_run_counter += 1
        MockCommitQueue.run_command(self, command)

    def _mock_test_result(self, testname):
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])

    def test_results(self):
        # Doesn't make sense to ask for the test_results until the tests have run at least once.
        assert(self._test_run_counter >= 0)
        failures_for_run = self._test_failure_plan[self._test_run_counter]
        results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
        # This makes the results trustable by ExpectedFailures.
        results.set_failure_limit_count(10)
        return results
# We use GoldenScriptError to make sure that the code under test throws the
# correct (i.e., golden) exception.
class GoldenScriptError(ScriptError):
    """Marker ScriptError subclass asserted on by the tests below."""
    pass
class CommitQueueTaskTest(unittest.TestCase):
def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
    """Run a CommitQueueTask against `commit_queue` for attachment 10000
    and compare the captured log output to `expected_logs`.

    If `expected_exception` is given, the run must raise it; otherwise
    the run's success must be the opposite of `expect_retry`.
    Returns the task so callers can make further assertions.
    """
    # Show full diffs when comparing the (long) expected log strings.
    self.maxDiff = None
    tool = MockTool(log_executive=True)
    patch = tool.bugs.fetch_attachment(10000)
    task = CommitQueueTask(commit_queue, patch)
    success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs, expected_exception=expected_exception)
    if not expected_exception:
        self.assertEqual(success, not expect_retry)
    return task
def test_success_case(self):
    """All steps pass: clean, update, apply, validate, build, test, land."""
    commit_queue = MockCommitQueue([])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs)
def test_fast_success_case(self):
    """When the testing EWS already passed the patch, the local
    build-and-test step is skipped entirely."""
    commit_queue = MockCommitQueue([])
    commit_queue.did_pass_testing_ews = lambda patch: True
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs)
def test_clean_failure(self):
    """A failing clean step aborts the run and schedules a retry."""
    commit_queue = MockCommitQueue([
        ScriptError("MOCK clean failure"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_update_failure(self):
    """A failing update step aborts the run and schedules a retry."""
    commit_queue = MockCommitQueue([
        None,
        ScriptError("MOCK update failure"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_apply_failure(self):
    """A patch that fails to apply raises the golden exception (the
    patch is rejected, not retried)."""
    commit_queue = MockCommitQueue([
        None,
        None,
        GoldenScriptError("MOCK apply failure"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_validate_changelog_failure(self):
    """A ChangeLog validation failure raises the golden exception."""
    commit_queue = MockCommitQueue([
        None,
        None,
        None,
        GoldenScriptError("MOCK validate failure"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_build_failure(self):
    """If the patch fails to build but a clean build succeeds, the patch
    is to blame: the golden exception is raised."""
    commit_queue = MockCommitQueue([
        None,
        None,
        None,
        None,
        GoldenScriptError("MOCK build failure"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
command_passed: success_message='Able to build without patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_red_build_failure(self):
    """If the build fails both with and without the patch, the tree is
    broken, so the patch is retried instead of rejected."""
    commit_queue = MockCommitQueue([
        None,
        None,
        None,
        None,
        ScriptError("MOCK build failure"),
        ScriptError("MOCK clean build failure"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_flaky_test_failure(self):
    """One failed test run followed by a passing one is treated as flaky:
    the flakiness is reported and the patch still lands."""
    commit_queue = MockCommitQueue([
        None,
        None,
        None,
        None,
        None,
        ScriptError("MOCK tests failure"),
    ])
    # CommitQueueTask will only report flaky tests if we successfully parsed
    # results.json and returned a LayoutTestResults object, so we fake one.
    commit_queue.test_results = lambda: LayoutTestResults([])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs)
def test_failed_archive(self):
    """If archiving the test results fails, flaky tests are not reported,
    but the patch still lands after the second run passes."""
    commit_queue = MockCommitQueue([
        None,
        None,
        None,
        None,
        None,
        ScriptError("MOCK tests failure"),
    ])
    commit_queue.test_results = lambda: LayoutTestResults([])
    # It's possible delegate to fail to archive layout tests, don't try to report
    # flaky tests when that happens.
    commit_queue.archive_last_test_results = lambda patch: None
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs)
def test_double_flaky_test_failure(self):
    """Two consecutive failing runs with *different* failing tests: the
    task gives up without reporting flakiness or blaming the patch."""
    commit_queue = FailingTestCommitQueue([
        None,
        None,
        None,
        None,
        None,
        ScriptError("MOCK test failure"),
        ScriptError("MOCK test failure again"),
    ], [
        "foo.html",
        "bar.html",
        "foo.html",
    ])
    # The (subtle) point of this test is that report_flaky_tests does not appear
    # in the expected_logs for this run.
    # Note also that there is no attempt to run the tests w/o the patch.
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
"""
    tool = MockTool(log_executive=True)
    patch = tool.bugs.fetch_attachment(10000)
    task = CommitQueueTask(commit_queue, patch)
    success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs)
    self.assertFalse(success)
def test_test_failure(self):
    """Tests fail twice with the patch but pass without it: the patch is
    to blame and the golden exception is raised."""
    commit_queue = MockCommitQueue([
        None,
        None,
        None,
        None,
        None,
        GoldenScriptError("MOCK test failure"),
        ScriptError("MOCK test failure again"),
    ])
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_passed: success_message='Able to pass tests without patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_red_test_failure(self):
    """The same test fails with and without the patch (red tree): the
    failure is pre-existing, so the patch still lands."""
    commit_queue = FailingTestCommitQueue([
        None,
        None,
        None,
        None,
        None,
        ScriptError("MOCK test failure"),
        ScriptError("MOCK test failure again"),
        ScriptError("MOCK clean test failure"),
    ], [
        "foo.html",
        "foo.html",
        "foo.html",
    ])

    # Tests always fail, and always return the same results, but we
    # should still be able to land in this case!
    expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
    self._run_through_task(commit_queue, expected_logs)
def test_very_red_tree_retry(self):
lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100))
commit_queue = FailingTestCommitQueue([
None,
None,
None,
None,
None,
ScriptError("MOCK test failure"),
ScriptError("MOCK test failure again"),
ScriptError("MOCK clean test failure"),
], [
lots_of_failing_tests,
lots_of_failing_tests,
lots_of_failing_tests,
])
# Tests always fail, and return so many failures that we do not
# trust the results (see ExpectedFailures._can_trust_results) so we
# just give up and retry the patch.
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, expect_retry=True)
    def test_red_tree_patch_rejection(self):
        """Reject the patch when it fails more tests than the bare tree does.

        Runs with the patch fail two tests while the clean-tree run fails
        only one, so the extra failure is attributed to the patch, which is
        rejected (surfacing the GoldenScriptError) instead of landed.
        """
        # First list: one ScriptError (or None for success) per queue step;
        # second list: failure results for each of the three test runs.
        commit_queue = FailingTestCommitQueue([
            None,
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK test failure"),
            ScriptError("MOCK test failure again"),
            ScriptError("MOCK clean test failure"),
        ], [
            ["foo.html", "bar.html"],
            ["foo.html", "bar.html"],
            ["foo.html"],
        ])
        # Tests always fail, but the clean tree only fails one test
        # while the patch fails two. So we should reject the patch!
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
        task = self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
        self.assertEqual(task.results_from_patch_test_run(task._patch).failing_tests(), ["foo.html", "bar.html"])
        # failure_status_id should be of the test with patch (1), not the test without patch (2).
        self.assertEqual(task.failure_status_id, 1)
    def test_land_failure(self):
        """A failure in the final land-attachment step surfaces as GoldenScriptError.

        All earlier steps succeed (None entries); only the last queue step,
        landing, raises.
        """
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK land failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
"""
        # FIXME: This should really be expect_retry=True for a better user experience.
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def _expect_validate(self, patch, is_valid):
class MockDelegate(object):
def refetch_patch(self, patch):
return patch
def expected_failures(self):
return ExpectedFailures()
task = CommitQueueTask(MockDelegate(), patch)
self.assertEqual(task.validate(), is_valid)
def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
bug = bugzilla.Bug(bug_dict, None)
patch = bugzilla.Attachment(attachment_dict, bug)
patch._committer = committer
return patch
    def test_validate(self):
        """Only fresh patches validate: obsolete, closed-bug, committerless, or r- patches fail."""
        self._expect_validate(self._mock_patch(), True)
        self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
        self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
        self._expect_validate(self._mock_patch(committer=None), False)
        self._expect_validate(self._mock_patch({'review': '-'}), False)
AkA84/edx-platform | lms/djangoapps/certificates/migrations/0016_change_course_key_fields.py | 103 | 6642 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Convert ``course_id`` from CharField to CourseKeyField.

    Alters the ``course_id`` column of both ``GeneratedCertificate`` and
    ``CertificateWhitelist``; ``backwards`` restores the plain CharField.
    Both directions keep max_length=255.
    """

    def forwards(self, orm):
        """Apply: switch both course_id columns to CourseKeyField."""
        # Changing field 'GeneratedCertificate.course_id'
        db.alter_column('certificates_generatedcertificate', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))

        # Changing field 'CertificateWhitelist.course_id'
        db.alter_column('certificates_certificatewhitelist', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))

    def backwards(self, orm):
        """Revert: switch both course_id columns back to CharField."""
        # Changing field 'GeneratedCertificate.course_id'
        db.alter_column('certificates_generatedcertificate', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))

        # Changing field 'CertificateWhitelist.course_id'
        db.alter_column('certificates_certificatewhitelist', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))

    # Frozen ORM model snapshot used by South when running this migration;
    # auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'certificates.certificatewhitelist': {
            'Meta': {'object_name': 'CertificateWhitelist'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'certificates.generatedcertificate': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
            'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
            'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['certificates']
| agpl-3.0 |
Erotemic/ibeis | ibeis/algo/graph/tests/test_graph_iden.py | 1 | 5170 | import utool as ut
from ibeis.algo.graph import demo
from ibeis.algo.graph.state import (POSTV, NEGTV, INCMP, UNREV)
def test_incomp_inference():
    """Re-review edges as INCMP within and between consistent/inconsistent PCCs."""
    infr = demo.demodata_infr(num_pccs=0)
    # Build two consistent CCs ({1..4}, {31..33}) and two inconsistent ones
    # ({11..14}, {21..23}), plus negative edges between components.
    initial_reviews = [
        ((1, 2), POSTV), ((2, 3), POSTV), ((3, 4), POSTV), ((4, 1), POSTV),
        # -----
        ((11, 12), POSTV), ((12, 13), POSTV), ((13, 14), POSTV),
        ((14, 11), POSTV), ((12, 14), NEGTV),
        # -----
        ((21, 22), POSTV), ((22, 23), POSTV), ((23, 21), NEGTV),
        # -----
        ((31, 32), POSTV), ((32, 33), POSTV), ((33, 31), POSTV),
        ((2, 32), NEGTV), ((3, 33), NEGTV), ((12, 21), NEGTV),
    ]
    for edge, decision in initial_reviews:
        infr.add_feedback(edge, decision)
    # -----
    # Now mark a mix of within-CC and between-CC edges incomparable.
    print('==========================')
    incomparable_edges = [
        (1, 3), (1, 4), (1, 2),
        (11, 13), (11, 14), (11, 12),
        (1, 31), (2, 32), (12, 21), (23, 21), (12, 14),
    ]
    for edge in incomparable_edges:
        infr.add_feedback(edge, INCMP)
    print('Final state:')
    print(ut.repr4(sorted(infr.gen_edge_attrs('decision'))))
def test_unrev_inference():
    """Re-review edges as UNREV within and between consistent/inconsistent PCCs."""
    infr = demo.demodata_infr(num_pccs=0)
    # Same starting graph as test_incomp_inference: two consistent CCs,
    # two inconsistent CCs, and negative edges between components.
    initial_reviews = [
        ((1, 2), POSTV), ((2, 3), POSTV), ((3, 4), POSTV), ((4, 1), POSTV),
        # -----
        ((11, 12), POSTV), ((12, 13), POSTV), ((13, 14), POSTV),
        ((14, 11), POSTV), ((12, 14), NEGTV),
        # -----
        ((21, 22), POSTV), ((22, 23), POSTV), ((23, 21), NEGTV),
        # -----
        ((31, 32), POSTV), ((32, 33), POSTV), ((33, 31), POSTV),
        ((2, 32), NEGTV), ((3, 33), NEGTV), ((12, 21), NEGTV),
    ]
    for edge, decision in initial_reviews:
        infr.add_feedback(edge, decision)
    # -----
    # Now reset a mix of within-CC and between-CC edges to unreviewed.
    print('==========================')
    unreviewed_edges = [
        (1, 3), (1, 4), (1, 2),
        (11, 13), (11, 14), (11, 12),
        (1, 31), (2, 32), (12, 21), (23, 21), (12, 14),
    ]
    for edge in unreviewed_edges:
        infr.add_feedback(edge, UNREV)
    print('Final state:')
    print(ut.repr4(sorted(infr.gen_edge_attrs('decision'))))
def test_pos_neg():
    """Create, merge, and then repair inconsistent PCCs via POSTV/NEGTV reviews."""
    infr = demo.demodata_infr(num_pccs=0)
    # The full review sequence, applied in order below.
    reviews = [
        # Make 3 inconsistent CCs
        ((1, 2), POSTV), ((2, 3), POSTV), ((3, 4), POSTV), ((4, 1), POSTV),
        ((1, 3), NEGTV),
        # -----
        ((11, 12), POSTV), ((12, 13), POSTV), ((13, 11), NEGTV),
        # -----
        ((21, 22), POSTV), ((22, 23), POSTV), ((23, 21), NEGTV),
        # -----
        # Fix inconsistency
        ((23, 21), POSTV),
        # Merge inconsistent CCS
        ((1, 11), POSTV),
        # Negative edge within an inconsistent CC
        ((2, 13), NEGTV),
        # Negative edge external to an inconsistent CC
        ((12, 21), NEGTV),
        # -----
        # Make inconsistency from positive
        ((31, 32), POSTV), ((33, 34), POSTV), ((31, 33), NEGTV),
        ((32, 34), NEGTV), ((31, 34), POSTV),
        # Fix everything
        ((1, 3), POSTV), ((2, 4), POSTV), ((32, 34), POSTV),
        ((31, 33), POSTV), ((13, 11), POSTV), ((23, 21), POSTV),
        ((1, 11), NEGTV),
    ]
    for edge, decision in reviews:
        infr.add_feedback(edge, decision)
    print('Final state:')
    print(ut.repr4(sorted(infr.gen_edge_attrs('decision'))))
if __name__ == '__main__':
    r"""
    CommandLine:
        export PYTHONPATH=$PYTHONPATH:/home/joncrall/code/ibeis/ibeis/algo/graph/tests
        python ~/code/ibeis/ibeis/algo/graph/tests/test_graph_iden.py test_pos_neg
        python ~/code/ibeis/ibeis/algo/graph/tests/test_graph_iden.py test_unrev_inference
        python ~/code/ibeis/ibeis/algo/graph/tests/test_graph_iden.py test_incomp_inference
        python ~/code/ibeis/ibeis/algo/graph/tests/test_graph_iden.py --allexamples
    """
    import multiprocessing
    multiprocessing.freeze_support() # for win32
    import utool as ut # NOQA
    # ut.doctest_funcs() presumably discovers and runs this module's test
    # functions per the CommandLine docstring above -- see utool docs.
    ut.doctest_funcs()
    # import ubelt as ub
| apache-2.0 |
ZhaoYonggang198/xpdiff | default_visitor.py | 1 | 1875 | try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
def element_to_string(element):
    """Serialize *element* as XML text with all CR, LF and TAB characters removed.

    Works on both Python 2 (where ``ET.tostring`` returns ``str``) and
    Python 3 (where it returns ``bytes`` by default, decoded here before
    stripping -- the original code called ``bytes.replace`` with ``str``
    arguments, which raises TypeError on Python 3).
    """
    text = ET.tostring(element)
    if isinstance(text, bytes):
        # Python 3: tostring() defaults to us-ascii-encoded bytes.
        text = text.decode("ascii")
    return text.replace("\r", "").replace("\n", "").replace("\t", "")
class StdVisitor:
    """XML-diff visitor that prints one line per detected difference.

    Each line has three columns separated by a triple TAB: the path (the
    concatenated tag names of every element in *path*), the operation
    ("add", "remove", or "modify"), and a human-readable description.
    """

    def out(self, path, op, content):
        """Print one diff line for *path* with operation *op* and text *content*."""
        print("\t\t\t".join(["".join([e.tag for e in path]), op, content]))

    def attribAdd(self, path, other_element, attribAdd):
        """Report attributes *attribAdd* added on *other_element*."""
        self.out(path, "modify", element_to_string(other_element) + " attribute " + str(attribAdd) + " are added")

    def attribRemove(self, path, element, attribs):
        """Report attributes *attribs* removed from *element*."""
        self.out(path, "modify", element_to_string(element) + " attribute " + str(attribs) + " are removed")

    def attribModify(self, path, self_element, other_element, attribs):
        """Report attributes *attribs* whose values changed between the two elements."""
        # Serialize both sides with element_to_string for consistency; the
        # original passed other_element through raw ET.tostring, which skips
        # the whitespace stripping and yields bytes on Python 3 (making the
        # str + bytes concatenation fail there).
        self.out(path, "modify", "from " + element_to_string(self_element) + " to " + element_to_string(other_element) + " attribs " + str(attribs) + " are modified")

    def childElementAdd(self, path, element, children):
        """Report each element of *children* as added under *path*."""
        for child in children:
            # Extend a copy of the path so the caller's list is untouched.
            newpath = path[0:]
            newpath.append(child)
            self.out(newpath, "add", "node " + element_to_string(child))

    def childElementRemove(self, path, self_element, children):
        """Report each element of *children* as removed from under *path*."""
        for child in children:
            newpath = path[0:]
            newpath.append(child)
            self.out(newpath, "remove", "node " + element_to_string(child))
if __name__ == '__main__':
    # Smoke-test drive of StdVisitor against two tiny XML trees.
    visitor = StdVisitor()
    tree = ET.fromstring("<parent.><child. attr='value'></child.></parent.>")
    # list(tree) replaces the deprecated Element.getchildren(), which was
    # removed in Python 3.9; behavior is identical.
    element = list(tree)[0]
    path = [tree, element]
    attribs = ['attr']
    visitor.attribAdd(path, element, attribs)
    visitor.attribRemove(path, element, attribs)
    tree1 = ET.fromstring("<parent.><child. attr='value1'></child.></parent.>")
    element1 = list(tree1)[0]
    visitor.attribModify(path, element, element1, attribs)
    path.remove(element)
    childrens = list(tree)
    visitor.childElementAdd(path, tree, childrens)
    visitor.childElementRemove(path, tree, childrens)
| gpl-2.0 |
sauloal/pycluster | pypy-1.9_64/lib-python/2.7/dummy_threading.py | 321 | 2804 | """Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``thread`` was available which is not desired.
"""
from sys import modules as sys_modules
import dummy_thread
# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False
try:
# Could have checked if ``thread`` was not in sys.modules and gone
# a different route, but decided to mirror technique used with
# ``threading`` below.
if 'thread' in sys_modules:
held_thread = sys_modules['thread']
holding_thread = True
# Must have some module named ``thread`` that implements its API
# in order to initially import ``threading``.
sys_modules['thread'] = sys_modules['dummy_thread']
if 'threading' in sys_modules:
# If ``threading`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held_threading = sys_modules['threading']
holding_threading = True
del sys_modules['threading']
if '_threading_local' in sys_modules:
# If ``_threading_local`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held__threading_local = sys_modules['_threading_local']
holding__threading_local = True
del sys_modules['_threading_local']
import threading
# Need a copy of the code kept somewhere...
sys_modules['_dummy_threading'] = sys_modules['threading']
del sys_modules['threading']
sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
del sys_modules['_threading_local']
from _dummy_threading import *
from _dummy_threading import __all__
finally:
# Put back ``threading`` if we overwrote earlier
if holding_threading:
sys_modules['threading'] = held_threading
del held_threading
del holding_threading
# Put back ``_threading_local`` if we overwrote earlier
if holding__threading_local:
sys_modules['_threading_local'] = held__threading_local
del held__threading_local
del holding__threading_local
# Put back ``thread`` if we overwrote, else del the entry we made
if holding_thread:
sys_modules['thread'] = held_thread
del held_thread
else:
del sys_modules['thread']
del holding_thread
del dummy_thread
del sys_modules
| mit |
40223119/2015w11 | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/colordict.py | 621 | 24077 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
THECOLORS = {
'gray17' : (43, 43, 43, 255) ,
'gold' : (255, 215, 0, 255) ,
'gray10' : (26, 26, 26, 255) ,
'yellow' : (255, 255, 0, 255) ,
'gray11' : (28, 28, 28, 255) ,
'grey61' : (156, 156, 156, 255) ,
'grey60' : (153, 153, 153, 255) ,
'darkseagreen' : (143, 188, 143, 255) ,
'grey62' : (158, 158, 158, 255) ,
'grey65' : (166, 166, 166, 255) ,
'gray12' : (31, 31, 31, 255) ,
'grey67' : (171, 171, 171, 255) ,
'grey66' : (168, 168, 168, 255) ,
'grey69' : (176, 176, 176, 255) ,
'gray21' : (54, 54, 54, 255) ,
'lightsalmon4' : (139, 87, 66, 255) ,
'lightsalmon2' : (238, 149, 114, 255) ,
'lightsalmon3' : (205, 129, 98, 255) ,
'lightsalmon1' : (255, 160, 122, 255) ,
'gray32' : (82, 82, 82, 255) ,
'green4' : (0, 139, 0, 255) ,
'gray30' : (77, 77, 77, 255) ,
'gray31' : (79, 79, 79, 255) ,
'green1' : (0, 255, 0, 255) ,
'gray37' : (94, 94, 94, 255) ,
'green3' : (0, 205, 0, 255) ,
'green2' : (0, 238, 0, 255) ,
'darkslategray1' : (151, 255, 255, 255) ,
'darkslategray2' : (141, 238, 238, 255) ,
'darkslategray3' : (121, 205, 205, 255) ,
'aquamarine1' : (127, 255, 212, 255) ,
'aquamarine3' : (102, 205, 170, 255) ,
'aquamarine2' : (118, 238, 198, 255) ,
'papayawhip' : (255, 239, 213, 255) ,
'black' : (0, 0, 0, 255) ,
'darkorange3' : (205, 102, 0, 255) ,
'oldlace' : (253, 245, 230, 255) ,
'lightgoldenrod4' : (139, 129, 76, 255) ,
'gray90' : (229, 229, 229, 255) ,
'orchid1' : (255, 131, 250, 255) ,
'orchid2' : (238, 122, 233, 255) ,
'orchid3' : (205, 105, 201, 255) ,
'grey68' : (173, 173, 173, 255) ,
'brown' : (165, 42, 42, 255) ,
'purple2' : (145, 44, 238, 255) ,
'gray80' : (204, 204, 204, 255) ,
'antiquewhite3' : (205, 192, 176, 255) ,
'antiquewhite2' : (238, 223, 204, 255) ,
'antiquewhite1' : (255, 239, 219, 255) ,
'palevioletred3' : (205, 104, 137, 255) ,
'hotpink' : (255, 105, 180, 255) ,
'lightcyan' : (224, 255, 255, 255) ,
'coral3' : (205, 91, 69, 255) ,
'gray8' : (20, 20, 20, 255) ,
'gray9' : (23, 23, 23, 255) ,
'grey32' : (82, 82, 82, 255) ,
'bisque4' : (139, 125, 107, 255) ,
'cyan' : (0, 255, 255, 255) ,
'gray0' : (0, 0, 0, 255) ,
'gray1' : (3, 3, 3, 255) ,
'gray6' : (15, 15, 15, 255) ,
'bisque1' : (255, 228, 196, 255) ,
'bisque2' : (238, 213, 183, 255) ,
'bisque3' : (205, 183, 158, 255) ,
'skyblue' : (135, 206, 235, 255) ,
'gray' : (190, 190, 190, 255) ,
'darkturquoise' : (0, 206, 209, 255) ,
'rosybrown4' : (139, 105, 105, 255) ,
'deepskyblue3' : (0, 154, 205, 255) ,
'grey63' : (161, 161, 161, 255) ,
'indianred1' : (255, 106, 106, 255) ,
'grey78' : (199, 199, 199, 255) ,
'lightpink' : (255, 182, 193, 255) ,
'gray88' : (224, 224, 224, 255) ,
'gray22' : (56, 56, 56, 255) ,
'red' : (255, 0, 0, 255) ,
'grey11' : (28, 28, 28, 255) ,
'lemonchiffon3' : (205, 201, 165, 255) ,
'lemonchiffon2' : (238, 233, 191, 255) ,
'lemonchiffon1' : (255, 250, 205, 255) ,
'indianred3' : (205, 85, 85, 255) ,
'violetred1' : (255, 62, 150, 255) ,
'plum2' : (238, 174, 238, 255) ,
'plum1' : (255, 187, 255, 255) ,
'lemonchiffon4' : (139, 137, 112, 255) ,
'gray99' : (252, 252, 252, 255) ,
'grey13' : (33, 33, 33, 255) ,
'grey55' : (140, 140, 140, 255) ,
'darkcyan' : (0, 139, 139, 255) ,
'chocolate4' : (139, 69, 19, 255) ,
'lightgoldenrodyellow' : (250, 250, 210, 255) ,
'gray54' : (138, 138, 138, 255) ,
'lavender' : (230, 230, 250, 255) ,
'chartreuse3' : (102, 205, 0, 255) ,
'chartreuse2' : (118, 238, 0, 255) ,
'chartreuse1' : (127, 255, 0, 255) ,
'grey48' : (122, 122, 122, 255) ,
'grey16' : (41, 41, 41, 255) ,
'thistle' : (216, 191, 216, 255) ,
'chartreuse4' : (69, 139, 0, 255) ,
'darkorchid4' : (104, 34, 139, 255) ,
'grey42' : (107, 107, 107, 255) ,
'grey41' : (105, 105, 105, 255) ,
'grey17' : (43, 43, 43, 255) ,
'dimgrey' : (105, 105, 105, 255) ,
'dodgerblue4' : (16, 78, 139, 255) ,
'darkorchid2' : (178, 58, 238, 255) ,
'darkorchid3' : (154, 50, 205, 255) ,
'blue' : (0, 0, 255, 255) ,
'rosybrown2' : (238, 180, 180, 255) ,
'honeydew' : (240, 255, 240, 255) ,
'gray18' : (46, 46, 46, 255) ,
'cornflowerblue' : (100, 149, 237, 255) ,
'grey91' : (232, 232, 232, 255) ,
'gray14' : (36, 36, 36, 255) ,
'gray15' : (38, 38, 38, 255) ,
'gray16' : (41, 41, 41, 255) ,
'maroon4' : (139, 28, 98, 255) ,
'maroon3' : (205, 41, 144, 255) ,
'maroon2' : (238, 48, 167, 255) ,
'maroon1' : (255, 52, 179, 255) ,
'gray13' : (33, 33, 33, 255) ,
'gold3' : (205, 173, 0, 255) ,
'gold2' : (238, 201, 0, 255) ,
'gold1' : (255, 215, 0, 255) ,
'grey79' : (201, 201, 201, 255) ,
'palevioletred1' : (255, 130, 171, 255) ,
'palevioletred2' : (238, 121, 159, 255) ,
'gold4' : (139, 117, 0, 255) ,
'gray41' : (105, 105, 105, 255) ,
'gray84' : (214, 214, 214, 255) ,
'mediumpurple' : (147, 112, 219, 255) ,
'rosybrown1' : (255, 193, 193, 255) ,
'lightblue2' : (178, 223, 238, 255) ,
'lightblue3' : (154, 192, 205, 255) ,
'grey57' : (145, 145, 145, 255) ,
'lightblue1' : (191, 239, 255, 255) ,
'lightblue4' : (104, 131, 139, 255) ,
'gray33' : (84, 84, 84, 255) ,
'skyblue4' : (74, 112, 139, 255) ,
'grey97' : (247, 247, 247, 255) ,
'skyblue1' : (135, 206, 255, 255) ,
'gray27' : (69, 69, 69, 255) ,
'skyblue3' : (108, 166, 205, 255) ,
'skyblue2' : (126, 192, 238, 255) ,
'lavenderblush1' : (255, 240, 245, 255) ,
'darkgrey' : (169, 169, 169, 255) ,
'lavenderblush3' : (205, 193, 197, 255) ,
'darkslategrey' : (47, 79, 79, 255) ,
'lavenderblush4' : (139, 131, 134, 255) ,
'deeppink4' : (139, 10, 80, 255) ,
'grey99' : (252, 252, 252, 255) ,
'gray36' : (92, 92, 92, 255) ,
'coral4' : (139, 62, 47, 255) ,
'magenta3' : (205, 0, 205, 255) ,
'lightskyblue4' : (96, 123, 139, 255) ,
'mediumturquoise' : (72, 209, 204, 255) ,
'gray34' : (87, 87, 87, 255) ,
'floralwhite' : (255, 250, 240, 255) ,
'grey39' : (99, 99, 99, 255) ,
'grey36' : (92, 92, 92, 255) ,
'grey37' : (94, 94, 94, 255) ,
'grey34' : (87, 87, 87, 255) ,
'gray26' : (66, 66, 66, 255) ,
'royalblue2' : (67, 110, 238, 255) ,
'grey33' : (84, 84, 84, 255) ,
'turquoise1' : (0, 245, 255, 255) ,
'grey31' : (79, 79, 79, 255) ,
'steelblue1' : (99, 184, 255, 255) ,
'sienna4' : (139, 71, 38, 255) ,
'steelblue3' : (79, 148, 205, 255) ,
'lavenderblush2' : (238, 224, 229, 255) ,
'sienna1' : (255, 130, 71, 255) ,
'steelblue4' : (54, 100, 139, 255) ,
'sienna3' : (205, 104, 57, 255) ,
'aquamarine4' : (69, 139, 116, 255) ,
'lightyellow1' : (255, 255, 224, 255) ,
'lightyellow2' : (238, 238, 209, 255) ,
'lightsteelblue' : (176, 196, 222, 255) ,
'lightyellow4' : (139, 139, 122, 255) ,
'magenta2' : (238, 0, 238, 255) ,
'lightskyblue1' : (176, 226, 255, 255) ,
'lightgoldenrod' : (238, 221, 130, 255) ,
'magenta4' : (139, 0, 139, 255) ,
'gray87' : (222, 222, 222, 255) ,
'greenyellow' : (173, 255, 47, 255) ,
'navajowhite4' : (139, 121, 94, 255) ,
'darkslategray4' : (82, 139, 139, 255) ,
'olivedrab' : (107, 142, 35, 255) ,
'navajowhite1' : (255, 222, 173, 255) ,
'navajowhite2' : (238, 207, 161, 255) ,
'darkgoldenrod1' : (255, 185, 15, 255) ,
'sienna' : (160, 82, 45, 255) ,
'blue1' : (0, 0, 255, 255) ,
'yellow1' : (255, 255, 0, 255) ,
'gray61' : (156, 156, 156, 255) ,
'magenta1' : (255, 0, 255, 255) ,
'grey52' : (133, 133, 133, 255) ,
'orangered4' : (139, 37, 0, 255) ,
'palegreen' : (152, 251, 152, 255) ,
'gray86' : (219, 219, 219, 255) ,
'grey80' : (204, 204, 204, 255) ,
'seashell' : (255, 245, 238, 255) ,
'royalblue' : (65, 105, 225, 255) ,
'firebrick3' : (205, 38, 38, 255) ,
'blue4' : (0, 0, 139, 255) ,
'peru' : (205, 133, 63, 255) ,
'gray60' : (153, 153, 153, 255) ,
'aquamarine' : (127, 255, 212, 255) ,
'grey53' : (135, 135, 135, 255) ,
'tan4' : (139, 90, 43, 255) ,
'darkgoldenrod' : (184, 134, 11, 255) ,
'tan2' : (238, 154, 73, 255) ,
'tan1' : (255, 165, 79, 255) ,
'darkslategray' : (47, 79, 79, 255) ,
'royalblue3' : (58, 95, 205, 255) ,
'red2' : (238, 0, 0, 255) ,
'red1' : (255, 0, 0, 255) ,
'dodgerblue' : (30, 144, 255, 255) ,
'violetred4' : (139, 34, 82, 255) ,
'lightyellow' : (255, 255, 224, 255) ,
'paleturquoise1' : (187, 255, 255, 255) ,
'firebrick2' : (238, 44, 44, 255) ,
'mediumaquamarine' : (102, 205, 170, 255) ,
'lemonchiffon' : (255, 250, 205, 255) ,
'chocolate' : (210, 105, 30, 255) ,
'orchid4' : (139, 71, 137, 255) ,
'maroon' : (176, 48, 96, 255) ,
'gray38' : (97, 97, 97, 255) ,
'darkorange4' : (139, 69, 0, 255) ,
'mintcream' : (245, 255, 250, 255) ,
'darkorange1' : (255, 127, 0, 255) ,
'antiquewhite' : (250, 235, 215, 255) ,
'darkorange2' : (238, 118, 0, 255) ,
'grey18' : (46, 46, 46, 255) ,
'grey19' : (48, 48, 48, 255) ,
'grey38' : (97, 97, 97, 255) ,
'moccasin' : (255, 228, 181, 255) ,
'grey10' : (26, 26, 26, 255) ,
'chocolate1' : (255, 127, 36, 255) ,
'chocolate2' : (238, 118, 33, 255) ,
'chocolate3' : (205, 102, 29, 255) ,
'saddlebrown' : (139, 69, 19, 255) ,
'grey15' : (38, 38, 38, 255) ,
'darkslateblue' : (72, 61, 139, 255) ,
'lightskyblue' : (135, 206, 250, 255) ,
'gray69' : (176, 176, 176, 255) ,
'gray68' : (173, 173, 173, 255) ,
'deeppink' : (255, 20, 147, 255) ,
'gray65' : (166, 166, 166, 255) ,
'gray64' : (163, 163, 163, 255) ,
'gray67' : (171, 171, 171, 255) ,
'gray66' : (168, 168, 168, 255) ,
'gray25' : (64, 64, 64, 255) ,
'coral' : (255, 127, 80, 255) ,
'gray63' : (161, 161, 161, 255) ,
'gray62' : (158, 158, 158, 255) ,
'goldenrod4' : (139, 105, 20, 255) ,
'grey35' : (89, 89, 89, 255) ,
'gray89' : (227, 227, 227, 255) ,
'goldenrod1' : (255, 193, 37, 255) ,
'goldenrod2' : (238, 180, 34, 255) ,
'goldenrod3' : (205, 155, 29, 255) ,
'springgreen1' : (0, 255, 127, 255) ,
'springgreen2' : (0, 238, 118, 255) ,
'springgreen3' : (0, 205, 102, 255) ,
'springgreen4' : (0, 139, 69, 255) ,
'mistyrose1' : (255, 228, 225, 255) ,
'sandybrown' : (244, 164, 96, 255) ,
'grey30' : (77, 77, 77, 255) ,
'seashell2' : (238, 229, 222, 255) ,
'seashell3' : (205, 197, 191, 255) ,
'tan' : (210, 180, 140, 255) ,
'seashell1' : (255, 245, 238, 255) ,
'mistyrose3' : (205, 183, 181, 255) ,
'magenta' : (255, 0, 255, 255) ,
'pink' : (255, 192, 203, 255) ,
'ivory2' : (238, 238, 224, 255) ,
'ivory1' : (255, 255, 240, 255) ,
'lightcyan2' : (209, 238, 238, 255) ,
'mediumseagreen' : (60, 179, 113, 255) ,
'ivory4' : (139, 139, 131, 255) ,
'darkorange' : (255, 140, 0, 255) ,
'powderblue' : (176, 224, 230, 255) ,
'dodgerblue1' : (30, 144, 255, 255) ,
'gray95' : (242, 242, 242, 255) ,
'firebrick1' : (255, 48, 48, 255) ,
'gray7' : (18, 18, 18, 255) ,
'mistyrose4' : (139, 125, 123, 255) ,
'tomato' : (255, 99, 71, 255) ,
'indianred2' : (238, 99, 99, 255) ,
'steelblue2' : (92, 172, 238, 255) ,
'gray100' : (255, 255, 255, 255) ,
'seashell4' : (139, 134, 130, 255) ,
'grey89' : (227, 227, 227, 255) ,
'grey88' : (224, 224, 224, 255) ,
'grey87' : (222, 222, 222, 255) ,
'grey86' : (219, 219, 219, 255) ,
'grey85' : (217, 217, 217, 255) ,
'grey84' : (214, 214, 214, 255) ,
'midnightblue' : (25, 25, 112, 255) ,
'grey82' : (209, 209, 209, 255) ,
'grey81' : (207, 207, 207, 255) ,
'yellow3' : (205, 205, 0, 255) ,
'ivory3' : (205, 205, 193, 255) ,
'grey22' : (56, 56, 56, 255) ,
'gray85' : (217, 217, 217, 255) ,
'violetred3' : (205, 50, 120, 255) ,
'dodgerblue2' : (28, 134, 238, 255) ,
'gray42' : (107, 107, 107, 255) ,
'sienna2' : (238, 121, 66, 255) ,
'grey72' : (184, 184, 184, 255) ,
'grey73' : (186, 186, 186, 255) ,
'grey70' : (179, 179, 179, 255) ,
'palevioletred' : (219, 112, 147, 255) ,
'lightslategray' : (119, 136, 153, 255) ,
'grey77' : (196, 196, 196, 255) ,
'grey74' : (189, 189, 189, 255) ,
'slategray1' : (198, 226, 255, 255) ,
'pink1' : (255, 181, 197, 255) ,
'mediumpurple1' : (171, 130, 255, 255) ,
'pink3' : (205, 145, 158, 255) ,
'antiquewhite4' : (139, 131, 120, 255) ,
'lightpink1' : (255, 174, 185, 255) ,
'honeydew2' : (224, 238, 224, 255) ,
'khaki4' : (139, 134, 78, 255) ,
'darkolivegreen4' : (110, 139, 61, 255) ,
'gray45' : (115, 115, 115, 255) ,
'slategray3' : (159, 182, 205, 255) ,
'darkolivegreen1' : (202, 255, 112, 255) ,
'khaki1' : (255, 246, 143, 255) ,
'khaki2' : (238, 230, 133, 255) ,
'khaki3' : (205, 198, 115, 255) ,
'lavenderblush' : (255, 240, 245, 255) ,
'honeydew4' : (131, 139, 131, 255) ,
'salmon3' : (205, 112, 84, 255) ,
'salmon2' : (238, 130, 98, 255) ,
'gray92' : (235, 235, 235, 255) ,
'salmon4' : (139, 76, 57, 255) ,
'gray49' : (125, 125, 125, 255) ,
'gray48' : (122, 122, 122, 255) ,
'linen' : (250, 240, 230, 255) ,
'burlywood1' : (255, 211, 155, 255) ,
'green' : (0, 255, 0, 255) ,
'gray47' : (120, 120, 120, 255) ,
'blueviolet' : (138, 43, 226, 255) ,
'brown2' : (238, 59, 59, 255) ,
'brown3' : (205, 51, 51, 255) ,
'peachpuff' : (255, 218, 185, 255) ,
'brown4' : (139, 35, 35, 255) ,
'firebrick4' : (139, 26, 26, 255) ,
'azure1' : (240, 255, 255, 255) ,
'azure3' : (193, 205, 205, 255) ,
'azure2' : (224, 238, 238, 255) ,
'azure4' : (131, 139, 139, 255) ,
'tomato4' : (139, 54, 38, 255) ,
'orange4' : (139, 90, 0, 255) ,
'firebrick' : (178, 34, 34, 255) ,
'indianred' : (205, 92, 92, 255) ,
'orange1' : (255, 165, 0, 255) ,
'orange3' : (205, 133, 0, 255) ,
'orange2' : (238, 154, 0, 255) ,
'darkolivegreen' : (85, 107, 47, 255) ,
'gray2' : (5, 5, 5, 255) ,
'slategrey' : (112, 128, 144, 255) ,
'gray81' : (207, 207, 207, 255) ,
'darkred' : (139, 0, 0, 255) ,
'gray3' : (8, 8, 8, 255) ,
'lightsteelblue1' : (202, 225, 255, 255) ,
'lightsteelblue2' : (188, 210, 238, 255) ,
'lightsteelblue3' : (162, 181, 205, 255) ,
'lightsteelblue4' : (110, 123, 139, 255) ,
'tomato3' : (205, 79, 57, 255) ,
'gray43' : (110, 110, 110, 255) ,
'darkgoldenrod4' : (139, 101, 8, 255) ,
'grey50' : (127, 127, 127, 255) ,
'yellow4' : (139, 139, 0, 255) ,
'mediumorchid' : (186, 85, 211, 255) ,
'yellow2' : (238, 238, 0, 255) ,
'darkgoldenrod2' : (238, 173, 14, 255) ,
'darkgoldenrod3' : (205, 149, 12, 255) ,
'chartreuse' : (127, 255, 0, 255) ,
'mediumblue' : (0, 0, 205, 255) ,
'gray4' : (10, 10, 10, 255) ,
'springgreen' : (0, 255, 127, 255) ,
'orange' : (255, 165, 0, 255) ,
'gray5' : (13, 13, 13, 255) ,
'lightsalmon' : (255, 160, 122, 255) ,
'gray19' : (48, 48, 48, 255) ,
'turquoise' : (64, 224, 208, 255) ,
'lightseagreen' : (32, 178, 170, 255) ,
'grey8' : (20, 20, 20, 255) ,
'grey9' : (23, 23, 23, 255) ,
'grey6' : (15, 15, 15, 255) ,
'grey7' : (18, 18, 18, 255) ,
'grey4' : (10, 10, 10, 255) ,
'grey5' : (13, 13, 13, 255) ,
'grey2' : (5, 5, 5, 255) ,
'grey3' : (8, 8, 8, 255) ,
'grey0' : (0, 0, 0, 255) ,
'grey1' : (3, 3, 3, 255) ,
'gray50' : (127, 127, 127, 255) ,
'goldenrod' : (218, 165, 32, 255) ,
'grey58' : (148, 148, 148, 255) ,
'grey59' : (150, 150, 150, 255) ,
'gray51' : (130, 130, 130, 255) ,
'grey54' : (138, 138, 138, 255) ,
'mediumorchid4' : (122, 55, 139, 255) ,
'grey56' : (143, 143, 143, 255) ,
'navajowhite3' : (205, 179, 139, 255) ,
'mediumorchid1' : (224, 102, 255, 255) ,
'grey51' : (130, 130, 130, 255) ,
'mediumorchid3' : (180, 82, 205, 255) ,
'mediumorchid2' : (209, 95, 238, 255) ,
'cyan2' : (0, 238, 238, 255) ,
'cyan3' : (0, 205, 205, 255) ,
'gray23' : (59, 59, 59, 255) ,
'cyan1' : (0, 255, 255, 255) ,
'darkgreen' : (0, 100, 0, 255) ,
'gray24' : (61, 61, 61, 255) ,
'cyan4' : (0, 139, 139, 255) ,
'darkviolet' : (148, 0, 211, 255) ,
'peachpuff4' : (139, 119, 101, 255) ,
'gray28' : (71, 71, 71, 255) ,
'slateblue4' : (71, 60, 139, 255) ,
'slateblue3' : (105, 89, 205, 255) ,
'peachpuff1' : (255, 218, 185, 255) ,
'peachpuff2' : (238, 203, 173, 255) ,
'peachpuff3' : (205, 175, 149, 255) ,
'gray29' : (74, 74, 74, 255) ,
'paleturquoise' : (175, 238, 238, 255) ,
'darkgray' : (169, 169, 169, 255) ,
'grey25' : (64, 64, 64, 255) ,
'darkmagenta' : (139, 0, 139, 255) ,
'palegoldenrod' : (238, 232, 170, 255) ,
'grey64' : (163, 163, 163, 255) ,
'grey12' : (31, 31, 31, 255) ,
'deeppink3' : (205, 16, 118, 255) ,
'gray79' : (201, 201, 201, 255) ,
'gray83' : (212, 212, 212, 255) ,
'deeppink2' : (238, 18, 137, 255) ,
'burlywood4' : (139, 115, 85, 255) ,
'palevioletred4' : (139, 71, 93, 255) ,
'deeppink1' : (255, 20, 147, 255) ,
'slateblue2' : (122, 103, 238, 255) ,
'grey46' : (117, 117, 117, 255) ,
'royalblue4' : (39, 64, 139, 255) ,
'yellowgreen' : (154, 205, 50, 255) ,
'royalblue1' : (72, 118, 255, 255) ,
'slateblue1' : (131, 111, 255, 255) ,
'lightgoldenrod3' : (205, 190, 112, 255) ,
'lightgoldenrod2' : (238, 220, 130, 255) ,
'navy' : (0, 0, 128, 255) ,
'orchid' : (218, 112, 214, 255) ,
'ghostwhite' : (248, 248, 255, 255) ,
'purple' : (160, 32, 240, 255) ,
'darkkhaki' : (189, 183, 107, 255) ,
'grey45' : (115, 115, 115, 255) ,
'gray94' : (240, 240, 240, 255) ,
'wheat4' : (139, 126, 102, 255) ,
'gray96' : (245, 245, 245, 255) ,
'gray97' : (247, 247, 247, 255) ,
'wheat1' : (255, 231, 186, 255) ,
'gray91' : (232, 232, 232, 255) ,
'wheat3' : (205, 186, 150, 255) ,
'wheat2' : (238, 216, 174, 255) ,
'indianred4' : (139, 58, 58, 255) ,
'coral2' : (238, 106, 80, 255) ,
'coral1' : (255, 114, 86, 255) ,
'violetred' : (208, 32, 144, 255) ,
'rosybrown3' : (205, 155, 155, 255) ,
'deepskyblue2' : (0, 178, 238, 255) ,
'deepskyblue1' : (0, 191, 255, 255) ,
'bisque' : (255, 228, 196, 255) ,
'grey49' : (125, 125, 125, 255) ,
'khaki' : (240, 230, 140, 255) ,
'wheat' : (245, 222, 179, 255) ,
'lightslateblue' : (132, 112, 255, 255) ,
'mediumpurple3' : (137, 104, 205, 255) ,
'gray55' : (140, 140, 140, 255) ,
'deepskyblue' : (0, 191, 255, 255) ,
'gray98' : (250, 250, 250, 255) ,
'steelblue' : (70, 130, 180, 255) ,
'aliceblue' : (240, 248, 255, 255) ,
'lightskyblue2' : (164, 211, 238, 255) ,
'lightskyblue3' : (141, 182, 205, 255) ,
'lightslategrey' : (119, 136, 153, 255) ,
'blue3' : (0, 0, 205, 255) ,
'blue2' : (0, 0, 238, 255) ,
'gainsboro' : (220, 220, 220, 255) ,
'grey76' : (194, 194, 194, 255) ,
'purple3' : (125, 38, 205, 255) ,
'plum4' : (139, 102, 139, 255) ,
'gray56' : (143, 143, 143, 255) ,
'plum3' : (205, 150, 205, 255) ,
'plum' : (221, 160, 221, 255) ,
'lightgrey' : (211, 211, 211, 255) ,
'mediumslateblue' : (123, 104, 238, 255) ,
'mistyrose' : (255, 228, 225, 255) ,
'lightcyan1' : (224, 255, 255, 255) ,
'grey71' : (181, 181, 181, 255) ,
'darksalmon' : (233, 150, 122, 255) ,
'beige' : (245, 245, 220, 255) ,
'grey24' : (61, 61, 61, 255) ,
'azure' : (240, 255, 255, 255) ,
'honeydew1' : (240, 255, 240, 255) ,
'slategray2' : (185, 211, 238, 255) ,
'dodgerblue3' : (24, 116, 205, 255) ,
'slategray4' : (108, 123, 139, 255) ,
'grey27' : (69, 69, 69, 255) ,
'lightcyan3' : (180, 205, 205, 255) ,
'cornsilk' : (255, 248, 220, 255) ,
'tomato1' : (255, 99, 71, 255) ,
'gray57' : (145, 145, 145, 255) ,
'mediumvioletred' : (199, 21, 133, 255) ,
'tomato2' : (238, 92, 66, 255) ,
'snow4' : (139, 137, 137, 255) ,
'grey75' : (191, 191, 191, 255) ,
'snow2' : (238, 233, 233, 255) ,
'snow3' : (205, 201, 201, 255) ,
'snow1' : (255, 250, 250, 255) ,
'grey23' : (59, 59, 59, 255) ,
'cornsilk3' : (205, 200, 177, 255) ,
'lightcoral' : (240, 128, 128, 255) ,
'orangered' : (255, 69, 0, 255) ,
'navajowhite' : (255, 222, 173, 255) ,
'mediumpurple2' : (159, 121, 238, 255) ,
'slategray' : (112, 128, 144, 255) ,
'pink2' : (238, 169, 184, 255) ,
'grey29' : (74, 74, 74, 255) ,
'grey28' : (71, 71, 71, 255) ,
'gray82' : (209, 209, 209, 255) ,
'burlywood' : (222, 184, 135, 255) ,
'mediumpurple4' : (93, 71, 139, 255) ,
'mediumspringgreen' : (0, 250, 154, 255) ,
'grey26' : (66, 66, 66, 255) ,
'grey21' : (54, 54, 54, 255) ,
'grey20' : (51, 51, 51, 255) ,
'blanchedalmond' : (255, 235, 205, 255) ,
'pink4' : (139, 99, 108, 255) ,
'gray78' : (199, 199, 199, 255) ,
'tan3' : (205, 133, 63, 255) ,
'gray76' : (194, 194, 194, 255) ,
'gray77' : (196, 196, 196, 255) ,
'white' : (255, 255, 255, 255) ,
'gray75' : (191, 191, 191, 255) ,
'gray72' : (184, 184, 184, 255) ,
'gray73' : (186, 186, 186, 255) ,
'gray70' : (179, 179, 179, 255) ,
'gray71' : (181, 181, 181, 255) ,
'lightgray' : (211, 211, 211, 255) ,
'ivory' : (255, 255, 240, 255) ,
'gray46' : (117, 117, 117, 255) ,
'gray74' : (189, 189, 189, 255) ,
'lightyellow3' : (205, 205, 180, 255) ,
'lightpink2' : (238, 162, 173, 255) ,
'lightpink3' : (205, 140, 149, 255) ,
'paleturquoise4' : (102, 139, 139, 255) ,
'lightpink4' : (139, 95, 101, 255) ,
'paleturquoise3' : (150, 205, 205, 255) ,
'seagreen4' : (46, 139, 87, 255) ,
'seagreen3' : (67, 205, 128, 255) ,
'seagreen2' : (78, 238, 148, 255) ,
'seagreen1' : (84, 255, 159, 255) ,
'paleturquoise2' : (174, 238, 238, 255) ,
'gray52' : (133, 133, 133, 255) ,
'cornsilk4' : (139, 136, 120, 255) ,
'cornsilk2' : (238, 232, 205, 255) ,
'darkolivegreen3' : (162, 205, 90, 255) ,
'cornsilk1' : (255, 248, 220, 255) ,
'limegreen' : (50, 205, 50, 255) ,
'darkolivegreen2' : (188, 238, 104, 255) ,
'grey' : (190, 190, 190, 255) ,
'violetred2' : (238, 58, 140, 255) ,
'salmon1' : (255, 140, 105, 255) ,
'grey92' : (235, 235, 235, 255) ,
'grey93' : (237, 237, 237, 255) ,
'grey94' : (240, 240, 240, 255) ,
'grey95' : (242, 242, 242, 255) ,
'grey96' : (245, 245, 245, 255) ,
'grey83' : (212, 212, 212, 255) ,
'grey98' : (250, 250, 250, 255) ,
'lightgoldenrod1' : (255, 236, 139, 255) ,
'palegreen1' : (154, 255, 154, 255) ,
'red3' : (205, 0, 0, 255) ,
'palegreen3' : (124, 205, 124, 255) ,
'palegreen2' : (144, 238, 144, 255) ,
'palegreen4' : (84, 139, 84, 255) ,
'cadetblue' : (95, 158, 160, 255) ,
'violet' : (238, 130, 238, 255) ,
'mistyrose2' : (238, 213, 210, 255) ,
'slateblue' : (106, 90, 205, 255) ,
'grey43' : (110, 110, 110, 255) ,
'grey90' : (229, 229, 229, 255) ,
'gray35' : (89, 89, 89, 255) ,
'turquoise3' : (0, 197, 205, 255) ,
'turquoise2' : (0, 229, 238, 255) ,
'burlywood3' : (205, 170, 125, 255) ,
'burlywood2' : (238, 197, 145, 255) ,
'lightcyan4' : (122, 139, 139, 255) ,
'rosybrown' : (188, 143, 143, 255) ,
'turquoise4' : (0, 134, 139, 255) ,
'whitesmoke' : (245, 245, 245, 255) ,
'lightblue' : (173, 216, 230, 255) ,
'grey40' : (102, 102, 102, 255) ,
'gray40' : (102, 102, 102, 255) ,
'honeydew3' : (193, 205, 193, 255) ,
'dimgray' : (105, 105, 105, 255) ,
'grey47' : (120, 120, 120, 255) ,
'seagreen' : (46, 139, 87, 255) ,
'red4' : (139, 0, 0, 255) ,
'grey14' : (36, 36, 36, 255) ,
'snow' : (255, 250, 250, 255) ,
'darkorchid1' : (191, 62, 255, 255) ,
'gray58' : (148, 148, 148, 255) ,
'gray59' : (150, 150, 150, 255) ,
'cadetblue4' : (83, 134, 139, 255) ,
'cadetblue3' : (122, 197, 205, 255) ,
'cadetblue2' : (142, 229, 238, 255) ,
'cadetblue1' : (152, 245, 255, 255) ,
'olivedrab4' : (105, 139, 34, 255) ,
'purple4' : (85, 26, 139, 255) ,
'gray20' : (51, 51, 51, 255) ,
'grey44' : (112, 112, 112, 255) ,
'purple1' : (155, 48, 255, 255) ,
'olivedrab1' : (192, 255, 62, 255) ,
'olivedrab2' : (179, 238, 58, 255) ,
'olivedrab3' : (154, 205, 50, 255) ,
'orangered3' : (205, 55, 0, 255) ,
'orangered2' : (238, 64, 0, 255) ,
'orangered1' : (255, 69, 0, 255) ,
'darkorchid' : (153, 50, 204, 255) ,
'thistle3' : (205, 181, 205, 255) ,
'thistle2' : (238, 210, 238, 255) ,
'thistle1' : (255, 225, 255, 255) ,
'salmon' : (250, 128, 114, 255) ,
'gray93' : (237, 237, 237, 255) ,
'thistle4' : (139, 123, 139, 255) ,
'gray39' : (99, 99, 99, 255) ,
'lawngreen' : (124, 252, 0, 255) ,
'hotpink3' : (205, 96, 144, 255) ,
'hotpink2' : (238, 106, 167, 255) ,
'hotpink1' : (255, 110, 180, 255) ,
'lightgreen' : (144, 238, 144, 255) ,
'hotpink4' : (139, 58, 98, 255) ,
'darkseagreen4' : (105, 139, 105, 255) ,
'darkseagreen3' : (155, 205, 155, 255) ,
'darkseagreen2' : (180, 238, 180, 255) ,
'darkseagreen1' : (193, 255, 193, 255) ,
'deepskyblue4' : (0, 104, 139, 255) ,
'gray44' : (112, 112, 112, 255) ,
'navyblue' : (0, 0, 128, 255) ,
'darkblue' : (0, 0, 139, 255) ,
'forestgreen' : (34, 139, 34, 255) ,
'gray53' : (135, 135, 135, 255) ,
'grey100' : (255, 255, 255, 255) ,
'brown1' : (255, 64, 64, 255) ,
}
| gpl-3.0 |
bluven/eonboard | eoncloud_web/biz/idc/models.py | 4 | 2462 | # -*- encoding:utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class DataCenter(models.Model):
    """
    A data center maps to one backend OpenStack cluster.

    The project/user/password fields are the credentials that cloud-web
    uses against that cluster's API to create projects and users.
    """
    id = models.AutoField(primary_key=True)
    name = models.CharField(_("Name"), max_length=255)
    # IP of the compute center; unique so one cluster is registered only once
    host = models.CharField(_(u"openstack host"), null=False, blank=False, max_length=255, unique=True, help_text=_(u"IP of Compute Center"))
    project = models.CharField(_(u"default project"), null=False, blank=False, max_length=255,
                               help_text=_(u"Project Name of Data Center,recommended: admin"))
    user = models.CharField(_(u"default project user"), null=False, blank=False, max_length=255,
                            help_text=_(u"User who can visit the project"))
    password = models.CharField(_(u"default user password"), null=False, blank=False, max_length=255)
    auth_url = models.CharField(_(u"usually http://host:5000/v2.0"), null=False, blank=False, max_length=255)
    ext_net = models.CharField(_(u"External Network Name"), null=False, blank=False, max_length=255, default="net04_ext")

    @classmethod
    def get_default(cls):
        """Return the oldest data center (lowest primary key), or None when
        no data center has been registered yet.

        BUGFIX: the previous bare ``except:`` silently swallowed *every*
        exception (database errors, even KeyboardInterrupt).  Only the
        empty-table case -- IndexError from the ``[0]`` lookup -- is an
        expected condition here; anything else should propagate.
        """
        try:
            return cls.objects.filter().order_by('id')[0]
        except IndexError:
            return None

    def __unicode__(self):
        return self.name

    class Meta:
        db_table = "data_center"
        verbose_name = _("DataCenter")
        verbose_name_plural = _("DataCenter")
class UserDataCenter(models.Model):
    """
    A user data center corresponds to a project (tenant) in OpenStack.
    When a user registers in cloud-web, a project named
    "project-%(user-id)s" is created for them automatically.
    """
    id = models.AutoField(primary_key=True)
    # the backend cluster this per-user project lives in
    data_center = models.ForeignKey(DataCenter)
    # the cloud-web account that owns the tenant
    user = models.ForeignKey('auth.User')
    # keystone tenant (project) identity
    tenant_name = models.CharField(_("Tenant"), max_length=255)
    tenant_uuid = models.CharField(_("Tenant UUID"), max_length=64)
    # keystone credentials used when operating inside the tenant
    keystone_user = models.CharField(_("User"), max_length=255)
    keystone_password = models.CharField(_("Password"), max_length=255)

    def __unicode__(self):
        # e.g. "<data-center-name>-<username>"
        return "%s-%s" % (self.data_center.name, self.user.username)

    class Meta:
        db_table = "user_data_center"
        verbose_name = _("UserDataCenter")
        verbose_name_plural = _("UserDataCenter")
| apache-2.0 |
tyb0807/angr | angr/analyses/datagraph_meta.py | 1 | 3365 |
import logging
from ..sim_procedure import SimProcedure
l = logging.getLogger("angr.analyses.datagraph_meta")
class DataGraphMeta(object):
    """Shared driver logic for data-graph analyses.

    Subclasses must implement _make_block().  The traversal helpers assume
    the subclass has populated self._p (an angr project), self._vfg,
    self._imarks, self._simproc_map and self.graph -- TODO confirm against
    the concrete subclasses, only self._p is initialised here.
    """

    def __init__(self):
        # project handle; subclasses are expected to assign a real one
        self._p = None

    def _irsb(self, in_state):
        """
        We expect a VSA state here.

        Runs the project's factory on @in_state and returns the resulting
        successors object.
        """
        return self._p.factory.successors(in_state)

    def _vfg_node(self, addr):
        """
        Gets the VFG node at @addr.

        Despite the historical "Returns VFGNode or None" wording, this
        never returns None: it raises DataGraphError when no node matches.
        """
        for n in self._vfg._nodes.values():
            if n.addr == addr:
                return n
        raise DataGraphError("No VFG node at 0x%x" % addr)

    def get_irsb_at(self, addr):
        # NOTE: _vfg_node raises rather than returning None, so this guard
        # is effectively dead code; kept as-is for safety.
        n = self._vfg_node(addr)
        if n is None:
            raise DataGraphError("No VFG node at this address")
        return self._irsb(n.state)

    def pp(self, imarks=False):
        """
        Pretty print the graph. @imarks determine whether the printed graph
        represents instructions (coarse grained) for easier navigation, or
        exact statements.
        """
        for e in self.graph.edges():
            # copy the edge data so the annotated label does not leak back
            # into the graph itself
            data = dict(self.graph.get_edge_data(e[0], e[1]))
            data['label'] = str(data['label']) + " ; " + self._simproc_info(e[0]) + self._simproc_info(e[1])
            self._print_edge(e, data, imarks)

    def _print_edge(self, e, data, imarks=False):
        # Format both endpoints of the edge; each endpoint is a
        # (block address, statement index) pair.
        pp = []
        for stmt in e:
            if imarks is False or stmt[1] == -1:  # SimProcedure
                # statement index -1 marks a SimProcedure endpoint
                s = "(0x%x, %d)" % (stmt[0], stmt[1])
            else:
                # coarse-grained mode: print the instruction address instead
                s = "[0x%x]" % self._imarks[stmt]
            pp.append(s)
        print pp[0] + " -> " + pp[1] + " : " + str(data)

    def _branch(self, live_defs, node, path=""):
        """
        Recursive function, it branches in every possible path in the VFG.
        @live_defs: a dict {addr:stmt} of live definitions at the start point
        @node: the starting vfg node
        Returns: the address of the block where the execution stops
        """
        irsb = self._irsb(node.state)
        path = path + " -> " + hex(irsb.addr)
        if isinstance(irsb, SimProcedure):
            # remember the SimProcedure repr so pp() can annotate edges
            self._simproc_map[irsb.addr] = repr(irsb)
        l.debug("--> Branch: running block 0x%x" % irsb.addr)
        block = self._make_block(irsb, live_defs)
        self._imarks.update(block._imarks)
        if block.stop == True:
            #l.debug(" ### Stopping at block 0x%x" % (irsb.addr))
            l.debug(" ### End of path %s" % path)
            return irsb.addr
        succ = self._vfg._graph.successors(node)
        defer = []
        for s in succ:
            # Consider fake returns last
            if self._vfg._graph.edge[node][s]['jumpkind'] == 'Ijk_FakeRet':
                defer.append(s)
                continue
            # We need to make a copy of the dict !
            self._branch(dict(block.live_defs), s, path)
        # We explore every other paths before taking fake rets.
        # Ideally, we want to take fake rets only when functions don't
        # return.
        for s in defer:
            self._branch(dict(block.live_defs), s, path)

    def _make_block(self, vfg_node, live_defs):
        # abstract: concrete analyses build their own block representation
        raise DataGraphError("Not Implemented")

    def _simproc_info(self, node):
        # node is a (block address, statement index) pair; returns the
        # recorded SimProcedure repr for that address, or "" if none.
        if node[0] in self._simproc_map:
            return self._simproc_map[node[0]]
        return ""
| bsd-2-clause |
mxjl620/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)

RANDOM_STATE = 123

# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
                           n_clusters_per_class=1, n_informative=15,
                           random_state=RANDOM_STATE)

# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for paralellised ensembles but is necessary for tracking the OOB
# error trajectory during training.
# Three forests that differ only in `max_features`, the number of features
# considered when searching for each split.
ensemble_clfs = [
    ("RandomForestClassifier, max_features='sqrt'",
     RandomForestClassifier(warm_start=True, oob_score=True,
                            max_features="sqrt",
                            random_state=RANDOM_STATE)),
    ("RandomForestClassifier, max_features='log2'",
     RandomForestClassifier(warm_start=True, max_features='log2',
                            oob_score=True,
                            random_state=RANDOM_STATE)),
    ("RandomForestClassifier, max_features=None",
     RandomForestClassifier(warm_start=True, max_features=None,
                            oob_score=True,
                            random_state=RANDOM_STATE))
]

# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)

# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175

for label, clf in ensemble_clfs:
    for i in range(min_estimators, max_estimators + 1):
        # With warm_start=True, set_params + fit grows the existing forest
        # to `i` trees instead of refitting from scratch.
        clf.set_params(n_estimators=i)
        clf.fit(X, y)

        # Record the OOB error for each `n_estimators=i` setting.
        oob_error = 1 - clf.oob_score_
        error_rate[label].append((i, oob_error))

# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
    xs, ys = zip(*clf_err)
    plt.plot(xs, ys, label=label)

plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
ganeshnalawade/ansible | test/support/windows-integration/plugins/modules/win_whoami.py | 68 | 5621 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_whoami
version_added: "2.5"
short_description: Get information about the current user and process
description:
- Designed to return the same information as the C(whoami /all) command.
- Also includes information missing from C(whoami) such as logon metadata like
logon rights, id, type.
notes:
- If running this module with a non admin user, the logon rights will be an
empty list as Administrator rights are required to query LSA for the
information.
seealso:
- module: win_credential
- module: win_group_membership
- module: win_user_right
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Get whoami information
win_whoami:
'''
RETURN = r'''
authentication_package:
description: The name of the authentication package used to authenticate the
user in the session.
returned: success
type: str
sample: Negotiate
user_flags:
description: The user flags for the logon session, see UserFlags in
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380128).
returned: success
type: str
sample: Winlogon
upn:
description: The user principal name of the current user.
returned: success
type: str
sample: Administrator@DOMAIN.COM
logon_type:
description: The logon type that identifies the logon method, see
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380129.aspx).
returned: success
type: str
sample: Network
privileges:
description: A dictionary of privileges and their state on the logon token.
returned: success
type: dict
sample: {
"SeChangeNotifyPrivileges": "enabled-by-default",
"SeRemoteShutdownPrivilege": "disabled",
"SeDebugPrivilege": "enabled"
}
label:
description: The mandatory label set to the logon session.
returned: success
type: complex
contains:
domain_name:
description: The domain name of the label SID.
returned: success
type: str
sample: Mandatory Label
sid:
description: The SID in string form.
returned: success
type: str
sample: S-1-16-12288
account_name:
description: The account name of the label SID.
returned: success
type: str
sample: High Mandatory Level
type:
description: The type of SID.
returned: success
type: str
sample: Label
impersonation_level:
description: The impersonation level of the token, only valid if
C(token_type) is C(TokenImpersonation), see
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa379572.aspx).
returned: success
type: str
sample: SecurityAnonymous
login_time:
description: The logon time in ISO 8601 format
returned: success
type: str
sample: '2017-11-27T06:24:14.3321665+10:00'
groups:
description: A list of groups and attributes that the user is a member of.
returned: success
type: list
sample: [
{
"account_name": "Domain Users",
"domain_name": "DOMAIN",
"attributes": [
"Mandatory",
"Enabled by default",
"Enabled"
],
"sid": "S-1-5-21-1654078763-769949647-2968445802-513",
"type": "Group"
},
{
"account_name": "Administrators",
"domain_name": "BUILTIN",
"attributes": [
"Mandatory",
"Enabled by default",
"Enabled",
"Owner"
],
"sid": "S-1-5-32-544",
"type": "Alias"
}
]
account:
description: The running account SID details.
returned: success
type: complex
contains:
domain_name:
description: The domain name of the account SID.
returned: success
type: str
sample: DOMAIN
sid:
description: The SID in string form.
returned: success
type: str
sample: S-1-5-21-1654078763-769949647-2968445802-500
account_name:
description: The account name of the account SID.
returned: success
type: str
sample: Administrator
type:
description: The type of SID.
returned: success
type: str
sample: User
login_domain:
description: The name of the domain used to authenticate the owner of the
session.
returned: success
type: str
sample: DOMAIN
rights:
description: A list of logon rights assigned to the logon.
returned: success and running user is a member of the local Administrators group
type: list
sample: [
"SeNetworkLogonRight",
"SeInteractiveLogonRight",
"SeBatchLogonRight",
"SeRemoteInteractiveLogonRight"
]
logon_server:
description: The name of the server used to authenticate the owner of the
logon session.
returned: success
type: str
sample: DC01
logon_id:
description: The unique identifier of the logon session.
returned: success
type: int
sample: 20470143
dns_domain_name:
description: The DNS name of the logon session, this is an empty string if
this is not set.
returned: success
type: str
sample: DOMAIN.COM
token_type:
description: The token type to indicate whether it is a primary or
impersonation token.
returned: success
type: str
sample: TokenPrimary
'''
| gpl-3.0 |
avedaee/DIRAC | DataManagementSystem/DB/TransferDB.py | 2 | 71822 | ########################################################################
# $HeadURL$
# File: TransferDB.py
########################################################################
""" :mod: TransferDB
================
TransferDB is a front end to the TransferDB mysql database, built
on top of RequestDB.
This database holds all information used by DIRAC FTS subsystem.
It is mainly used by FTSSubmitAgent, FTSMonitorAgent and TransferAgent.
:deprecated:
"""
__RCSID__ = "$Id$"
# # imports
import threading
from types import ListType
import time
import datetime
import random
# # from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR, Time
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities import Time
from DIRAC.Resources.Storage.StorageElement import StorageElement
# # it's a magic!
# MAGIC_EPOC_NUMBER = 1270000000
# # This is a better one, using only datetime (DIRAC.Time) to avoid jumps when there is a change in time
# Epoch offset (in seconds) subtracted from Time.to2K() by __getFineTime so
# that the returned value stays a small number with millisecond precision.
NEW_MAGIC_EPOCH_2K = 323322400
# module-level logger for this database front end
gLogger.initialize( "DMS", "/Databases/TransferDB/Test" )
class TransferDB( DB ):
"""
.. class:: TransferDB
This db is holding all information used by FTS systems.
"""
  def __init__( self, systemInstance = "Default", maxQueueSize = 10 ):
    """c'tor

    :param self: self reference
    :param str systemInstance: system instance name (not referenced in this body)
    :param int maxQueueSize: size of queries queue passed to the DB base class
    """
    DB.__init__( self, "TransferDB", "RequestManagement/RequestDB", maxQueueSize )
    # serialises channel creation so concurrent createChannel calls cannot race
    self.getIdLock = threading.Lock()
    # # max attempts for reschedule
    self.maxAttempt = 100
def __getFineTime( self ):
"""
Return a "small" number of seconds with millisecond precision
"""
return Time.to2K() - NEW_MAGIC_EPOCH_2K
#################################################################################
# These are the methods for managing the Channels table
def createChannel( self, sourceSE, destSE ):
""" create a new Channels record
:param self: self reference
:param str sourceSE: source SE
:param str destSE: destination SE
"""
self.getIdLock.acquire()
res = self.checkChannelExists( sourceSE, destSE )
if res['OK']:
if res['Value']['Exists']:
self.getIdLock.release()
msg = 'TransferDB._createChannel: Channel %s already exists from %s to %s.' % ( res['Value']['ChannelID'],
sourceSE,
destSE )
gLogger.debug( msg )
return res
req = "INSERT INTO Channels (SourceSite, DestinationSite, Status, ChannelName) VALUES ('%s','%s','%s','%s-%s');"
req = req % ( sourceSE, destSE, 'Active', sourceSE, destSE )
res = self._update( req )
if not res['OK']:
self.getIdLock.release()
err = 'TransferDB._createChannel: Failed to create channel from %s to %s.' % ( sourceSE, destSE )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
self.getIdLock.release()
res = self.checkChannelExists( sourceSE, destSE )
return res
def checkChannelExists( self, sourceSE, destSE ):
""" check existence of FTS channel between :sourceSE: and :destSE:
:param self: self reference
:param str soucreSE: source SE
:param str destSE: target SE
"""
req = "SELECT ChannelID FROM Channels WHERE SourceSite = '%s' AND DestinationSite = '%s';" % ( sourceSE, destSE )
res = self._query( req )
if not res["OK"]:
err = 'TransferDB._checkChannelExists: Failed to retrieve ChannelID for %s to %s.' % ( sourceSE, destSE )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
resultDict = { "Exists" : False }
if res["Value"]:
resultDict["Exists"] = True
resultDict["ChannelID"] = res["Value"][0][0]
return S_OK( resultDict )
def getChannelID( self, sourceSE, destSE ):
""" get Channels.ChannelID for given source and destination SE
:param self: self reference
:param str sourceSE: source SE
:param str destSE: destination SE
"""
res = self.checkChannelExists( sourceSE, destSE )
if res['OK']:
if res['Value']['Exists']:
return S_OK( res['Value']['ChannelID'] )
else:
return S_ERROR( 'TransferDB._getChannelID: Channel from %s to %s does not exist.' % ( sourceSE, destSE ) )
return res
def getChannelAttribute( self, channelID, attrName ):
""" select attribute :attrName: from Channels table given ChannelID
:param self: self reference
:param int channelID: Channels.ChannelID
:param attrName: one of Channels table column name
"""
req = "SELECT %s FROM Channels WHERE ChannelID = %s;" % ( attrName, channelID )
res = self._query( req )
if not res['OK']:
err = 'TransferDB._getChannelAttribute: Failed to get %s for Channel %s.' % ( attrName, channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if not res['Value']:
err = 'TransferDB._getChannelAttribute: No Channel %s defined.' % channelID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return S_OK( res['Value'][0][0] )
def setChannelAttribute( self, channelID, attrName, attrValue ):
""" set Channels attribute :attrName: to new value :attrValue: given channelID
:param self: self reference
:param str attrName: one of Channels table column name
:param mixed attrValue: new value to be set
"""
req = "UPDATE Channels SET %s = '%s' WHERE ChannelID = %s;" % ( attrName, attrValue, channelID )
res = self._update( req )
if not res['OK']:
err = 'TransferDB._setChannelAttribute: Failed to update %s to %s for Channel %s.' % ( attrName,
attrValue,
channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def getChannels( self ):
""" read all records from Channels table
:param self: self reference
"""
req = "SELECT ChannelID,SourceSite,DestinationSite,Status,ChannelName from Channels;"
res = self._query( req )
if not res['OK']:
err = 'TransferDB._getChannels: Failed to retrieve channels information.'
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
channels = {}
keyTuple = ( "Source", "Destination", "Status", "ChannelName" )
for record in res['Value']:
channelID = record[0]
channelTuple = record[1:]
channels[channelID] = dict( zip( keyTuple, channelTuple ) )
return S_OK( channels )
def getChannelsForState( self, status ):
""" select Channels records for Status :status:
:param self: self reference
:param str status: required Channels.Status
"""
req = "SELECT ChannelID,SourceSite,DestinationSite FROM Channels WHERE Status = '%s';" % status
res = self._query( req )
if not res['OK']:
err = 'TransferDB._getChannelsInState: Failed to get Channels for Status = %s.' % status
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if not res['Value']:
return S_OK()
channels = {}
channelIDs = []
for channelID, sourceSite, destinationSite in res['Value']:
channels[channelID] = { 'SourceSite' : sourceSite, 'DestinationSite' : destinationSite }
channelIDs.append( channelID )
return S_OK( { 'ChannelIDs' : channelIDs, 'Channels' : channels } )
def decreaseChannelFiles( self, channelID ):
""" decrease Channels.Files for given Channels.ChannelID
:param self: self reference
:param int channelID: Channels.ChannelID
"""
req = "UPDATE Channels SET Files = Files-1 WHERE ChannelID = %s;" % channelID
res = self._update( req )
if not res['OK']:
err = 'TransferDB.decreaseChannelFiles: Failed to update Files for Channel %s.' % channelID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def increaseChannelFiles( self, channelID ):
""" increase Channels.Files for given Channels.ChannelID
:param self: self reference
:param int channelID: Channels.ChannelID
"""
req = "UPDATE Channels SET Files = Files+1 WHERE ChannelID = %s;" % channelID
res = self._update( req )
if not res['OK']:
err = 'TransferDB.increaseChannelFiles: Failed to update Files for Channel %s.' % channelID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
#################################################################################
# These are the methods for managing the Channel table
def selectChannelsForSubmission( self, maxJobsPerChannel ):
""" select active channels
:param self: self reference
:param int maxJobsPerChannel: max number of simultaneous FTS transfers for channel
"""
res = self.getChannelQueues( status = 'Waiting' )
if not res['OK']:
return res
if not res['Value']:
return S_OK()
channels = res['Value']
candidateChannels = {}
for channelID in channels:
if channels[channelID]['Status'] == 'Active':
if channels[channelID]['Files'] > 0:
candidateChannels[channelID] = channels[channelID]['Files']
if not candidateChannels:
return S_OK()
strChannelIDs = intListToString( candidateChannels.keys() )
req = "SELECT ChannelID,%s-SUM(Status='Submitted') FROM FTSReq WHERE ChannelID IN (%s) GROUP BY ChannelID;" % ( maxJobsPerChannel, strChannelIDs )
res = self._query( req )
if not res['OK']:
err = 'TransferDB.selectChannelsForSubmission: Failed to count FTSJobs on Channels %s.' % strChannelIDs
return S_ERROR( err )
channelJobs = {}
for channelID, jobs in res['Value']:
channelJobs[channelID] = jobs
for channelID in candidateChannels:
if channelID not in channelJobs:
channelJobs[channelID] = maxJobsPerChannel
req = "SELECT ChannelID,SourceSite,DestinationSite,FTSServer,Files FROM Channels WHERE ChannelID IN (%s);" % strChannelIDs
res = self._query( req )
channels = []
keyTuple = ( "ChannelID", "Source", "Destination", "FTSServer", "NumFiles" )
for recordTuple in res['Value']:
resDict = dict( zip( keyTuple, recordTuple ) )
for i in range( channelJobs[channelID] ):
channels.append( resDict )
return S_OK( channels )
  def selectChannelForSubmission( self, maxJobsPerChannel ):
    """ select one channel for submission

    Among "Active" channels with "Waiting" files, picks a channel with spare
    job slots; ties are broken by queue length and finally at random.

    :param self: self reference
    :param int maxJobsPerChannel: max nb of simultanious FTS requests
    """
    # # per-channel queue summary, restricted to 'Waiting' files
    res = self.getChannelQueues( status = 'Waiting' )
    if not res['OK']:
      return res
    if not res['Value']:
      return S_OK()
    channels = res['Value']
    # # candidates: Active channels with a non-empty queue; value = queue length
    candidateChannels = {}
    for channelID in channels:
      if channels[channelID]['Status'] == 'Active':
        if channels[channelID]['Files'] > 0:
          candidateChannels[channelID] = channels[channelID]['Files']
    if not candidateChannels:
      return S_OK()
    strChannelIDs = intListToString( candidateChannels.keys() )
    # # NOTE(review): the selected expression is maxJobsPerChannel minus the
    # # 'Submitted' job count, yet below it is treated as a plain job count -
    # # confirm the intended semantics before touching this logic
    req = "SELECT ChannelID,%s-SUM(Status='Submitted') FROM FTSReq WHERE ChannelID IN (%s) GROUP BY ChannelID;"
    req = req % ( maxJobsPerChannel, strChannelIDs )
    res = self._query( req )
    if not res['OK']:
      err = 'TransferDB._selectChannelForSubmission: Failed to count FTSJobs on Channels %s.' % strChannelIDs
      return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
    # # c'tor using a tuple of pairs ;)
    withJobs = dict( res["Value"] )
    minJobs = maxJobsPerChannel
    maxFiles = 0
    possibleChannels = []
    for channelID, files in candidateChannels.items():
      # # channels absent from FTSReq have no jobs at all
      numberOfJobs = withJobs[channelID] if channelID in withJobs else 0
      if numberOfJobs < maxJobsPerChannel:
        if numberOfJobs < minJobs:
          # # new minimum found; NOTE(review): earlier candidates are NOT
          # # discarded here - verify this accumulation is intended
          minJobs = numberOfJobs
          maxFiles = files
          possibleChannels.append( channelID )
        elif numberOfJobs == minJobs:
          if files > maxFiles:
            # # longer queue wins the tie: restart the candidate list
            maxFiles = files
            possibleChannels = []
            possibleChannels.append( channelID )
          elif candidateChannels[channelID] == maxFiles:
            # # same queue length: keep as an equally good candidate
            possibleChannels.append( channelID )
    if not possibleChannels:
      return S_OK()
    # # break remaining ties at random
    selectedChannel = random.choice( possibleChannels ) # randomize(possibleChannels)[0]
    resDict = channels[selectedChannel]
    resDict['ChannelID'] = selectedChannel
    return S_OK( resDict )
def addFileToChannel( self,
channelID,
fileID,
sourceSE,
sourceSURL,
targetSE,
targetSURL,
fileSize,
fileStatus = 'Waiting' ):
""" insert new Channel record
:param self: self reference
:param int channelID: Channels.ChannelID
:param int fileID: Files.FileID
:param str sourceSE: source SE
:param str sourceSURL: source storage URL
:param str targetSE: destination SE
:param str targetSURL: destination storage URL
:param int fileSize: file size in bytes
:param str fileStatus: Channel.Status
"""
res = self.checkFileChannelExists( channelID, fileID )
if not res['OK']:
err = 'TransferDB.addFileToChannel: Failed check existance of File %s on Channel %s.' % ( fileID, channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if res['Value']:
err = 'TransferDB.addFileToChannel: File %s already exists on Channel %s.' % ( fileID, channelID )
return S_ERROR( err )
time_order = self.__getFineTime()
values = "%s,%s,'%s','%s','%s','%s',UTC_TIMESTAMP(),%s,UTC_TIMESTAMP(),%s,%s,'%s'" % ( channelID, fileID,
sourceSE, sourceSURL,
targetSE, targetSURL,
time_order, time_order,
fileSize, fileStatus )
columns = ",".join( [ "ChannelID" , "FileID",
"SourceSE", "SourceSURL",
"TargetSE", "TargetSURL",
"SchedulingTime" , " SchedulingTimeOrder",
"LastUpdate", "LastUpdateTimeOrder",
"FileSize", "Status" ] )
req = "INSERT INTO Channel (%s) VALUES (%s);" % ( columns, values )
res = self._update( req )
if not res['OK']:
err = 'TransferDB.addFileToChannel: Failed to insert File %s to Channel %s.' % ( fileID, channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def checkFileChannelExists( self, channelID, fileID ):
""" check if record with :channelID: and :fileID: has been already put into Channel table
:param self: self reference
:param int channelID: Channel.ChannelID
:param int fileID: Files.FileID
"""
req = "SELECT FileID FROM Channel WHERE ChannelID = %s and FileID = %s;" % ( channelID, fileID )
res = self._query( req )
if not res['OK']:
err = 'TransferDB.checkFileChannelExists: Failed to check existance of File %s on Channel %s.' % ( fileID,
channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if res['Value']:
return S_OK( True )
return S_OK( False )
def setChannelFilesExecuting( self, channelID, fileIDs ):
""" update Channel.Status to 'Executing' given :channelID: and list of :fileID:
:param self: self reference
:param int channelID: Channel.ChannelID
:param list fileIDs: list of Channel.FileID
"""
strFileIDs = intListToString( fileIDs )
time_order = self.__getFineTime()
req = "UPDATE Channel SET Status='Executing', LastUpdate=UTC_TIMESTAMP(), " \
"LastUpdateTimeOrder=%s WHERE FileID IN (%s) AND ChannelID = %s;" % ( time_order,
strFileIDs,
channelID )
res = self._update( req )
if not res['OK']:
err = 'TransferDB.setChannelFilesExecuting: Failed to set file executing.'
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def updateAncestorChannelStatus( self, channelID, fileIDs ):
""" update Channel.Status
WaitingN => Waiting
DoneN => Done
:param self: self reference
:param int channelID: Channel.ChannelID
:param list fileIDs: list of Files.FileID
"""
if not fileIDs:
return S_OK()
strFileIDs = intListToString( fileIDs )
req = "UPDATE Channel SET Status = "
req += "CASE WHEN Status = 'Waiting%s' THEN 'Waiting' WHEN Status = 'Done%s' THEN 'Done' END " % ( channelID,
channelID )
req += "WHERE FileID IN (%s) AND ( Status = 'Waiting%s' OR Status = 'Done%s');" % ( strFileIDs,
channelID,
channelID )
res = self._update( req )
if not res['OK']:
err = "TransferDB.updateAncestorChannelStatus: Failed to update status"
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def removeFilesFromChannel( self, channelID, fileIDs ):
""" remove Files from Channel given list of FileIDs and ChannelID
:param self: self reference
:param int channelID: Channel.ChannelID
:param list fileIDs: list of Files.FileID
"""
for fileID in fileIDs:
res = self.removeFileFromChannel( channelID, fileID )
if not res['OK']:
return res
return res
def setFileToReschedule( self, fileID ):
""" allow reschedule for file
:param int fileID: Files.FileID
"""
req = "SELECT `Attempt` FROM `Files` WHERE FileID = %s;" % fileID
res = self._update( req )
if not res["OK"]:
gLogger.error( "setFileToReschedule: %s" % res["Message"] )
return res
res = res["Value"]
if res > self.maxAttempt:
return S_OK( "max reschedule attempt reached" )
req = "DELETE FROM `Channel` WHERE `FileID` = %s;" % fileID
res = self._update( req )
if not res["OK"]:
gLogger.error( "setFileToReschedule: %s" % res["Message"] )
return res
req = "DELETE FROM `ReplicationTree` WHERE `FileID` = %s;" % fileID
res = self._update( req )
if not res["OK"]:
gLogger.error( "setFileToReschedule: %s" % res["Message"] )
return res
req = "DELETE FROM `FileToCat` WHERE `FileID` = %s;" % fileID
res = self._update( req )
if not res["OK"]:
gLogger.error( "setFileToReschedule: %s" % res["Message"] )
return res
req = "UPDATE `Files` SET `Status`='Waiting',`Attempt`=`Attempt`+1 WHERE `Status` = 'Scheduled' AND `FileID` = %s;" % fileID
res = self._update( req )
if not res["OK"]:
gLogger.error( "setFileToReschedule: %s" % res["Message"] )
return res
return S_OK()
def removeFileFromChannel( self, channelID, fileID ):
""" remove single file from Channel given FileID and ChannelID
:param self: self refernce
:param int channelID: Channel.ChannelID
:param int FileID: Files.FileID
"""
req = "DELETE FROM Channel WHERE ChannelID = %s and FileID = %s;" % ( channelID, fileID )
res = self._update( req )
if not res['OK']:
err = 'TransferDB._removeFileFromChannel: Failed to remove File %s from Channel %s.' % ( fileID, channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
  def updateCompletedChannelStatus( self, channelID, fileIDs ):
    """ update Channel.Status and Files.Status to 'Done' given :channelID: and list of :fileIDs:

    Files.Status is only flipped to 'Done' for files whose sole remaining
    not-'Done' Channel row is the one on this channel (i.e. this completion
    is the last one pending for the file).

    :param self: self reference
    :param int channelID: Channel.ChannelID
    :param list fileIDs: list of Files.FileID
    """
    time_order = self.__getFineTime()
    strFileIDs = intListToString( fileIDs )
    # # count Channel rows per (FileID, Status) over ALL channels
    req = "SELECT FileID,Status,COUNT(*) FROM Channel WHERE FileID IN (%s) GROUP BY FileID,Status;" % strFileIDs
    res = self._query( req )
    if not res['OK']:
      return res
    # # fileDict: FileID -> number of Channel rows not yet 'Done'
    fileDict = {}
    for fileID, status, count in res['Value']:
      if fileID not in fileDict:
        fileDict[fileID] = 0
      if status != 'Done':
        fileDict[fileID] += count
    # # exactly one pending row left: it is the one being completed right now
    toUpdate = [ fileID for fileID, notDone in fileDict.items() if notDone == 1 ]
    if toUpdate:
      req = "UPDATE Files SET Status = 'Done' WHERE FileID IN (%s);" % intListToString( toUpdate )
      res = self._update( req )
      if not res['OK']:
        return res
    # # finally mark the Channel rows on this channel as 'Done'
    req = "UPDATE Channel SET Status = 'Done',LastUpdate=UTC_TIMESTAMP(),LastUpdateTimeOrder=%s" \
          ",CompletionTime=UTC_TIMESTAMP() WHERE FileID IN (%s) AND ChannelID = %s;" % ( time_order,
                                                                                         strFileIDs,
                                                                                         channelID )
    res = self._update( req )
    if not res['OK']:
      err = 'TransferDB.updateCompletedChannelStatus: Failed to update %s files from Channel %s.' % ( len( fileIDs ),
                                                                                                      channelID )
      return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
    return res
def resetFileChannelStatus( self, channelID, fileIDs ):
""" set Channel.Status to 'Waiting' given :channelID: and list of :fileIDs:
:param self: self reference
:param int channelID: Channel.ChannelID
:param list fileIDs: list of Files.FileID
"""
time_order = self.__getFineTime()
req = "UPDATE Channel SET Status = 'Waiting',LastUpdate=UTC_TIMESTAMP(),LastUpdateTimeOrder=%s," \
"Retries=Retries+1 WHERE FileID IN (%s) AND ChannelID = %s;" % ( time_order,
intListToString( fileIDs ),
channelID )
res = self._update( req )
if not res['OK']:
err = 'TransferDB.resetFileChannelStatus: Failed to reset %s files from Channel %s.' % ( len( fileIDs ),
channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def getFileChannelAttribute( self, channelID, fileID, attribute ):
""" select column :attribute: from Channel table given :channelID: and :fileID:
:param self: self reference
:param int channelID: Channel.ChannelID
:param int fileID: Channel.FileID
:param atr attribute: column name
"""
req = "SELECT %s from Channel WHERE ChannelID = %s and FileID = %s;" % ( attribute, channelID, fileID )
res = self._query( req )
if not res['OK']:
err = "TransferDB.getFileChannelAttribute: Failed to get %s for File %s on Channel %s." % ( attribute,
fileID,
channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if not res['Value']:
err = "TransferDB.getFileChannelAttribute: File %s doesn't exist on Channel %s." % ( fileID, channelID )
return S_ERROR( err )
attrValue = res['Value'][0][0]
return S_OK( attrValue )
def setFileChannelStatus( self, channelID, fileID, status ):
""" set Channel.Status to :status:, if :status: is 'Failed', it will also set
Files.Status to it
:param self: self reference
:param int channelID: Channel.ChannelID
:param int fileID: Files.FileID
:param str status: new value for Channel.Status
"""
if status == 'Failed':
req = "UPDATE Files SET Status = 'Failed' WHERE FileID = %d" % fileID
res = self._update( req )
if not res['OK']:
return res
res = self.setFileChannelAttribute( channelID, fileID, 'Status', status )
return res
def setFileChannelAttribute( self, channelID, fileID, attribute, attrValue ):
""" update :attribute: in Channel table to value :attrValue: given :channelID: and :fileID:
:param self: self reference
:param int channelID: Channel.ChannelID
:param int fileID: Files.FileID
:param str attribute: Channel table column name
:param mixed attrValue: new value for :attribute:
"""
if type( fileID ) == ListType:
fileIDs = fileID
else:
fileIDs = [fileID]
time_order = self.__getFineTime()
req = "UPDATE Channel SET %s = '%s',LastUpdate=UTC_TIMESTAMP(),LastUpdateTimeOrder=%s " \
"WHERE ChannelID=%s and FileID IN (%s);" % ( attribute, attrValue, time_order,
channelID, intListToString( fileIDs ) )
res = self._update( req )
if not res['OK']:
err = 'TransferDB._setFileChannelAttribute: Failed to set %s to %s for %s files on Channel %s.' % ( attribute,
attrValue,
len( fileIDs ),
channelID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
  def getFilesForChannel( self, channelID, numberOfFiles, status = 'Waiting', sourceSE = None, targetSE = None ):
    """ This method will only return Files for the oldest SourceSE,TargetSE Waiting for a given Channel.

    :param self: self reference
    :param int channelID: Channel.ChannelID
    :param int numberOfFiles: SQL LIMIT on the file selection
    :param str status: Channel.Status to select on
    :param str sourceSE: source SE; must be given together with :targetSE:
    :param str targetSE: target SE; must be given together with :sourceSE:
    :return: S_OK() when nothing matches, otherwise
             S_OK( { "SourceSE", "TargetSE", "Files" : [ dict, ... ] } )
    """
    # req = "SELECT SourceSE,TargetSE FROM Channel WHERE ChannelID = %s AND Status = 'Waiting'
    # ORDER BY Retries, LastUpdateTimeOrder LIMIT 1;" % (channelID)
    # # SEs can only be supplied as a pair
    if ( sourceSE and not targetSE ) or ( targetSE and not sourceSE ):
      return S_ERROR( 'Both source and target SEs should be supplied' )
    if not sourceSE and not targetSE:
      # # no pair given: take the SE pair of the oldest matching file
      req = "SELECT c.SourceSE,c.TargetSE FROM Channel as c,Files as f WHERE c.ChannelID=%s AND " \
            "c.Status='%s' AND c.FileID=f.FileID ORDER BY c.Retries,c.LastUpdateTimeOrder LIMIT 1;" % ( channelID, status )
      res = self._query( req )
      if not res['OK']:
        err = "TransferDB.getFilesForChannel: Failed to get files for Channel %s." % channelID
        return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
      if not res['Value']:
        return S_OK()
      sourceSE, targetSE = res['Value'][0]
    # # select up to :numberOfFiles: files for that SE pair, oldest first
    req = "SELECT c.FileID,c.SourceSURL,c.TargetSURL,c.FileSize,f.LFN FROM Files AS f, Channel AS c " \
          "WHERE c.ChannelID=%s AND c.FileID=f.FileID AND c.Status ='%s' AND c.SourceSE='%s' " \
          "AND c.TargetSE='%s' ORDER BY c.Retries,c.LastUpdateTimeOrder LIMIT %s;" % \
          ( channelID, status, sourceSE, targetSE, numberOfFiles )
    res = self._query( req )
    if not res['OK']:
      err = "TransferDB.getFilesForChannel: Failed to get files for Channel %s." % channelID
      return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
    if not res['Value']:
      return S_OK()
    keysTuple = ( "FileID", "SourceSURL", "TargetSURL", "Size", "LFN" )
    resDict = { "SourceSE" : sourceSE,
                "TargetSE" : targetSE,
                "Files" : [ dict( zip( keysTuple, recordTuple ) ) for recordTuple in res["Value"] ] }
    return S_OK( resDict )
def getChannelQueues( self, status = None ):
""" get Channel queues given Channel.Status :status:
if :status: is None, this will pick up all 'Waiting%'-like Channel records
:return: S_OK( { channelID : { "Files" : nbOfFiles, "Size" : sumOfFileSizes }, ... } )
:param self: self reference
:param str status: Channel.Status
"""
res = self.getChannels()
if not res['OK']:
return res
channels = res['Value']
channelIDs = channels.keys()
if status:
req = "SELECT ChannelID,COUNT(*),SUM(FileSize) FROM Channel WHERE Status = '%s' GROUP BY ChannelID;" % ( status )
else:
req = "SELECT ChannelID,COUNT(*),SUM(FileSize) FROM Channel WHERE Status LIKE 'Waiting%' GROUP BY ChannelID;"
res = self._query( req )
if not res['OK']:
err = "TransferDB.getChannelQueues: Failed to get Channel contents for Channels."
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
for channelID, fileCount, sizeCount in res['Value']:
channels[channelID]['Files'] = int( fileCount )
channels[channelID]['Size'] = int( sizeCount )
for channelID in channelIDs:
if "Files" not in channels[channelID]:
channels[channelID]['Files'] = 0
channels[channelID]['Size'] = 0
return S_OK( channels )
def getCompletedChannels( self, limit = 100 ):
""" get list of completed channels
:param int limit: select limit
"""
query = "SELECT DISTINCT FileID FROM Channel where Status = 'Done' AND FileID NOT IN ( SELECT FileID from Files ) LIMIT %s;" % limit
return self._query( query )
#################################################################################
# These are the methods for managing the FTSReq table
def insertFTSReq( self, ftsGUID, ftsServer, channelID ):
""" insert new FTSReq record
:return: S_OK( FTSReq.FTSReqID )
:param self: self reference
:param str ftsGUID: GUID returned from glite-submit-transfer
:param str ftsServer: FTS server URI
:param int channelID: Channel.ChannelID
"""
self.getIdLock.acquire()
req = "INSERT INTO FTSReq (FTSGUID,FTSServer,ChannelID,SubmitTime,LastMonitor) " \
"VALUES ('%s','%s',%s,UTC_TIMESTAMP(),UTC_TIMESTAMP());" % ( ftsGUID, ftsServer, channelID )
res = self._update( req )
if not res['OK']:
self.getIdLock.release()
err = "TransferDB._insertFTSReq: Failed to insert FTS GUID into FTSReq table."
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
req = "SELECT MAX(FTSReqID) FROM FTSReq;"
res = self._query( req )
self.getIdLock.release()
if not res['OK']:
err = "TransferDB._insertFTSReq: Failed to get FTSReqID from FTSReq table."
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if not res['Value']:
err = "TransferDB._insertFTSReq: Request details don't appear in FTSReq table."
return S_ERROR( err )
ftsReqID = res['Value'][0][0]
return S_OK( ftsReqID )
def setFTSReqStatus( self, ftsReqID, status ):
""" update FTSReq.Status to :status: given FTSReq.FTSReqID
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param str status: new status
"""
self.getIdLock.acquire()
req = "UPDATE FTSReq SET Status = '%s' WHERE FTSReqID = %s;" % ( status, ftsReqID )
res = self._update( req )
self.getIdLock.release()
if not res['OK']:
err = "TransferDB._setFTSReqStatus: Failed to set status to %s for FTSReq %s." % ( status, ftsReqID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def deleteFTSReq( self, ftsReqID ):
""" delete FTSReq record given FTSReq.FTSReqID
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
"""
self.getIdLock.acquire()
req = "DELETE FROM FTSReq WHERE FTSReqID = %s;" % ( ftsReqID )
res = self._update( req )
self.getIdLock.release()
if not res['OK']:
err = "TransferDB._deleteFTSReq: Failed to delete FTSReq %s." % ftsReqID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def getFTSReq( self ):
""" select 'Submitted' FTSReq
:param self: self reference
"""
# req = "SELECT f.FTSReqID,f.FTSGUID,f.FTSServer,f.ChannelID,f.SubmitTime,f.NumberOfFiles,f.TotalSize,c.SourceSite,
# c.DestinationSite FROM FTSReq as f,Channels as c WHERE f.Status = 'Submitted' and
# f.ChannelID=c.ChannelID ORDER BY f.LastMonitor;"
req = "SELECT FTSReqID,FTSGUID,FTSServer,ChannelID,SubmitTime,SourceSE,TargetSE,NumberOfFiles,TotalSize" \
" FROM FTSReq WHERE Status = 'Submitted' ORDER BY LastMonitor;"
res = self._query( req )
if not res['OK']:
err = "TransferDB._getFTSReq: Failed to get entry from FTSReq table."
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if not res['Value']:
# It is not an error that there are not requests
return S_OK()
keysTuple = ( "FTSReqID", "FTSGuid", "FTSServer", "ChannelID",
"SubmitTime", "SourceSE", "TargetSE", "NumberOfFiles", "TotalSize" )
ftsReqs = [ dict( zip( keysTuple, recordTuple ) ) for recordTuple in res["Value"] ]
return S_OK( ftsReqs )
def setFTSReqAttribute( self, ftsReqID, attribute, attrValue ):
""" set :attribute: column in FTSReq table to :attValue: given :ftsReqID:
:param self: slf reference
:param int ftsReqID: FTSReq.FTSReqID
:param str attribute: FTSReq column name
:param mixed attrValue: new value
"""
self.getIdLock.acquire()
req = "UPDATE FTSReq SET %s = '%s' WHERE FTSReqID = %s;" % ( attribute, attrValue, ftsReqID )
res = self._update( req )
self.getIdLock.release()
if not res['OK']:
err = "TransferDB._setFTSReqAttribute: Failed to set %s to %s for FTSReq %s." % ( attribute, attrValue, ftsReqID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def setFTSReqLastMonitor( self, ftsReqID ):
""" update FSTReq.LastMonitor timestamp for given :ftsReqID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
"""
req = "UPDATE FTSReq SET LastMonitor = UTC_TIMESTAMP() WHERE FTSReqID = %s;" % ftsReqID
res = self._update( req )
if not res['OK']:
err = "TransferDB._setFTSReqLastMonitor: Failed to update monitoring time for FTSReq %s." % ftsReqID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
#################################################################################
# These are the methods for managing the FileToFTS table
  def getFTSReqLFNs( self, ftsReqID, channelID = None, sourceSE = None ):
    """ collect LFNs for files in FTSReq

    :warning: if Files records are missing, a new artificial ones would be inserted
    using SubRequests.SubRequestID = 0, this could happen only if original request
    had been removed

    :param self: self reference
    :param int ftsReqID: FTSReq.FTSReqID
    :param int channelID: Channel.ChannelID
    :param str sourceSE: source SE
    :return: S_OK( { LFN : FileID, ... } )
    """
    # # LEFT JOIN: FileToFTS rows without a Files record come back with NULL LFN
    req = "SELECT ftf.FileID,f.LFN from FileToFTS as ftf LEFT JOIN Files as f " \
          "ON (ftf.FileID=f.FileID) WHERE ftf.FTSReqID = %s;" % ftsReqID
    res = self._query( req )
    if not res['OK']:
      err = "TransferDB.getFTSReqLFNs: Failed to get LFNs for FTSReq %s." % ftsReqID
      return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
    if not res['Value']:
      err = "TransferDB.getFTSReqLFNs: No LFNs found for FTSReq %s." % ftsReqID
      return S_ERROR( err )
    # # list of missing fileIDs
    missingFiles = []
    files = {}
    for fileID, lfn in res['Value']:
      if lfn:
        files[lfn] = fileID
      else:
        # # NULL LFN: the Files record has been removed in the meantime
        error = "TransferDB.getFTSReqLFNs: File %s does not exist in the Files table." % fileID
        gLogger.warn( error )
        missingFiles.append( fileID )
    # # failover mechnism for removed Requests
    if missingFiles:
      # # no channelID or sourceSE --> return S_ERROR
      if not channelID and not sourceSE:
        return S_ERROR( "TransferDB.getFTSReqLFNs: missing records in Files table: %s" % missingFiles )
      # # create storage element
      sourceSE = StorageElement( sourceSE )
      # # get FileID, SourceSURL pairs for missing FileIDs and channelID used in this FTSReq
      strMissing = intListToString( missingFiles )
      query = "SELECT FileID,SourceSURL FROM Channel WHERE ChannelID=%s and FileID in (%s);" % ( channelID,
                                                                                                 strMissing )
      query = self._query( query )
      if not query["OK"]:
        gLogger.error( "TransferDB.getFSTReqLFNs: unable to select PFNs for missing Files: %s" % query["Message"] )
        return query
      # # guess LFN from StorageElement, prepare query for inserting records, save lfn in files dict
      insertTemplate = "INSERT INTO Files (SubRequestID, FileID, LFN, Status) VALUES (0, %s, %s, 'Scheduled');"
      insertQuery = []
      for fileID, pfn in query["Value"]:
        # # reverse-engineer the LFN from the PFN using the SE configuration
        lfn = sourceSE.getPfnPath( pfn )
        if not lfn["OK"]:
          gLogger.error( "TransferDB.getFTSReqLFNs: %s" % lfn["Message"] )
          return lfn
        lfn = lfn["Value"]
        files[lfn] = fileID
        insertQuery.append( insertTemplate % ( fileID, lfn ) )
      # # insert missing 'fake' records
      if insertQuery:
        ins = self._update( "\n".join( insertQuery ) )
        if not ins["OK"]:
          gLogger.error( "TransferDB.getFTSReqLFNs: unable to insert fake Files for missing LFNs: %s" % ins["Message"] )
          return ins
    # # return files dict
    return S_OK( files )
def setFTSReqFiles( self, ftsReqID, channelID, fileAttributes ):
""" insert FileToFTS records for given :ftsReqID: and :channelID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param int channelID: Channel.ChannelID
:param list fileAttributes: [ (fileID, fileSize), ... ]
"""
for fileID, fileSize in fileAttributes:
req = "INSERT INTO FileToFTS (FTSReqID,FileID,ChannelID,SubmissionTime,FileSize)" \
" VALUES (%s,%s,%s,UTC_TIMESTAMP(),%s);" % ( ftsReqID, fileID, channelID, fileSize )
res = self._update( req )
if not res['OK']:
err = "TransferDB._setFTSReqFiles: Failed to set File %s for FTSReq %s." % ( fileID, ftsReqID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return S_OK()
def getFTSReqFileIDs( self, ftsReqID ):
""" read FileToFTS.FileID for given :ftsReqID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
"""
req = "SELECT FileID FROM FileToFTS WHERE FTSReqID = %s;" % ftsReqID
res = self._query( req )
if not res['OK']:
err = "TransferDB._getFTSReqFileIDs: Failed to get FileIDs for FTSReq %s." % ftsReqID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
if not res['Value']:
err = "TransferDB._getFTSReqLFNs: No FileIDs found for FTSReq %s." % ftsReqID
return S_ERROR( err )
fileIDs = [ fileID[0] for fileID in res["Value"] ]
return S_OK( fileIDs )
def getSizeOfCompletedFiles( self, ftsReqID, completedFileIDs ):
""" select size of transferred files in FTSRequest given :ftsReqID: and list of :completedFilesIDs:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param list completedFileIDs: list of Files.FileID
"""
strCompleted = intListToString( completedFileIDs )
req = "SELECT SUM(FileSize) FROM FileToFTS where FTSReqID = %s AND FileID IN (%s);" % ( ftsReqID, strCompleted )
res = self._query( req )
if not res['OK']:
err = "TransferDB._getSizeOfCompletedFiles: Failed to get successful transfer size for FTSReq %s." % ftsReqID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return S_OK( res['Value'][0][0] )
def removeFilesFromFTSReq( self, ftsReqID ):
""" delete all FileToFST records for given :ftsReqID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
"""
req = "DELETE FROM FileToFTS WHERE FTSReqID = %s;" % ftsReqID
res = self._update( req )
if not res['OK']:
err = "TransferDB._removeFilesFromFTSReq: Failed to remove files for FTSReq %s." % ftsReqID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def setFileToFTSFileAttributes( self, ftsReqID, channelID, fileAttributeTuples ):
""" update FileToFTS records for given :ftsReqID: and :channeID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param int channelID: Channel.ChannelID
:param list fileAttributeTuples: [ ( fileID, status, reason, retries, transferDuration ), ... ]
"""
for fileID, status, reason, retries, transferTime in fileAttributeTuples:
req = "UPDATE FileToFTS SET Status ='%s',Duration=%s,Reason='%s',Retries=%s,TerminalTime=UTC_TIMESTAMP() " \
"WHERE FileID=%s AND FTSReqID=%s AND ChannelID=%s;" % \
( status, transferTime, reason, retries, fileID, ftsReqID, channelID )
res = self._update( req )
if not res['OK']:
err = "TransferDB._setFileToFTSFileAttributes: Failed to set file attributes for FTSReq %s." % ftsReqID
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def setFileToFTSFileAttribute( self, ftsReqID, fileID, attribute, attrValue ):
""" update FileToFTS.:attribute: to :attrValue: for given :ftsReqID: and :fileID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param int fileID: Files.FileID
:param str attribute: FileToFTS column name
:param mixed attrValue: new value
"""
req = "UPDATE FileToFTS SET %s = '%s' WHERE FTSReqID = %s AND FileID = %s;" % ( attribute,
attrValue,
ftsReqID,
fileID )
res = self._update( req )
if not res['OK']:
err = "TransferDB._setFileToFTSFileAttribute: Failed to set %s to %s for File %s and FTSReq %s;" % ( attribute,
attrValue,
fileID,
ftsReqID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def setFileToFTSTerminalTime( self, ftsReqID, fileID ):
""" update FileToFTS.TerminalTime timestamp for given :ftsReqID: and :fileID:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param int fileID: Files.FileID
"""
req = "UPDATE FileToFTS SET TerminalTime=UTC_TIMESTAMP() WHERE FTSReqID=%s AND FileID=%s;" % ( ftsReqID, fileID )
res = self._update( req )
if not res['OK']:
err = "TransferDB._setFileToFTSTerminalTime: Failed to set terminal time for File %s and FTSReq %s;" % \
( fileID, ftsReqID )
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
return res
def getCountFileToFTS( self, interval = 3600, status = "Failed" ):
""" get count of distinct FileIDs per Channel for Failed FileToFTS
:param self: self reference
:param str status: FileToFTS.Status
:param int interval: time period in seconds
:return: S_OK( { FileToFTS.ChannelID : int } )
"""
channels = self.getChannels()
if not channels["OK"]:
return channels
channels = channels["Value"]
# # this we're going to return
channelDict = dict.fromkeys( channels.keys(), 0 )
# # query
query = "SELECT ChannelID, COUNT(DISTINCT FileID) FROM FileToFTS WHERE Status='%s' AND " \
"SubmissionTime > (UTC_TIMESTAMP() - INTERVAL %s SECOND) GROUP BY ChannelID;" % ( status, interval )
# # query query to query :)
query = self._query( query )
if not query["OK"]:
return S_ERROR( "TransferDB.getCountFailedFTSFiles: " % query["Message"] )
# # return dict updated by dict created from query tuple :)
channelDict.update( dict( query["Value"] ) )
return S_OK( channelDict )
def getChannelObservedThroughput( self, interval ):
""" create and return a dict holding summary info about FTS channels
and related transfers in last :interval: seconds
:return: S_OK( { channelID : { "Throughput" : float,
"Fileput" : float,
"SuccessfulFiles" : int,
"FailedFiles" : int
}, ... } )
:param self: self reference
:param int interval: monitoring interval in seconds
"""
channels = self.getChannels()
if not channels["OK"]:
return channels
channels = channels['Value']
# # create empty channelDict
channelDict = dict.fromkeys( channels.keys(), None )
# # fill with zeros
for channelID in channelDict:
channelDict[channelID] = {}
channelDict[channelID]["Throughput"] = 0
channelDict[channelID]["Fileput"] = 0
channelDict[channelID]["SuccessfulFiles"] = 0
channelDict[channelID]["FailedFiles"] = 0
channelTimeDict = dict.fromkeys( channels.keys(), 0 )
req = "SELECT ChannelID, Status, Count(*), SUM(FileSize), SUM(TimeDiff) FROM " \
"( SELECT ChannelID, Status,TIME_TO_SEC( TIMEDIFF( TerminalTime, SubmissionTime ) ) " \
"AS TimeDiff ,FileSize FROM FileToFTS WHERE Status in ('Completed', 'Failed') " \
"AND SubmissionTime > (UTC_TIMESTAMP() - INTERVAL %s SECOND) ) " \
"AS T GROUP BY ChannelID, Status;" % interval
res = self._query( req )
if not res['OK']:
err = 'TransferDB.getChannelObservedThroughput: Failed to transfer Statistics.'
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
for channelID, status, files, data, totalTime in res['Value']:
channelTimeDict[channelID] += float( totalTime )
if status == 'Completed':
channelDict[channelID]['Throughput'] = float( data )
channelDict[channelID]['SuccessfulFiles'] = int( files )
else:
channelDict[channelID]['FailedFiles'] = int( files )
for channelID in channelDict.keys():
if channelTimeDict[channelID]:
channelDict[channelID]['Throughput'] = channelDict[channelID]['Throughput'] / channelTimeDict[channelID]
channelDict[channelID]['Fileput'] = channelDict[channelID]['SuccessfulFiles'] / channelTimeDict[channelID]
return S_OK( channelDict )
#############################################
# First get the total time spend transferring files on the channels
req = "SELECT ChannelID,SUM(TIME_TO_SEC(TIMEDIFF(TerminalTime,SubmissionTime))) FROM FileToFTS " \
"WHERE Status IN ('Completed','Failed') AND SubmissionTime > (UTC_TIMESTAMP() - INTERVAL %s SECOND) " \
"GROUP BY ChannelID;" % interval
res = self._query( req )
if not res['OK']:
err = 'TransferDB._getFTSObservedThroughput: Failed to obtain total time transferring.'
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
channelTimeDict = dict.fromkeys( channels.keys(), None )
for channelID, totalTime in res['Value']:
channelTimeDict[channelID] = float( totalTime )
#############################################
# Now get the total size of the data transferred and the number of files that were successful
req = "SELECT ChannelID,SUM(FileSize),COUNT(*) FROM FileToFTS WHERE Status='Completed' AND " \
"SubmissionTime > (UTC_TIMESTAMP() - INTERVAL %s SECOND) GROUP BY ChannelID;" % interval
res = self._query( req )
if not res['OK']:
err = 'TransferDB._getFTSObservedThroughput: Failed to obtain total transferred data and files.'
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
for channelID, data, files in res['Value']:
if channelID in channelTimeDict and channelTimeDict[channelID]:
channelDict[channelID] = { 'Throughput': float( data ) / channelTimeDict[channelID],
'Fileput': float( files ) / channelTimeDict[channelID] }
#############################################
# Now get the success rate on the channels
req = "SELECT ChannelID,SUM(Status='Completed'),SUM(Status='Failed') from FileToFTS WHERE " \
"SubmissionTime > (UTC_TIMESTAMP() - INTERVAL %s SECOND) GROUP BY ChannelID;" % ( interval )
res = self._query( req )
if not res['OK']:
err = 'TransferDB._getFTSObservedThroughput: Failed to obtain success rate.'
return S_ERROR( '%s\n%s' % ( err, res['Message'] ) )
for channelID, successful, failed in res['Value']:
channelDict[channelID]['SuccessfulFiles'] = int( successful )
channelDict[channelID]['FailedFiles'] = int( failed )
return S_OK( channelDict )
def getTransferDurations( self, channelID, startTime = None, endTime = None ):
""" This obtains the duration of the successful transfers on the supplied channel
"""
req = "SELECT Duration FROM FileToFTS WHERE ChannelID = %s and Duration > 0" % channelID
if startTime:
req = "%s AND SubmissionTime > '%s'" % req
if endTime:
req = "%s AND SubmissionTime < '%s'" % req
res = self._query( req )
if not res['OK']:
err = "TransferDB.getTransferDurations: Failed to obtain durations from FileToFTS"
return S_ERROR( err )
durations = []
for value in res['Value']:
durations.append( int( value[0] ) )
return S_OK( durations )
#################################################################################
# These are the methods for managing the FTSReqLogging table
def addLoggingEvent( self, ftsReqID, event ):
""" insert new FTSReqLogging :event:
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
:param str event: new event
"""
req = "INSERT INTO FTSReqLogging (FTSReqID,Event,EventDateTime) VALUES (%s,'%s',UTC_TIMESTAMP());" % ( ftsReqID,
event )
res = self._update( req )
if not res['OK']:
err = "TransferDB._addLoggingEvent: Failed to add logging event to FTSReq %s" % ftsReqID
return S_ERROR( err )
return res
#################################################################################
# These are the methods for managing the ReplicationTree table
def addReplicationTree( self, fileID, tree ):
""" insert new ReplicationTree record given :fileID: and replicationTree dictionary
:param self: self refere
:param int fileID: Files.FileID
:param dict tree: replicationTree produced by StrategyHandler
"""
for channelID, repDict in tree.items():
ancestor = repDict["Ancestor"] if repDict["Ancestor"] else "-"
strategy = repDict['Strategy']
req = "INSERT INTO ReplicationTree (FileID, ChannelID, AncestorChannel, Strategy, CreationTime) " \
" VALUES (%s,%s,'%s','%s',UTC_TIMESTAMP());" % ( fileID, channelID, ancestor, strategy )
res = self._update( req )
if not res['OK']:
err = "TransferDB._addReplicationTree: Failed to add ReplicationTree for file %s" % fileID
return S_ERROR( err )
return S_OK()
#################################################################################
# These are the methods for managing the FileToCat table
def addFileRegistration( self, channelID, fileID, lfn, targetSURL, destSE ):
""" insert new record into FileToCat table
:param self: self reference
:param int channelID: Channel.ChannelID
:param int fileID: Files.FileID
:param str lfn: Files.LFN
:param str targetSULR: Channel.TargetSURL
:param str destSE: Channel.TargetSE
"""
req = "INSERT INTO FileToCat (FileID,ChannelID,LFN,PFN,SE,SubmitTime) " \
"VALUES (%s,%s,'%s','%s','%s',UTC_TIMESTAMP());" % ( fileID, channelID, lfn, targetSURL, destSE )
res = self._update( req )
if not res['OK']:
err = "TransferDB._addFileRegistration: Failed to add registration entry for file %s" % fileID
return S_ERROR( err )
return S_OK()
def getCompletedReplications( self ):
""" get SubRequests.Operation, SubRequest.SourceSE, FileToCat.LFN for FileToCat.Status='Waiting'
:param self: self reference
"""
req = "SELECT sR.Operation,sR.SourceSE,fc.LFN FROM SubRequests AS sR, Files AS f, FileToCat AS fc " \
"WHERE fc.Status = 'Waiting' AND fc.FileID=f.FileID AND sR.SubRequestID=f.SubRequestID;"
res = self._query( req )
if not res['OK']:
err = "TransferDB._getCompletedReplications: Failed to get completed replications."
return S_ERROR( err )
# # lazy people are using list c'tor
return S_OK( list( res["Value"] ) )
def getWaitingRegistrations( self ):
""" select 'Waiting' records from FileToCat table
:return: S_OK( [ (fileID, channelID, LFN, PFN, SE), ... ] )
:param self: self reference
"""
req = "SELECT FileID, ChannelID, LFN, PFN, SE FROM FileToCat WHERE Status='Waiting';"
res = self._query( req )
if not res['OK']:
err = "TransferDB._getWaitingRegistrations: Failed to get registrations."
return S_ERROR( err )
# # less typing, use list constructor
return S_OK( list( res["Value"] ) )
def setRegistrationWaiting( self, channelID, fileIDs ):
""" set FileToCat.Status to 'Waiting' for given :channelID: and list if :fileIDs:
:param self: self reference
:param int channelID: Channel.ChannelID
:param list fileIDs: list of Files.FileID
"""
req = "UPDATE FileToCat SET Status='Waiting' WHERE ChannelID=%s AND " \
"Status='Executing' AND FileID IN (%s);" % ( channelID, intListToString( fileIDs ) )
res = self._update( req )
if not res['OK']:
err = "TransferDB._setRegistrationWaiting: Failed to update %s files status." % len( fileIDs )
return S_ERROR( err )
return S_OK()
def setRegistrationDone( self, channelID, fileID ):
""" set FileToCat.Status to 'Done' for given :channelID: and :fileID:,
update FileToCat.CompleteTime timestamp
:param self: self reference
:param int channelID: Channel.ChanneID
:param list fileID: Files.FileID or list of Files.FileID
"""
if type( fileID ) == int:
fileID = [ fileID ]
req = "UPDATE FileToCat SET Status='Done',CompleteTime=UTC_TIMESTAMP() " \
"WHERE FileID IN (%s) AND ChannelID=%s AND Status='Waiting';" % ( intListToString( fileID ), channelID )
res = self._update( req )
if not res['OK']:
err = "TransferDB._setRegistrationDone: Failed to update %s status." % fileID
return S_ERROR( err )
return S_OK()
  def getRegisterFailover( self, fileID ):
    """ get ( PFN, SE, ChannelID ) for 'Waiting' FileToCat records of one file.

    In FTSMonitorAgent on failed registration FileToCat.Status is set back to
    'Waiting' (was 'Executing'); those records are queried here so the
    registration can be retried.

    :param self: self reference
    :param int fileID: Files.FileID
    :return: S_OK( [ ( PFN, SE, ChannelID ), ... ] )
    """
    # NOTE(review): MAX(SubmitTime) appears without GROUP BY, which relies on
    # MySQL's permissive aggregate handling and collapses the result to a
    # single row -- confirm this is the intended behaviour
    query = "SELECT PFN, SE, ChannelID, MAX(SubmitTime) FROM FileToCat WHERE Status='Waiting' AND FileID=%s;" % fileID
    res = self._query( query )
    if not res["OK"]:
      return res
    # # from now on don't care about SubmitTime and empty records
    res = [ rec[:3] for rec in res["Value"] if None not in rec[:3] ]
    # # return list of tuples [ ( PFN, SE, ChannelID ), ... ]
    return S_OK( res )
#################################################################################
# These are the methods used by the monitoring server
def getFTSJobDetail( self, ftsReqID ):
""" select detailed information about FTSRequest for given :ftsReqID:
:return: S_OK( [ ( LFN, FileToFTS.Status, Duration, Reason, Retries, FielSize ), ... ] )
:param self: self reference
:param int ftsReqID: FTSReq.FTSReqID
"""
req = "SELECT Files.LFN,FileToFTS.Status,Duration,Reason,Retries,FileSize FROM FileToFTS,Files " \
"WHERE FTSReqID =%s and Files.FileID=FileToFTS.FileID;" % ftsReqID
res = self._query( req )
if not res['OK']:
err = "TransferDB.getFTSJobDetail: Failed to get detailed info for FTSReq %s: %s." % ( ftsReqID, res['Message'] )
return S_ERROR( err )
# # lazy people are using list c'tor
return S_OK( list( res["Value"] ) )
def getSites( self ):
""" select distinct SourceSites and DestinationSites from Channels table
:param self: self reference
"""
req = "SELECT DISTINCT SourceSite FROM Channels;"
res = self._query( req )
if not res['OK']:
err = "TransferDB.getSites: Failed to get channel SourceSite: %s" % res['Message']
return S_ERROR( err )
sourceSites = [ record[0] for record in res["Value"] ]
req = "SELECT DISTINCT DestinationSite FROM Channels;"
res = self._query( req )
if not res['OK']:
err = "TransferDB.getSites: Failed to get channel DestinationSite: %s" % res['Message']
return S_ERROR( err )
destSites = [ record[0] for record in res["Value"] ]
return S_OK( { 'SourceSites' : sourceSites, 'DestinationSites' : destSites } )
def getFTSJobs( self ):
""" get FTS jobs
:param self: self reference
"""
req = "SELECT FTSReqID,FTSGUID,FTSServer,SubmitTime,LastMonitor,PercentageComplete," \
"Status,NumberOfFiles,TotalSize FROM FTSReq;"
res = self._query( req )
if not res['OK']:
err = "TransferDB.getFTSJobs: Failed to get detailed FTS jobs: %s" % res['Message']
return S_ERROR( err )
ftsReqs = []
for ftsReqID, ftsGUID, ftsServer, submitTime, lastMonitor, complete, status, files, size in res['Value']:
ftsReqs.append( ( ftsReqID, ftsGUID, ftsServer, str( submitTime ),
str( lastMonitor ), complete, status, files, size ) )
return S_OK( ftsReqs )
  def cleanUp( self, gracePeriod = 60, limit = 10 ):
    """ delete completed FTS requests
    it is using txns to be sure we have a proper snapshot of db

    :param self: self reference
    :param int gracePeriod: grace period in days
    :param int limit: selection of FTSReq limit
    :return: S_OK( list( tuple( 'txCmd', txRes ), ... ) )
    """
    # select at most :limit: finished requests last monitored before the grace period
    ftsReqs = self._query( "".join( [ "SELECT FTSReqID, ChannelID FROM FTSReq WHERE Status = 'Finished' ",
                                      "AND LastMonitor < DATE_SUB( UTC_DATE(), INTERVAL %s DAY ) LIMIT %s;" % ( gracePeriod,
                                                                                                                limit ) ] ) )
    if not ftsReqs["OK"]:
      return ftsReqs
    # discard rows containing NULLs
    ftsReqs = [ item for item in ftsReqs["Value"] if None not in item ]
    delQueries = []
    for ftsReqID, channelID in ftsReqs:
      fileIDs = self._query( "SELECT FileID from FileToFTS WHERE FTSReqID = %s AND ChannelID = %s;" % ( ftsReqID, channelID ) )
      if not fileIDs["OK"]:
        # best effort: skip this request on query failure, keep cleaning the rest
        continue
      fileIDs = [ fileID[0] for fileID in fileIDs["Value"] if fileID ]
      # remove the per-file rows first, then the logging and the request itself
      for fileID in fileIDs:
        delQueries.append( "DELETE FROM FileToFTS WHERE FileID = %s and FTSReqID = %s;" % ( fileID, ftsReqID ) )
      delQueries.append( "DELETE FROM FTSReqLogging WHERE FTSReqID = %s;" % ftsReqID )
      delQueries.append( "DELETE FROM FTSReq WHERE FTSReqID = %s;" % ftsReqID )
    # orphaned Channel rows: FileID no longer present in Files nor FileToFTS
    channels = self._query( "".join( [ "SELECT FileID, ChannelID FROM Channel ",
                                       "WHERE FileID NOT IN ( SELECT FileID FROM Files ) "
                                       "AND FileID NOT IN ( SELECT FileID FROM FileToFTS ) LIMIT %s;" % int( limit ) ] ) )
    if not channels["OK"]:
      return channels
    channels = [ channel for channel in channels["Value"] if None not in channel ]
    for channel in channels:
      delQueries.append( "DELETE FROM Channel WHERE FileID = %s AND ChannelID = %s;" % channel )
      delQueries.append( "DELETE FROM ReplicationTree WHERE FileID = %s AND ChannelID = %s;" % channel )
      delQueries.append( "DELETE FROM FileToCat WHERE FileID = %s and ChannelID = %s;" % channel )
    # run all deletes in a single transaction; sorted for deterministic order
    return self._transaction( sorted( delQueries ) )
def getAttributesForReqList( self, reqIDList, attrList = None ):
""" Get attributes for the requests in the req ID list.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[FTSReqID][attribute_name] = attribute_value
"""
attrList = [] if not attrList else attrList
attrNames = ''
attr_tmp_list = [ 'FTSReqID', 'SourceSite', 'DestinationSite' ]
for attr in attrList:
if not attr in attr_tmp_list:
attrNames = '%sFTSReq.%s,' % ( attrNames, attr )
attr_tmp_list.append( attr )
attrNames = attrNames.strip( ',' )
reqList = ",".join( [ str( reqID ) for reqID in reqIDList ] )
req = 'SELECT FTSReq.FTSReqID,Channels.SourceSite,Channels.DestinationSite,%s FROM FTSReq,Channels ' \
'WHERE FTSReqID in (%s) AND Channels.ChannelID=FTSReq.ChannelID' % ( attrNames, reqList )
res = self._query( req )
if not res['OK']:
return res
retDict = {}
for attrValues in res['Value']:
reqDict = {}
for i in range( len( attr_tmp_list ) ):
try:
reqDict[attr_tmp_list[i]] = attrValues[i].tostring()
except:
reqDict[attr_tmp_list[i]] = str( attrValues[i] )
retDict[int( reqDict['FTSReqID'] )] = reqDict
return S_OK( retDict )
def selectFTSReqs( self, condDict, older = None, newer = None, orderAttribute = None, limit = None ):
""" Select fts requests matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by FTSReqID if requested, the result is limited to a given
number of jobs if requested.
"""
condition = self.__OLDbuildCondition( condDict, older, newer )
if orderAttribute:
orderType = None
orderField = orderAttribute
if orderAttribute.find( ':' ) != -1:
orderType = orderAttribute.split( ':' )[1].upper()
orderField = orderAttribute.split( ':' )[0]
condition = condition + ' ORDER BY ' + orderField
if orderType:
condition = condition + ' ' + orderType
if limit:
condition = condition + ' LIMIT ' + str( limit )
cmd = 'SELECT FTSReqID from FTSReq, Channels ' + condition
res = self._query( cmd )
if not res['OK']:
return res
if not len( res['Value'] ):
return S_OK( [] )
return S_OK( map( self._to_value, res['Value'] ) )
  def __OLDbuildCondition( self, condDict, older = None, newer = None ):
    """ build SQL condition statement from provided condDict
    and other extra conditions

    :param dict condDict: required Key = Value pairs
    :param older: optional upper bound on LastUpdateTime
    :param newer: optional lower bound on LastUpdateTime
    :return: SQL WHERE clause as a string (always joins FTSReq to Channels)

    :TODO: make sure it is not used and delete this
    """
    condition = ''
    # first appended condition starts the WHERE clause, the rest use AND
    conjunction = "WHERE"
    if condDict:
      for attrName, attrValue in condDict.items():
        # plural site keys address the Channels table (singularised column
        # name), everything else is an FTSReq column
        if attrName in [ 'SourceSites', 'DestinationSites' ]:
          condition = ' %s %s Channels.%s=\'%s\'' % ( condition,
                                                      conjunction,
                                                      str( attrName.rstrip( 's' ) ),
                                                      str( attrValue ) )
        else:
          condition = ' %s %s FTSReq.%s=\'%s\'' % ( condition,
                                                    conjunction,
                                                    str( attrName ),
                                                    str( attrValue ) )
        conjunction = "AND"
      # join clause linking FTSReq rows to their channel
      condition += " AND FTSReq.ChannelID = Channels.ChannelID "
    else:
      condition += " WHERE FTSReq.ChannelID = Channels.ChannelID "
    if older:
      condition = ' %s %s LastUpdateTime < \'%s\'' % ( condition,
                                                       conjunction,
                                                       str( older ) )
      conjunction = "AND"
    if newer:
      condition = ' %s %s LastUpdateTime >= \'%s\'' % ( condition,
                                                        conjunction,
                                                        str( newer ) )
    return condition
#############################################################################
#
# These are the methods for monitoring the Reuqests, SubRequests and Files table
#
def selectRequests( self, condDict, older = None, newer = None, orderAttribute = None, limit = None ):
""" Select requests matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by RequestID if requested, the result is limited to a given
number of requests if requested.
"""
return self.__selectFromTable( 'Requests', 'RequestID', condDict, older, newer, orderAttribute, limit )
def selectSubRequests( self, condDict, older = None, newer = None, orderAttribute = None, limit = None ):
""" Select sub-requests matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by SubRequestID if requested, the result is limited to a given
number of sub-requests if requested.
"""
return self.__selectFromTable( 'SubRequests', 'SubRequestID', condDict, older, newer, orderAttribute, limit )
def selectFiles( self, condDict, older = None, newer = None, orderAttribute = None, limit = None ):
""" Select files matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by FileID if requested, the result is limited to a given
number of files if requested.
"""
return self.__selectFromTable( 'Files', 'FileID', condDict, older, newer, orderAttribute, limit )
def selectDatasets( self, condDict, older = None, newer = None, orderAttribute = None, limit = None ):
""" Select datasets matching the following conditions:
- condDict dictionary of required Key = Value pairs;
- with the last update date older and/or newer than given dates;
The result is ordered by DatasetID if requested, the result is limited to a given
number of datasets if requested.
"""
return self.__selectFromTable( 'Datasets', 'DatasetID', condDict, older, newer, orderAttribute, limit )
def getAttributesForRequestList( self, reqIDList, attrList = None ):
""" Get attributes for the requests in the the reqIDList.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[reqID][attribute_name] = attribute_value
"""
attrList = [] if not attrList else attrList
return self.__getAttributesForList( 'Requests', 'RequestID', reqIDList, attrList )
def getAttributesForSubRequestList( self, subReqIDList, attrList = None ):
""" Get attributes for the subrequests in the the reqIDList.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[subReqID][attribute_name] = attribute_value
"""
attrList = [] if not attrList else attrList
return self.__getAttributesForList( 'SubRequests', 'SubRequestID', subReqIDList, attrList )
def getAttributesForFilesList( self, fileIDList, attrList = None ):
""" Get attributes for the files in the the fileIDlist.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[fileID][attribute_name] = attribute_value
"""
attrList = [] if not attrList else attrList
return self.__getAttributesForList( 'Files', 'FileID', fileIDList, attrList )
def getAttributesForDatasetList( self, datasetIDList, attrList = None ):
""" Get attributes for the datasets in the the datasetIDlist.
Returns an S_OK structure with a dictionary of dictionaries as its Value:
ValueDict[datasetID][attribute_name] = attribute_value
"""
attrList = [] if not attrList else attrList
return self.__getAttributesForList( 'Datasets', 'DatasetID', datasetIDList, attrList )
  def __getAttributesForList( self, table, tableID, idList, attrList ):
    """ select :table: columns specified in :attrList: for given :idList:

    :param self: self reference
    :param str table: table name
    :param str tableID: primary key in :table:
    :param list idList: list of :table:.:tableID:
    :param list attrList: list of column names from :table:
    :return: S_OK( { rowID : { column_name : value } } )
    """
    res = self.getFields( table, outFields = [tableID] + attrList, condDict = { tableID : idList } )
    if not res['OK']:
      return res
    try:
      retDict = {}
      for retValues in res['Value']:
        # first selected column is always the row ID
        rowID = retValues[0]
        reqDict = {}
        reqDict[tableID] = rowID
        attrValues = retValues[1:]
        for i in range( len( attrList ) ):
          try:
            # buffer-like DB values expose tostring(); fall back to str() otherwise
            reqDict[attrList[i]] = attrValues[i].tostring()
          except Exception, error:
            reqDict[attrList[i]] = str( attrValues[i] )
        retDict[int( rowID )] = reqDict
      return S_OK( retDict )
    except Exception, error:
      return S_ERROR( 'TransferDB.__getAttributesForList: Failed\n%s' % str( error ) )
def __selectFromTable( self, table, tableID, condDict, older, newer, orderAttribute, limit ):
""" select something from table something
"""
res = self.getFields( table, [tableID], condDict, limit,
older = older, newer = newer,
timeStamp = 'LastUpdateTime',
orderAttribute = orderAttribute )
if not res['OK']:
return res
if not len( res['Value'] ):
return S_OK( [] )
return S_OK( map( self._to_value, res['Value'] ) )
def getDistinctRequestAttributes( self, attribute, condDict = None, older = None, newer = None ):
""" Get distinct values of the Requests table attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'Requests', attribute, condDict, older, newer, timeStamp = 'LastUpdateTime' )
def getDistinctSubRequestAttributes( self, attribute, condDict = None, older = None, newer = None ):
""" Get distinct values of SubRequests the table attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'SubRequests', attribute, condDict, older, newer, timeStamp = 'LastUpdateTime' )
def getDistinctFilesAttributes( self, attribute, condDict = None, older = None, newer = None, timeStamp = None ):
""" Get distinct values of the Files table attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'Files', attribute, condDict = None, older = None, newer = None, timeStamp = None )
def getDistinctChannelAttributes( self, attribute, condDict = None, older = None, newer = None, timeStamp = 'LastUpdateTime' ):
""" Get distinct values of the Channel table attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'Channel', attribute, condDict = None , older = None, newer = None, timeStamp = 'LastUpdate' )
def getDistinctChannelsAttributes( self, attribute, condDict = None, older = None, newer = None, timeStamp = None ):
""" Get distinct values of the Channels table attribute under specified conditions
"""
return self.getDistinctAttributeValues( 'Channels', attribute, condDict, older, newer, timeStamp )
| gpl-3.0 |
soldag/home-assistant | tests/components/mqtt/test_alarm_control_panel.py | 4 | 24043 | """The tests for the MQTT alarm control panel component."""
import copy
import json
import pytest
from homeassistant.components import alarm_control_panel
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_DISARMING,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.async_mock import patch
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.alarm_control_panel import common
# Alarm codes exercised by code-format tests (presumably referenced further
# down this module -- not used in this excerpt).
CODE_NUMBER = "1234"
CODE_TEXT = "HELLO_CODE"

# Minimal MQTT alarm panel configuration: no code required to arm/disarm.
DEFAULT_CONFIG = {
    alarm_control_panel.DOMAIN: {
        "platform": "mqtt",
        "name": "test",
        "state_topic": "alarm/state",
        "command_topic": "alarm/command",
    }
}

# Same panel, but arming requires the code "0123".
DEFAULT_CONFIG_CODE = {
    alarm_control_panel.DOMAIN: {
        "platform": "mqtt",
        "name": "test",
        "state_topic": "alarm/state",
        "command_topic": "alarm/command",
        "code": "0123",
        "code_arm_required": True,
    }
}
async def test_fail_setup_without_state_topic(hass, mqtt_mock):
    """Test for failing with no state topic."""
    broken_config = {
        alarm_control_panel.DOMAIN: {
            "platform": "mqtt",
            "command_topic": "alarm/command",
        }
    }
    # Zero platforms must be set up from a config missing the state topic.
    with assert_setup_component(0) as config:
        assert await async_setup_component(
            hass, alarm_control_panel.DOMAIN, broken_config
        )
        assert not config[alarm_control_panel.DOMAIN]
async def test_fail_setup_without_command_topic(hass, mqtt_mock):
    """Test failing with no command topic."""
    broken_config = {
        alarm_control_panel.DOMAIN: {
            "platform": "mqtt",
            "state_topic": "alarm/state",
        }
    }
    # Zero platforms must be set up from a config missing the command topic.
    with assert_setup_component(0):
        assert await async_setup_component(
            hass, alarm_control_panel.DOMAIN, broken_config
        )
async def test_update_state_via_state_topic(hass, mqtt_mock):
    """Test updating the panel state via the state topic."""
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG)
    await hass.async_block_till_done()

    entity_id = "alarm_control_panel.test"
    # Before any state message arrives the panel state is unknown.
    assert hass.states.get(entity_id).state == STATE_UNKNOWN

    supported_states = (
        STATE_ALARM_DISARMED,
        STATE_ALARM_ARMED_HOME,
        STATE_ALARM_ARMED_AWAY,
        STATE_ALARM_ARMED_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS,
        STATE_ALARM_PENDING,
        STATE_ALARM_ARMING,
        STATE_ALARM_DISARMING,
        STATE_ALARM_TRIGGERED,
    )
    # Every supported payload published on the topic must be mirrored by the entity.
    for state in supported_states:
        async_fire_mqtt_message(hass, "alarm/state", state)
        assert hass.states.get(entity_id).state == state
async def test_ignore_update_state_if_unknown_via_state_topic(hass, mqtt_mock):
    """Test ignoring unsupported payloads received on the state topic."""
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG)
    await hass.async_block_till_done()

    entity_id = "alarm_control_panel.test"
    assert hass.states.get(entity_id).state == STATE_UNKNOWN

    # An unsupported payload must leave the state untouched.
    async_fire_mqtt_message(hass, "alarm/state", "unsupported state")
    assert hass.states.get(entity_id).state == STATE_UNKNOWN
async def test_arm_home_publishes_mqtt(hass, mqtt_mock):
    """Test that arming home publishes ARM_HOME on the command topic."""
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG)
    await hass.async_block_till_done()

    await common.async_alarm_arm_home(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "ARM_HOME", 0, False)
async def test_arm_home_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
    """Test that no message is published for an invalid code.

    When code_arm_required = True
    """
    assert await async_setup_component(
        hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )

    publishes_before = mqtt_mock.async_publish.call_count
    # "abcd" does not match the configured code, so nothing may be published.
    await common.async_alarm_arm_home(hass, "abcd")
    assert mqtt_mock.async_publish.call_count == publishes_before
async def test_arm_home_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
    """Test that arming home publishes without a code.

    When code_arm_required = False
    """
    panel_config = copy.deepcopy(DEFAULT_CONFIG_CODE)
    panel_config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, panel_config)
    await hass.async_block_till_done()

    # No code supplied, yet arming succeeds because none is required.
    await common.async_alarm_arm_home(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "ARM_HOME", 0, False)
async def test_arm_away_publishes_mqtt(hass, mqtt_mock):
    """Test that arming away publishes ARM_AWAY on the command topic."""
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG)
    await hass.async_block_till_done()

    await common.async_alarm_arm_away(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "ARM_AWAY", 0, False)
async def test_arm_away_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
    """Test that no message is published for an invalid code.

    When code_arm_required = True
    """
    assert await async_setup_component(
        hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )

    publishes_before = mqtt_mock.async_publish.call_count
    # "abcd" does not match the configured code, so nothing may be published.
    await common.async_alarm_arm_away(hass, "abcd")
    assert mqtt_mock.async_publish.call_count == publishes_before
async def test_arm_away_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
    """Test that arming away publishes without a code.

    When code_arm_required = False
    """
    panel_config = copy.deepcopy(DEFAULT_CONFIG_CODE)
    panel_config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, panel_config)
    await hass.async_block_till_done()

    # No code supplied, yet arming succeeds because none is required.
    await common.async_alarm_arm_away(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "ARM_AWAY", 0, False)
async def test_arm_night_publishes_mqtt(hass, mqtt_mock):
    """Test that arming night publishes ARM_NIGHT on the command topic."""
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG)
    await hass.async_block_till_done()

    await common.async_alarm_arm_night(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "ARM_NIGHT", 0, False)
async def test_arm_night_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
    """Test that no message is published for an invalid code.

    When code_arm_required = True
    """
    assert await async_setup_component(
        hass, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )

    publishes_before = mqtt_mock.async_publish.call_count
    # "abcd" does not match the configured code, so nothing may be published.
    await common.async_alarm_arm_night(hass, "abcd")
    assert mqtt_mock.async_publish.call_count == publishes_before
async def test_arm_night_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
    """Test that arming night publishes without a code.

    When code_arm_required = False
    """
    panel_config = copy.deepcopy(DEFAULT_CONFIG_CODE)
    panel_config[alarm_control_panel.DOMAIN]["code_arm_required"] = False
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, panel_config)
    await hass.async_block_till_done()

    # No code supplied, yet arming succeeds because none is required.
    await common.async_alarm_arm_night(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "ARM_NIGHT", 0, False)
async def test_arm_custom_bypass_publishes_mqtt(hass, mqtt_mock):
    """Test publishing of MQTT messages while armed.

    Consistency fix: the inline configuration previously duplicated here was
    byte-identical to the shared DEFAULT_CONFIG used by the other arm tests,
    so the shared constant is used instead.
    """
    assert await async_setup_component(
        hass,
        alarm_control_panel.DOMAIN,
        DEFAULT_CONFIG,
    )
    await hass.async_block_till_done()

    await common.async_alarm_arm_custom_bypass(hass)
    mqtt_mock.async_publish.assert_called_once_with(
        "alarm/command", "ARM_CUSTOM_BYPASS", 0, False
    )
async def test_arm_custom_bypass_not_publishes_mqtt_with_invalid_code_when_req(
hass, mqtt_mock
):
"""Test not publishing of MQTT messages with invalid code.
When code_arm_required = True
"""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "alarm/state",
"command_topic": "alarm/command",
"code": "1234",
"code_arm_required": True,
}
},
)
await hass.async_block_till_done()
call_count = mqtt_mock.async_publish.call_count
await common.async_alarm_arm_custom_bypass(hass, "abcd")
assert mqtt_mock.async_publish.call_count == call_count
async def test_arm_custom_bypass_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
"""Test publishing of MQTT messages.
When code_arm_required = False
"""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "alarm/state",
"command_topic": "alarm/command",
"code": "1234",
"code_arm_required": False,
}
},
)
await hass.async_block_till_done()
await common.async_alarm_arm_custom_bypass(hass)
mqtt_mock.async_publish.assert_called_once_with(
"alarm/command", "ARM_CUSTOM_BYPASS", 0, False
)
async def test_disarm_publishes_mqtt(hass, mqtt_mock):
    """Test publishing of MQTT messages while disarmed."""
    assert await async_setup_component(
        hass,
        alarm_control_panel.DOMAIN,
        DEFAULT_CONFIG,
    )
    await hass.async_block_till_done()
    await common.async_alarm_disarm(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "DISARM", 0, False)
async def test_disarm_publishes_mqtt_with_template(hass, mqtt_mock):
    """Test publishing of MQTT messages while disarmed.
    When command_template set to output json
    """
    # command_template renders the action and code into a JSON payload.
    config = copy.deepcopy(DEFAULT_CONFIG_CODE)
    config[alarm_control_panel.DOMAIN]["code"] = "0123"
    config[alarm_control_panel.DOMAIN][
        "command_template"
    ] = '{"action":"{{ action }}","code":"{{ code }}"}'
    assert await async_setup_component(
        hass,
        alarm_control_panel.DOMAIN,
        config,
    )
    await hass.async_block_till_done()
    await common.async_alarm_disarm(hass, "0123")
    mqtt_mock.async_publish.assert_called_once_with(
        "alarm/command", '{"action":"DISARM","code":"0123"}', 0, False
    )
async def test_disarm_publishes_mqtt_when_code_not_req(hass, mqtt_mock):
    """Test publishing of MQTT messages while disarmed.
    When code_disarm_required = False
    """
    config = copy.deepcopy(DEFAULT_CONFIG_CODE)
    config[alarm_control_panel.DOMAIN]["code"] = "1234"
    config[alarm_control_panel.DOMAIN]["code_disarm_required"] = False
    assert await async_setup_component(
        hass,
        alarm_control_panel.DOMAIN,
        config,
    )
    await hass.async_block_till_done()
    await common.async_alarm_disarm(hass)
    mqtt_mock.async_publish.assert_called_once_with("alarm/command", "DISARM", 0, False)
async def test_disarm_not_publishes_mqtt_with_invalid_code_when_req(hass, mqtt_mock):
    """Test not publishing of MQTT messages with invalid code.
    When code_disarm_required = True
    """
    assert await async_setup_component(
        hass,
        alarm_control_panel.DOMAIN,
        DEFAULT_CONFIG_CODE,
    )
    # Wrong code: publish count must not change.
    call_count = mqtt_mock.async_publish.call_count
    await common.async_alarm_disarm(hass, "abcd")
    assert mqtt_mock.async_publish.call_count == call_count
async def test_update_state_via_state_topic_template(hass, mqtt_mock):
    """Test updating with template_value via state topic."""
    assert await async_setup_component(
        hass,
        alarm_control_panel.DOMAIN,
        {
            alarm_control_panel.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "test-topic",
                "state_topic": "test-topic",
                # Numeric payload is mapped to a panel state by the template;
                # backslash continuations keep it one logical string.
                "value_template": "\
            {% if (value | int) == 100 %}\
            armed_away\
            {% else %}\
            disarmed\
            {% endif %}",
            }
        },
    )
    await hass.async_block_till_done()
    # No message received yet -> state is unknown.
    state = hass.states.get("alarm_control_panel.test")
    assert state.state == STATE_UNKNOWN
    # Payload "100" satisfies the template condition -> armed_away.
    async_fire_mqtt_message(hass, "test-topic", "100")
    state = hass.states.get("alarm_control_panel.test")
    assert state.state == STATE_ALARM_ARMED_AWAY
async def test_attributes_code_number(hass, mqtt_mock):
    """Test that a numeric code exposes the numeric code format attribute."""
    config = copy.deepcopy(DEFAULT_CONFIG)
    config[alarm_control_panel.DOMAIN]["code"] = CODE_NUMBER
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("alarm_control_panel.test")
    assert (
        state.attributes.get(alarm_control_panel.ATTR_CODE_FORMAT)
        == alarm_control_panel.FORMAT_NUMBER
    )
async def test_attributes_code_text(hass, mqtt_mock):
    """Test that a text code exposes the text code format attribute."""
    config = copy.deepcopy(DEFAULT_CONFIG)
    config[alarm_control_panel.DOMAIN]["code"] = CODE_TEXT
    assert await async_setup_component(hass, alarm_control_panel.DOMAIN, config)
    await hass.async_block_till_done()
    state = hass.states.get("alarm_control_panel.test")
    assert (
        state.attributes.get(alarm_control_panel.ATTR_CODE_FORMAT)
        == alarm_control_panel.FORMAT_TEXT
    )
# The following tests delegate to the shared MQTT test helpers, which run the
# generic availability / attribute / discovery scenarios against this platform.
async def test_availability_when_connection_lost(hass, mqtt_mock):
    """Test availability after MQTT disconnection."""
    await help_test_availability_when_connection_lost(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )
async def test_availability_without_topic(hass, mqtt_mock):
    """Test availability without defined availability topic."""
    await help_test_availability_without_topic(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    await help_test_default_availability_payload(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    await help_test_custom_availability_payload(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG_CODE
    )
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with a value template."""
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test JSON attribute handling when the payload is not a dict."""
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test JSON attribute handling when the payload is invalid JSON."""
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    await help_test_discovery_update_attr(
        hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_unique_id(hass, mqtt_mock):
    """Test unique id option only creates one alarm per unique_id."""
    # Both entries deliberately share the same unique_id; the helper asserts
    # that only a single entity ends up being created.
    config = {
        alarm_control_panel.DOMAIN: [
            {
                "platform": "mqtt",
                "name": "Test 1",
                "state_topic": "test-topic",
                "command_topic": "command-topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
            {
                "platform": "mqtt",
                "name": "Test 2",
                "state_topic": "test-topic",
                "command_topic": "command-topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
        ]
    }
    await help_test_unique_id(hass, mqtt_mock, alarm_control_panel.DOMAIN, config)
async def test_discovery_removal_alarm(hass, mqtt_mock, caplog):
    """Test removal of discovered alarm_control_panel."""
    data = json.dumps(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
    await help_test_discovery_removal(
        hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data
    )
async def test_discovery_update_alarm_topic_and_template(hass, mqtt_mock, caplog):
    """Test update of discovered alarm_control_panel."""
    # Two discovery payloads differing in name, state topic AND template.
    config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
    config2 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
    config1["name"] = "Beer"
    config2["name"] = "Milk"
    config1["state_topic"] = "alarm/state1"
    config2["state_topic"] = "alarm/state2"
    config1["value_template"] = "{{ value_json.state1.state }}"
    config2["value_template"] = "{{ value_json.state2.state }}"
    # Each entry is ([(topic, payload), ...], expected_state, extra); the
    # tuples are interpreted by help_test_discovery_update (defined in the
    # shared helper module, not visible here).
    state_data1 = [
        ([("alarm/state1", '{"state1":{"state":"armed_away"}}')], "armed_away", None),
    ]
    # After the update, only messages on the NEW topic with the NEW template
    # key may change the state; stale topic/template combinations are ignored.
    state_data2 = [
        ([("alarm/state1", '{"state1":{"state":"triggered"}}')], "armed_away", None),
        ([("alarm/state1", '{"state2":{"state":"triggered"}}')], "armed_away", None),
        ([("alarm/state2", '{"state1":{"state":"triggered"}}')], "armed_away", None),
        ([("alarm/state2", '{"state2":{"state":"triggered"}}')], "triggered", None),
    ]
    data1 = json.dumps(config1)
    data2 = json.dumps(config2)
    await help_test_discovery_update(
        hass,
        mqtt_mock,
        caplog,
        alarm_control_panel.DOMAIN,
        data1,
        data2,
        state_data1=state_data1,
        state_data2=state_data2,
    )
async def test_discovery_update_alarm_template(hass, mqtt_mock, caplog):
    """Test update of discovered alarm_control_panel."""
    # Same topic before and after; only the value_template changes.
    config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
    config2 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
    config1["name"] = "Beer"
    config2["name"] = "Milk"
    config1["state_topic"] = "alarm/state1"
    config2["state_topic"] = "alarm/state1"
    config1["value_template"] = "{{ value_json.state1.state }}"
    config2["value_template"] = "{{ value_json.state2.state }}"
    state_data1 = [
        ([("alarm/state1", '{"state1":{"state":"armed_away"}}')], "armed_away", None),
    ]
    state_data2 = [
        ([("alarm/state1", '{"state1":{"state":"triggered"}}')], "armed_away", None),
        ([("alarm/state1", '{"state2":{"state":"triggered"}}')], "triggered", None),
    ]
    data1 = json.dumps(config1)
    data2 = json.dumps(config2)
    await help_test_discovery_update(
        hass,
        mqtt_mock,
        caplog,
        alarm_control_panel.DOMAIN,
        data1,
        data2,
        state_data1=state_data1,
        state_data2=state_data2,
    )
async def test_discovery_update_unchanged_alarm(hass, mqtt_mock, caplog):
    """Test that re-sending an identical discovery payload is a no-op."""
    config1 = copy.deepcopy(DEFAULT_CONFIG[alarm_control_panel.DOMAIN])
    config1["name"] = "Beer"
    data1 = json.dumps(config1)
    # Patch discovery_update so the helper can assert it is NOT called.
    with patch(
        "homeassistant.components.mqtt.alarm_control_panel.MqttAlarm.discovery_update"
    ) as discovery_update:
        await help_test_discovery_update_unchanged(
            hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, discovery_update
        )
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # data1 is incomplete (missing required topics); data2 is valid and must
    # still be processed after the broken payload.
    data1 = '{ "name": "Beer" }'
    data2 = (
        '{ "name": "Milk",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "test_topic" }'
    )
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, alarm_control_panel.DOMAIN, data1, data2
    )
# Device-registry and entity-id lifecycle scenarios, all delegated to the
# shared MQTT helper functions.
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT alarm control panel device registry integration."""
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT alarm control panel device registry integration."""
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    await help_test_entity_device_info_update(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    await help_test_entity_debug_info_message(
        hass, mqtt_mock, alarm_control_panel.DOMAIN, DEFAULT_CONFIG
    )
| apache-2.0 |
navtejsingh/pychimera | scripts/reduce.py | 2 | 6009 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to reduce raw science images from CHIMERA instrument.
Usage: python reduce.py [options] image
Author:
Navtej Saini
Organization:
Caltech, Pasadena, CA, USA
Version:
20 December 2015 0.1dev Initial implementation
05 February 2016 0.2 Added code to read in separate flat field
bias image.
--------------------------------------------------------------------------
"""
import os, sys
from StringIO import StringIO
from optparse import OptionParser
import chimera
def process(sci_files, sci_bias_file, flat_file, nskip, threshold, flat_bias_file, output):
"""
Entry point function to process science images.
Parameters
----------
sci_files : string
Science image file names
sci_bias_file : string
Bias image file name for science images
flat_file : string
Flat field file name
skip : int
Number of bias frames to skip before averaging the frames. Default is 0.
threshold : float
Threshold for normalized fat field (value between 0 and 1.0).
Default is 0.8.
flat_bias_file : string
Bias image file name for flat field images
Returns
-------
None
"""
print "REDUCE: CHIMERA Image Reduction Rotuine"
nskip = int(nskip)
threshold = float(threshold)
# Check if input is a string of FITS images or a text file with file names
if sci_files[0] == "@":
infile = sci_files[1:]
if not os.path.exists(infile):
print " Not able to locate file %s" %infile
image_cubes = []
with open(infile, "r") as fd:
for line in fd.readlines():
if len(line) > 1:
image_cubes.append(line.replace("\n", ""))
else:
image_cubes = sci_files.split(",")
# Read bias and flat field images
sci_bias_image = chimera.fitsread(sci_bias_file)
flat_image = chimera.fitsread(flat_file)
if flat_bias_file != "":
flat_bias_image = chimera.fitsread(flat_bias_file)
else:
flat_bias_image = sci_bias_image
# Create master bias image
print " Generating master bias image"
master_sci_bias_image = chimera.masterbias(sci_bias_image)
master_flat_bias_image = chimera.masterbias(flat_bias_image)
# Create normalized flat field
print " Generating normalized flat field image"
master_flat_image = chimera.masterflat(flat_image, master_flat_bias_image)
ncubes = len(image_cubes)
for i in range(ncubes):
sci_file = image_cubes[i]
print " Reducing science image : ", sci_file
sci_image, header = chimera.fitsread(sci_file, header = True)
# Reduced the science frames
sci_red_image, sci_avg_image = chimera.imreduce(sci_image, master_sci_bias_image, master_flat_image)
# Write the reduced and average FITS image
if output != "":
red_file = output + "_final.fits"
avg_file = output + "_avg.fits"
else:
red_file = sci_file.replace('.fits', '_final.fits')
avg_file = sci_file.replace('.fits', '_avg.fits')
if os.path.exists(red_file):
os.remove(red_file)
if os.path.exists(avg_file):
os.remove(avg_file)
chimera.fitswrite(sci_red_image, red_file, header = header)
chimera.fitswrite(sci_avg_image, avg_file, header = header)
print " Reduced science image : ", red_file
return
if __name__ == "__main__":
    usage = "Usage: python %prog [options] sci_image bias_image flat_image"
    description = "Description. Utility to reduce raw science CHIMERA instrument images."
    parser = OptionParser(usage = usage, version = "%prog 0.1dev", description = description)
    # NOTE(review): -v and -q share dest "verbose" with conflicting defaults;
    # optparse keeps the last default registered (True), so output is verbose
    # unless -q is given — confirm that is intended.
    parser.add_option("-v", "--verbose",
                    action="store_true", dest="verbose", default = False,
                    help = "print result messages to stdout"
                    )
    parser.add_option("-q", "--quiet",
                    action="store_false", dest="verbose", default = True,
                    help = "don't print result messages to stdout"
                    )
    parser.add_option("-s", "--skip", dest = "skip",
                    action='store', metavar="SKIP", help = "Number of frames to skip for average master bias (default is 0)",
                    default = 0
                    )
    parser.add_option("-t", "--threshold", dest = "threshold",
                    action='store', metavar="THRESHOLD", help = "Threshold for normalized flatfields (default is 0.8)",
                    default = 0.8
                    )
    parser.add_option("-o", "--output", dest = "output",
                    action="store", metavar="OUTPUT", help = "Output file name",
                    default = ""
                    )
    parser.add_option("-f", "--flat-bias", dest = "flatbias",
                    action="store", metavar="FLATBIAS", help = "Bias for Flat field image if different from science image",
                    default = ""
                    )
    (options, args) = parser.parse_args()
    if len(args) != 3:
        parser.error("REDUCE: Incorrect number of arguments")
    # Check verbosity: in quiet mode, redirect stdout into a throwaway buffer.
    if not options.verbose:
        output = StringIO()
        old_stdout = sys.stdout
        sys.stdout = output
    process(args[0], args[1], args[2], options.skip, options.threshold, options.flatbias, options.output)
    # Reset verbosity (the captured quiet-mode output is discarded).
    if not options.verbose:
        sys.stdout = old_stdout
repotvsupertuga/repo | plugin.video.exodus/resources/lib/modules/cache.py | 12 | 3662 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,hashlib,time
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
from resources.lib.modules import control
def get(function, timeout, *args, **table):
    """Return function(*args), caching the result in a local sqlite table.

    A cached response younger than `timeout` hours is returned without
    calling `function`. If the fresh call returns None/[] but a stale cached
    response exists, the stale response is returned instead. All failures
    are deliberately swallowed (best-effort cache for an XBMC add-on).

    The optional keyword `table` selects the cache table (default rel_list).
    """
    try:
        response = None
        # Cache key: normalized repr of the function + md5 of the arguments.
        f = repr(function)
        f = re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', f)
        a = hashlib.md5()
        for i in args: a.update(str(i))
        a = str(a.hexdigest())
    except:
        pass
    try:
        table = table['table']
    except:
        table = 'rel_list'
    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.cacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM %s WHERE func = '%s' AND args = '%s'" % (table, f, a))
        match = dbcur.fetchone()
        # Responses are stored as repr() text and revived with eval().
        response = eval(match[2].encode('utf-8'))
        t1 = int(match[3])
        t2 = int(time.time())
        update = (abs(t2 - t1) / 3600) >= int(timeout)
        if update == False:
            return response
    except:
        pass
    try:
        r = function(*args)
        # Prefer a stale cached response over an empty fresh result.
        if (r == None or r == []) and not response == None:
            return response
        elif (r == None or r == []):
            return r
    except:
        return
    try:
        r = repr(r)
        t = int(time.time())
        dbcur.execute("CREATE TABLE IF NOT EXISTS %s (""func TEXT, ""args TEXT, ""response TEXT, ""added TEXT, ""UNIQUE(func, args)"");" % table)
        dbcur.execute("DELETE FROM %s WHERE func = '%s' AND args = '%s'" % (table, f, a))
        dbcur.execute("INSERT INTO %s Values (?, ?, ?, ?)" % table, (f, a, r, t))
        dbcon.commit()
    except:
        pass
    try:
        return eval(r.encode('utf-8'))
    except:
        pass
def timeout(function, *args, **table):
    """Return the epoch time (seconds) at which the cached response for
    function(*args) was stored, or None when no cache row exists or any
    lookup step fails.

    The optional keyword `table` selects the cache table (default rel_list).
    """
    try:
        # Build the same cache key used by get(): normalized repr of the
        # function plus an md5 digest of the arguments.
        # (Raw strings avoid invalid "\s" escape warnings in the pattern.)
        f = repr(function)
        f = re.sub(r'.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', f)
        a = hashlib.md5()
        for i in args: a.update(str(i))
        a = str(a.hexdigest())
    except Exception:
        # Best-effort: key construction failures fall through to the lookup,
        # which will in turn fail and return None.
        pass
    try:
        table = table['table']
    except Exception:
        table = 'rel_list'
    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.cacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM %s WHERE func = '%s' AND args = '%s'" % (table, f, a))
        match = dbcur.fetchone()
        return int(match[3])
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # propagate; also dropped an unused local (response = None).
        return
def clear(table=None):
    """Drop the given cache table(s) and reclaim the sqlite file space.

    `table` may be a single table name, a list of names, or None to clear
    the default tables (rel_list, rel_lib). Failures are swallowed.
    """
    try:
        if table == None: table = ['rel_list', 'rel_lib']
        elif not type(table) == list: table = [table]
        dbcon = database.connect(control.cacheFile)
        dbcur = dbcon.cursor()
        for t in table:
            try:
                dbcur.execute("DROP TABLE IF EXISTS %s" % t)
                # VACUUM shrinks the database file after the drop.
                dbcur.execute("VACUUM")
                dbcon.commit()
            except:
                # Keep clearing the remaining tables even if one fails.
                pass
    except:
        pass
| gpl-2.0 |
Red-M/CloudBot | cloudbot/config.py | 30 | 3100 | import json
import os
import time
import sys
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import cloudbot
logger = logging.getLogger("cloudbot")
class Config(dict):
    """Dict-like view of the bot's JSON config file, optionally kept in sync
    with the file on disk via a watchdog observer.

    :type filename: str
    :type path: str
    :type bot: cloudbot.bot.CloudBot
    :type observer: Observer
    :type event_handler: ConfigEventHandler
    """

    def __init__(self, bot, *args, **kwargs):
        """
        :type bot: cloudbot.bot.CloudBot
        :type args: list
        :type kwargs: dict
        """
        # dict.__init__ already consumes *args/**kwargs, so no separate
        # self.update(*args, **kwargs) call is needed.
        super().__init__(*args, **kwargs)
        self.filename = "config.json"
        self.path = os.path.abspath(self.filename)
        self.bot = bot

        # populate self with config data
        self.load_config()

        self.reloading_enabled = self.get("reloading", {}).get("config_reloading", True)

        if self.reloading_enabled:
            # start watching the config file for changes
            self.observer = Observer()

            pattern = "*{}".format(self.filename)

            self.event_handler = ConfigEventHandler(self.bot, self, patterns=[pattern])
            self.observer.schedule(self.event_handler, path='.', recursive=False)
            self.observer.start()

    def stop(self):
        """shuts down the config reloader"""
        if self.reloading_enabled:
            self.observer.stop()

    def load_config(self):
        """(re)loads the bot config from the config file"""
        if not os.path.exists(self.path):
            # if there is no config, show an error and die
            logger.critical("No config file found, bot shutting down!")
            print("No config file found! Bot shutting down in five seconds.")
            print("Copy 'config.default.json' to 'config.json' for defaults.")
            print("For help, see http://git.io/cloudbotirc. Thank you for using CloudBot!")
            time.sleep(5)
            sys.exit()

        with open(self.path) as f:
            self.update(json.load(f))
            logger.debug("Config loaded from file.")

        # reload permissions
        if self.bot.connections:
            for connection in self.bot.connections.values():
                connection.permissions.reload()

    def save_config(self):
        """saves the contents of the config dict to the config file"""
        # Use a context manager so the handle is flushed and closed even if
        # serialization fails (the old json.dump(self, open(...)) leaked it).
        with open(self.path, 'w') as f:
            json.dump(self, f, sort_keys=True, indent=4)
        logger.info("Config saved to file.")
class ConfigEventHandler(PatternMatchingEventHandler):
    """Watchdog handler that reloads the bot configuration whenever the
    watched config file changes on disk.

    :type bot: cloudbot.bot.CloudBot
    :type config: core.config.Config
    :type logger: logging.Logger
    """

    def __init__(self, bot, config, *args, **kwargs):
        """
        :type bot: cloudbot.bot.CloudBot
        :type config: Config
        """
        # Stash references before handing the remaining arguments (e.g. the
        # filename patterns) to the watchdog base class.
        self.bot = bot
        self.config = config
        PatternMatchingEventHandler.__init__(self, *args, **kwargs)

    def on_any_event(self, event):
        # Ignore filesystem noise while the bot is not running.
        if not self.bot.running:
            return
        logger.info("Config changed, triggering reload.")
        self.config.load_config()
hojel/epubia | scraper/naver_scraper.py | 3 | 3343 | # -*- encoding: utf-8 -*-
# Book Info using Naver OpenAPI
import urllib
from xml.dom.minidom import parseString
import re
MARKUP_PTN = re.compile(r'</?[a-z]+>')
class book_scraper:
key = '' # my key
srch_url = 'http://openapi.naver.com/search?key={0:s}&query={1:s}&display=1&target=book'
isbn_url = 'http://openapi.naver.com/search?key={0:s}&query={1:s}&display=1&target=book_adv&d_isbn={1:s}'
img_url = 'http://book.daum-img.net/image/KOR{0:s}'
default_value = {'title':'','author':'','isbn':'',
'cover_url':'',
'publisher':'','publishdate':'',
'description':'','subject':''}
def __init__(self):
pass
def search(self,qstr,maxresult=None):
return self._parse( urllib.urlopen(self.srch_url.format(self.key, urllib.quote_plus(qstr.encode('utf-8')))).read() )
def fetch(self,isbn):
result = self._parse( urllib.urlopen(self.isbn_url.format(self.key, isbn)).read() )
return result[0] if result else None
def _parse(self,xml):
info = []
dom = parseString(xml)
if dom.childNodes[0].nodeName == 'error':
print xml
return None
assert dom.childNodes[0].childNodes[0].nodeName == 'channel'
for node in dom.childNodes[0].childNodes[0].childNodes:
if node.nodeName == 'item':
pkt = self.default_value
for e in node.childNodes:
if e.nodeName == 'title':
pkt['title'] = self._cleanup(e.childNodes[0].nodeValue)
elif e.nodeName == 'author':
if e.childNodes:
pkt['author'] = self._cleanup(e.childNodes[0].nodeValue)
elif e.nodeName == 'image' and e.childNodes:
pkt['cover_url'] = e.childNodes[0].nodeValue.replace('=m1','=m256')
elif e.nodeName == 'publisher':
pkt['publisher'] = e.childNodes[0].nodeValue
elif e.nodeName == 'pubdate':
ss = e.childNodes[0].nodeValue
pkt['publishdate'] = "%s-%s-%s" % (ss[0:4],ss[4:6],ss[6:8])
elif e.nodeName == 'description':
if e.childNodes:
pkt['description'] = self._cleanup(e.childNodes[0].nodeValue)
elif e.nodeName == 'isbn':
if e.childNodes:
pkt['isbn'] = self._cleanup(e.childNodes[0].nodeValue.split(' ')[-1])
if pkt['cover_url'] == '' and len(pkt['isbn']) == 13:
pkt['cover_url'] = self.img_url.format(pkt['isbn'], pkt['isbn'][-3:-1])
info.append( pkt )
return info
def _cleanup(self,str):
return MARKUP_PTN.sub('',str).replace('&','&').replace('<','<').replace('>','>')
if __name__ == "__main__":
    # Ad-hoc smoke test: runs two live queries against the Naver API and
    # prints the parsed metadata (requires network access and a valid key).
    info = book_scraper().search( "은하영웅전설 1" )[0]
    print info['title']
    print info['author']
    print info['cover_url']
    info = book_scraper().search( "[이광수]무정" )[0]
    print info['title']
    print info['author']
    print info['cover_url']
# vim:ts=4:sw=4:et
| mit |
sameerparekh/pants | tests/python/pants_test/python/test_python_run_integration.py | 8 | 2582 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class PythonRunIntegrationTest(PantsRunIntegrationTest):
  """Integration tests for running python_binary targets via `./pants run`."""

  def test_run_26(self):
    self._maybe_run_version('2.6')

  def test_run_27(self):
    self._maybe_run_version('2.7')

  def test_run_27_and_then_26(self):
    # Share one interpreter cache across both runs so the second run must
    # select a different interpreter from a warm cache.
    with temporary_dir() as interpreters_cache:
      pants_ini_config = {'python-setup': {'interpreter_cache_dir': interpreters_cache}}
      pants_run_27 = self.run_pants(
        command=['run', 'tests/python/pants_test/python:echo_interpreter_version_2.7'],
        config=pants_ini_config
      )
      self.assert_success(pants_run_27)
      pants_run_26 = self.run_pants(
        command=['run', 'tests/python/pants_test/python:echo_interpreter_version_2.6',
                 '--interpreter=CPython>=2.6,<3', '--interpreter=CPython>=3.3'],
        config=pants_ini_config
      )
      self.assert_success(pants_run_26)

  def test_die(self):
    """A binary that dies must propagate its exit code through `pants run`."""
    command = ['run',
               'tests/python/pants_test/python:die',
               '--interpreter=CPython>=2.6,<3',
               '--interpreter=CPython>=3.3',
               '--quiet']
    pants_run = self.run_pants(command=command)
    assert pants_run.returncode == 57

  def _maybe_run_version(self, version):
    """Run the echo binary under the given interpreter version, or skip when
    that interpreter is not installed on the host."""
    if self.has_python_version(version):
      print('Found python %s. Testing running on it.' % version)
      echo = self._run_echo_version(version)
      v = echo.split('.')  # E.g., 2.6.8.
      self.assertTrue(len(v) > 2, 'Not a valid version string: %s' % v)
      # assertEqual replaces the deprecated assertEquals alias.
      self.assertEqual(version, '%s.%s' % (v[0], v[1]))
    else:
      print('No python %s found. Skipping.' % version)
      pytest.skip('No python %s on system' % version)

  def _run_echo_version(self, version):
    """Build and run the echo_interpreter_version binary, returning the last
    line of its stdout (the reported interpreter version)."""
    binary_name = 'echo_interpreter_version_%s' % version
    binary_target = 'tests/python/pants_test/python:' + binary_name
    # Build a pex.
    # Avoid some known-to-choke-on interpreters.
    command = ['run',
               binary_target,
               '--interpreter=CPython>=2.6,<3',
               '--interpreter=CPython>=3.3',
               '--quiet']
    pants_run = self.run_pants(command=command)
    return pants_run.stdout_data.rstrip().split('\n')[-1]
| apache-2.0 |
Spleen64/Sick-Beard | sickbeard/clients/requests/packages/urllib3/contrib/ntlmpool.py | 262 | 4740 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # NOTE(review): assumes user is "DOMAIN\\username"; a bare username
        # with no backslash would raise IndexError below — confirm callers.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message (NTLM type 1)
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message (NTLM type 2),
        # delivered as one of possibly several www-authenticate values.
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message (NTLM type 3) built from the server
        # challenge and the stored credentials.
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        # Again detach the socket from the response so it stays open for
        # the authenticated session.
        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force keep-alive on every request; the NTLM authentication is bound
        # to the underlying TCP connection.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
| gpl-3.0 |
timgraham/django-cms | cms/utils/__init__.py | 1 | 3222 | # -*- coding: utf-8 -*-
# TODO: this is just stuff from utils.py - should be splitted / moved
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
from cms import constants
from cms.utils.conf import get_cms_setting
from cms.utils.conf import get_site_id # nopyflakes
from cms.utils.i18n import get_default_language
from cms.utils.i18n import get_language_list
from cms.utils.i18n import get_language_code
def get_template_from_request(request, obj=None, no_current_page=False):
    """
    Gets a valid template from different sources or falls back to the default
    template.
    """
    template = None
    # Short-circuit: with a single configured template there is no choice.
    if len(get_cms_setting('TEMPLATES')) == 1:
        return get_cms_setting('TEMPLATES')[0][0]
    # Precedence: POST param, then GET param, then the given object,
    # then the current page on the request.
    if hasattr(request, 'POST') and "template" in request.POST:
        template = request.POST['template']
    elif hasattr(request, 'GET') and "template" in request.GET:
        template = request.GET['template']
    if not template and obj is not None:
        template = obj.get_template()
    if not template and not no_current_page and hasattr(request, "current_page"):
        current_page = request.current_page
        if hasattr(current_page, "get_template"):
            template = current_page.get_template()
    # Only accept templates that are actually configured.
    if template is not None and template in dict(get_cms_setting('TEMPLATES')).keys():
        if template == constants.TEMPLATE_INHERITANCE_MAGIC and obj:
            # Happens on admin's request when changing the template for a page
            # to "inherit".
            return obj.get_template()
        return template
    # Fall back to the first configured template.
    return get_cms_setting('TEMPLATES')[0][0]
def get_language_from_request(request, current_page=None):
    """
    Return the most obvious language for the request.

    Resolution order: ``request.POST['language']``, ``request.GET['language']``,
    ``request.LANGUAGE_CODE``, the first language available on *current_page*,
    and finally the site's default language. Candidates not enabled for the
    page's site are discarded.
    """
    language = None
    if hasattr(request, 'POST'):
        language = request.POST.get('language')
    if not language and hasattr(request, 'GET'):
        language = request.GET.get('language')
    site_id = current_page.site_id if current_page else None
    if language:
        language = get_language_code(language)
        if language not in get_language_list(site_id):
            language = None
    if not language:
        language = get_language_code(getattr(request, 'LANGUAGE_CODE', None))
        if language and language not in get_language_list(site_id):
            language = None
    if not language and current_page:
        # in last resort, get the first language available in the page
        languages = current_page.get_languages()
        if languages:
            language = languages[0]
    if not language:
        # language must be defined in CMS_LANGUAGES, so check first if there
        # is any language with LANGUAGE_CODE, otherwise try to split it and
        # find the best match
        language = get_default_language(site_id=site_id)
    return language
# Dotted path used when the project defines no STATICFILES_STORAGE setting.
default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage'
class ConfiguredStorage(LazyObject):
    # Lazily instantiates the storage backend named by the
    # STATICFILES_STORAGE setting, deferring the import to first access.
    def _setup(self):
        self._wrapped = get_storage_class(getattr(settings, 'STATICFILES_STORAGE', default_storage))()
# Module-level singleton; resolves its backend on first attribute access.
configured_storage = ConfiguredStorage()
| bsd-3-clause |
eyesnears/ardupilot | Tools/autotest/pysim/util.py | 129 | 13700 | import math
from math import sqrt, acos, cos, pi, sin, atan2
import os, sys, time, random
from rotmat import Vector3, Matrix3
from subprocess import call, check_call,Popen, PIPE
def m2ft(x):
    '''Convert a distance in metres to feet.'''
    feet = float(x) / 0.3048
    return feet
def ft2m(x):
    '''Convert a distance in feet to metres.'''
    metres = float(x) * 0.3048
    return metres
def kt2mps(x):
    '''Convert a speed in knots to metres per second.'''
    mps = x * 0.514444444
    return mps
def mps2kt(x):
    '''Convert a speed in metres per second to knots.'''
    knots = x / 0.514444444
    return knots
def topdir():
    '''return top of git tree where autotest is running from'''
    # Walk three levels up from this file (pysim -> autotest -> Tools ->
    # tree root), asserting the expected directory name at each step so a
    # mislocated checkout fails loudly instead of returning a bogus path.
    d = os.path.dirname(os.path.realpath(__file__))
    assert(os.path.basename(d)=='pysim')
    d = os.path.dirname(d)
    assert(os.path.basename(d)=='autotest')
    d = os.path.dirname(d)
    assert(os.path.basename(d)=='Tools')
    d = os.path.dirname(d)
    return d
def reltopdir(path):
    '''Resolve *path* relative to the top of the source tree.'''
    joined = os.path.join(topdir(), path)
    return os.path.normpath(joined)
def run_cmd(cmd, dir=".", show=False, output=False, checkfail=True):
    '''Run a shell command in directory *dir*.

    With output=True the command's stdout is captured and returned;
    otherwise the exit status is returned. With checkfail=True a
    non-zero exit raises CalledProcessError.
    '''
    if show:
        print("Running: '%s' in '%s'" % (cmd, dir))
    if output:
        return Popen([cmd], shell=True, stdout=PIPE, cwd=dir).communicate()[0]
    if checkfail:
        return check_call(cmd, shell=True, cwd=dir)
    return call(cmd, shell=True, cwd=dir)
def rmfile(path):
    '''Delete *path* if it exists; a missing file is silently ignored.'''
    try:
        os.unlink(path)
    except OSError:
        # Only filesystem errors (e.g. ENOENT) are best-effort; the
        # previous bare Exception also hid programming errors.
        pass
def deltree(path):
    '''Recursively delete the directory tree at *path*.'''
    try:
        from shlex import quote  # Python 3
    except ImportError:
        from pipes import quote  # Python 2
    # Quote the path so whitespace or shell metacharacters in the name
    # can neither break the command nor inject extra shell syntax.
    run_cmd('rm -rf %s' % quote(path))
def build_SIL(atype, target='sitl', j=1):
    '''build desktop SIL

    Runs "make clean" then "make -j<j> <target>" inside the vehicle
    directory *atype* (relative to the tree root); raises if either
    make invocation fails.
    '''
    run_cmd("make clean",
            dir=reltopdir(atype),
            checkfail=True)
    run_cmd("make -j%u %s" % (j, target),
            dir=reltopdir(atype),
            checkfail=True)
    return True
# registry of pexpect children that must be shut down on exit
close_list = []


def pexpect_autoclose(p):
    '''Register child *p* so pexpect_close_all() will close it later.'''
    close_list.append(p)
def pexpect_close(p):
    '''Close pexpect child *p* (gently, then forcibly) and deregister it.'''
    global close_list
    for kwargs in ({}, {'force': True}):
        try:
            p.close(**kwargs)
        except Exception:
            # best effort: the child may already be gone
            pass
    if p in close_list:
        close_list.remove(p)
def pexpect_close_all():
    '''Close every pexpect child registered via pexpect_autoclose().'''
    global close_list
    # iterate over a snapshot: pexpect_close mutates the registry
    for child in list(close_list):
        pexpect_close(child)
def pexpect_drain(p):
    '''drain any pending input

    Reads (and discards) up to 1000 bytes from pexpect child *p* without
    blocking; a TIMEOUT simply means nothing was pending.
    '''
    import pexpect
    try:
        p.read_nonblocking(1000, timeout=0)
    except pexpect.TIMEOUT:
        pass
def start_SIL(atype, valgrind=False, wipe=False, synthetic_clock=True, home=None, model=None, speedup=1):
    '''launch a SIL instance

    Builds a command line for the vehicle's .elf simulator binary
    (optionally under valgrind), spawns it with pexpect, registers it
    for autoclose, and waits for its "Waiting for connection" banner
    before returning the pexpect child.
    '''
    import pexpect
    cmd=""
    if valgrind and os.path.exists('/usr/bin/valgrind'):
        cmd += 'valgrind -q --log-file=%s-valgrind.log ' % atype
    # prefer the in-tree build output, falling back to /tmp
    executable = reltopdir('tmp/%s.build/%s.elf' % (atype, atype))
    if not os.path.exists(executable):
        executable = '/tmp/%s.build/%s.elf' % (atype, atype)
    cmd += executable
    if wipe:
        cmd += ' -w'
    if synthetic_clock:
        cmd += ' -S'
    if home is not None:
        cmd += ' --home=%s' % home
    if model is not None:
        cmd += ' --model=%s' % model
    if speedup != 1:
        cmd += ' --speedup=%f' % speedup
    print("Running: %s" % cmd)
    ret = pexpect.spawn(cmd, logfile=sys.stdout, timeout=5)
    # no inter-character delay: speeds up scripted interaction
    ret.delaybeforesend = 0
    pexpect_autoclose(ret)
    ret.expect('Waiting for connection')
    return ret
def start_MAVProxy_SIL(atype, aircraft=None, setup=False, master='tcp:127.0.0.1:5760',
                       options=None, logfile=sys.stdout):
    '''launch mavproxy connected to a SIL instance

    The mavproxy executable can be overridden with the MAVPROXY_CMD
    environment variable. Returns the pexpect child (registered for
    autoclose).
    '''
    import pexpect
    global close_list
    MAVPROXY = os.getenv('MAVPROXY_CMD', 'mavproxy.py')
    cmd = MAVPROXY + ' --master=%s --out=127.0.0.1:14550' % master
    if setup:
        cmd += ' --setup'
    if aircraft is None:
        aircraft = 'test.%s' % atype
    cmd += ' --aircraft=%s' % aircraft
    if options is not None:
        cmd += ' ' + options
    ret = pexpect.spawn(cmd, logfile=logfile, timeout=60)
    ret.delaybeforesend = 0
    pexpect_autoclose(ret)
    return ret
def expect_setup_callback(e, callback):
    '''setup a callback that is called once a second while waiting for
       patterns

    Monkey-patches e.expect: the original method is saved as
    e.expect_saved and replaced by a wrapper that waits in 1-second
    slices, invoking *callback* after each slice that times out.
    '''
    import pexpect
    def _expect_callback(pattern, timeout=e.timeout):
        tstart = time.time()
        while time.time() < tstart + timeout:
            try:
                # wait at most 1 second so the callback runs regularly
                ret = e.expect_saved(pattern, timeout=1)
                return ret
            except pexpect.TIMEOUT:
                e.expect_user_callback(e)
                pass
        print("Timed out looking for %s" % pattern)
        raise pexpect.TIMEOUT(timeout)
    e.expect_user_callback = callback
    e.expect_saved = e.expect
    e.expect = _expect_callback
def mkdir_p(dir):
    '''Create directory *dir* and any missing parents (like mkdir -p).

    Creating an already-existing directory is not an error.
    '''
    if not dir:
        return
    if dir.endswith("/"):
        # strip a trailing slash and retry
        mkdir_p(dir[:-1])
        return
    if os.path.isdir(dir):
        return
    mkdir_p(os.path.dirname(dir))
    try:
        os.mkdir(dir)
    except OSError:
        # Another process may have created the directory between the
        # isdir() check above and the mkdir() call; that is fine as
        # long as it exists now.
        if not os.path.isdir(dir):
            raise
def loadfile(fname):
    '''Return the entire contents of the text file *fname* as a string.'''
    # the with-statement guarantees the handle is closed even if read() raises
    with open(fname, mode='r') as f:
        return f.read()
def lock_file(fname):
    '''Take an exclusive, non-blocking lock on *fname*.

    Returns the open file object that holds the lock, or None if the
    lock is already held by someone else.
    '''
    import fcntl
    handle = open(fname, mode='w')
    try:
        fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
        return None
    return handle
def check_parent(parent_pid=None):
    '''Exit the process if its parent (or *parent_pid*) has died.'''
    if parent_pid is None:
        try:
            parent_pid = os.getppid()
        except Exception:
            pass
    if parent_pid is None:
        return
    try:
        # signal 0 performs permission/existence checks only; it raises
        # if the pid no longer exists
        os.kill(parent_pid, 0)
    except Exception:
        print("Parent had finished - exiting")
        sys.exit(1)
def EarthRatesToBodyRates(dcm, earth_rates):
    '''Convert Euler angular rates (earth frame) to body-frame gyro rates.

    All inputs and outputs are in radians; returns a Vector3 gyro vector
    in rad/s. Formula thanks to James Goppert.
    '''
    from math import sin, cos
    (phi, theta, psi) = dcm.to_euler()
    phiDot, thetaDot, psiDot = earth_rates.x, earth_rates.y, earth_rates.z
    p = phiDot - psiDot * sin(theta)
    q = cos(phi) * thetaDot + sin(phi) * psiDot * cos(theta)
    r = cos(phi) * psiDot * cos(theta) - sin(phi) * thetaDot
    return Vector3(p, q, r)
def BodyRatesToEarthRates(dcm, gyro):
    '''Convert body-frame gyro rates to earth-frame Euler angular rates.

    All rates are in radians/s; returns a Vector3 of
    (phiDot, thetaDot, psiDot).
    '''
    from math import sin, cos, tan, fabs
    p, q, r = gyro.x, gyro.y, gyro.z
    (phi, theta, psi) = dcm.to_euler()
    phiDot = p + tan(theta) * (q * sin(phi) + r * cos(phi))
    thetaDot = q * cos(phi) - r * sin(phi)
    if fabs(cos(theta)) < 1.0e-20:
        # nudge away from the gimbal-lock singularity at theta = +/-90 deg
        theta += 1.0e-10
    psiDot = (q * sin(phi) + r * cos(phi)) / cos(theta)
    return Vector3(phiDot, thetaDot, psiDot)
radius_of_earth = 6378100.0  # in meters


def gps_newpos(lat, lon, bearing, distance):
    '''Return the (lat, lon) in degrees reached by travelling *distance*
    meters from (*lat*, *lon*) on the given *bearing* in degrees.

    Formula thanks to http://www.movable-type.co.uk/scripts/latlong.html
    '''
    from math import sin, asin, cos, atan2, radians, degrees
    lat1, lon1, brng = radians(lat), radians(lon), radians(bearing)
    dr = distance/radius_of_earth  # angular distance
    lat2 = asin(sin(lat1)*cos(dr) +
                cos(lat1)*sin(dr)*cos(brng))
    lon2 = lon1 + atan2(sin(brng)*sin(dr)*cos(lat1),
                        cos(dr)-sin(lat1)*sin(lat2))
    return (degrees(lat2), degrees(lon2))
def gps_distance(lat1, lon1, lat2, lon2):
    '''Haversine great-circle distance in meters between two points given
    in degrees.

    Formula thanks to http://www.movable-type.co.uk/scripts/latlong.html
    '''
    lat1, lon1 = math.radians(lat1), math.radians(lon1)
    lat2, lon2 = math.radians(lat2), math.radians(lon2)
    dLat = lat2 - lat1
    dLon = lon2 - lon1
    a = math.sin(0.5*dLat)**2 + math.sin(0.5*dLon)**2 * math.cos(lat1) * math.cos(lat2)
    c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0-a))
    return radius_of_earth * c
def gps_bearing(lat1, lon1, lat2, lon2):
    '''Initial great-circle bearing in degrees (range 0-360) from point 1
    to point 2, both given in degrees.

    Formula thanks to http://www.movable-type.co.uk/scripts/latlong.html
    '''
    lat1, lon1 = math.radians(lat1), math.radians(lon1)
    lat2, lon2 = math.radians(lat2), math.radians(lon2)
    dLon = lon2 - lon1
    y = math.sin(dLon) * math.cos(lat2)
    x = math.cos(lat1)*math.sin(lat2) - math.sin(lat1)*math.cos(lat2)*math.cos(dLon)
    # atan2 yields (-180, 180]; fold negatives into [0, 360)
    return math.degrees(math.atan2(y, x)) % 360.0
class Wind(object):
    '''a wind generation object

    Built from a "speed,direction,turbulance" string; produces a
    time-varying wind speed (random walk around the base speed) and the
    resulting drag acceleration on an aircraft.
    '''
    def __init__(self, windstring, cross_section=0.1):
        a = windstring.split(',')
        if len(a) != 3:
            raise RuntimeError("Expected wind in speed,direction,turbulance form, not %s" % windstring)
        self.speed = float(a[0]) # m/s
        self.direction = float(a[1]) # direction the wind is going in
        self.turbulance= float(a[2]) # turbulance factor (standard deviation)
        # the cross-section of the aircraft to wind. This is multiplied by the
        # difference in the wind and the velocity of the aircraft to give the acceleration
        self.cross_section = cross_section
        # the time constant for the turbulance - the average period of the
        # changes over time
        self.turbulance_time_constant = 5.0
        # wind time record
        self.tlast = time.time()
        # initial turbulance multiplier
        self.turbulance_mul = 1.0
    def current(self, deltat=None):
        '''return current wind speed and direction as a tuple
        speed is in m/s, direction in degrees

        With deltat=None the wall-clock time since the previous call is
        used as the timestep.
        '''
        if deltat is None:
            tnow = time.time()
            deltat = tnow - self.tlast
            self.tlast = tnow
        # update turbulance random walk
        w_delta = math.sqrt(deltat)*(1.0-random.gauss(1.0, self.turbulance))
        # decay the multiplier back towards 1.0 over turbulance_time_constant
        w_delta -= (self.turbulance_mul-1.0)*(deltat/self.turbulance_time_constant)
        self.turbulance_mul += w_delta
        speed = self.speed * math.fabs(self.turbulance_mul)
        return (speed, self.direction)
    # Calculate drag.
    def drag(self, velocity, deltat=None, testing=None):
        '''return current wind force in Earth frame. The velocity parameter is
        a Vector3 of the current velocity of the aircraft in earth frame, m/s

        NOTE(review): the `testing` parameter is accepted but unused.
        '''
        from math import radians
        # (m/s, degrees) : wind vector as a magnitude and angle.
        (speed, direction) = self.current(deltat=deltat)
        # speed = self.speed
        # direction = self.direction
        # Get the wind vector.
        w = toVec(speed, radians(direction))
        obj_speed = velocity.length()
        # Compute the angle between the object vector and wind vector by taking
        # the dot product and dividing by the magnitudes.
        d = w.length() * obj_speed
        if d == 0:
            alpha = 0
        else:
            alpha = acos((w * velocity) / d)
        # Get the relative wind speed and angle from the object. Note that the
        # relative wind speed includes the velocity of the object; i.e., there
        # is a headwind equivalent to the object's speed even if there is no
        # absolute wind.
        (rel_speed, beta) = apparent_wind(speed, obj_speed, alpha)
        # Return the vector of the relative wind, relative to the coordinate
        # system.
        relWindVec = toVec(rel_speed, beta + atan2(velocity.y, velocity.x))
        # Combine them to get the acceleration vector.
        return Vector3( acc(relWindVec.x, drag_force(self, relWindVec.x))
                      , acc(relWindVec.y, drag_force(self, relWindVec.y))
                      , 0 )
def apparent_wind(wind_sp, obj_speed, alpha):
    '''Return (apparent wind speed, apparent wind angle beta).

    *alpha* is the angle between the object's motion and the true wind:
    0 rad is a headwind, pi a tailwind. Speeds must be non-negative.
    See http://en.wikipedia.org/wiki/Apparent_wind
    '''
    delta = wind_sp * cos(alpha)
    rel_speed = sqrt(wind_sp**2 + obj_speed**2 + 2 * obj_speed * delta)
    if rel_speed == 0:
        # no apparent wind at all: angle is conventionally pi
        return (rel_speed, pi)
    return (rel_speed, acos((delta + obj_speed) / rel_speed))
def drag_force(wind, sp):
    '''Drag force for relative wind speed *sp*.

    Drag equation F = cl * p/2 * v^2 * a with cl ~= 0.2 and air density
    p ~= 1 kg/m^3 collapses to 0.1 * v^2 * cross_section; the area term
    is wind.cross_section. See http://en.wikipedia.org/wiki/Drag_equation
    '''
    v_squared = sp**2.0
    return v_squared * 0.1 * wind.cross_section
def acc(val, mag):
    '''Acceleration component of magnitude *mag* directed opposite the
    sign of *val*; when *val* is zero, +mag is returned.

    relWindVec is the direction the apparent wind comes *from*; the
    acceleration points in the direction the wind blows to.
    '''
    if val == 0:
        return mag
    return -mag if val > 0 else mag
def toVec(magnitude, angle):
    '''Build the xy-plane Vector3 with the given magnitude and angle
    (radians), by rotating (magnitude, 0, 0) about the z axis.'''
    rot = Matrix3()
    rot.from_euler(0, 0, angle)
    return rot.transposed() * Vector3(magnitude, 0, 0)
def constrain(value, minv, maxv):
    '''Clamp *value* into the inclusive range [minv, maxv].'''
    return min(max(value, minv), maxv)
if __name__ == "__main__":
    # running this module directly executes its embedded doctests
    import doctest
    doctest.testmod()
| gpl-3.0 |
Avanish14/SmartModem | AMrx.py | 1 | 3114 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Amrx
# Generated: Tue Aug 8 20:51:18 2017
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import time, sys
class AMrx(gr.top_block):
    """GNU Radio flow graph: AM receiver.

    USRP source (435 MHz, 2.5 Msps) -> AGC -> resample to 44.1 kHz ->
    AM demodulation -> WAV file sink (output path taken from sys.argv[1]).
    """
    def __init__(self):
        gr.top_block.__init__(self, "Amrx")
        ##################################################
        # Variables
        ##################################################
        # hardware sample rate in samples/second
        self.samp_rate = samp_rate = 2500000
        ##################################################
        # Blocks
        ##################################################
        self.uhd_usrp_source_0 = uhd.usrp_source(
            ",".join(("", "")),
            uhd.stream_args(
                cpu_format="fc32",
                channels=range(1),
            ),
        )
        self.uhd_usrp_source_0.set_samp_rate(samp_rate)
        self.uhd_usrp_source_0.set_center_freq(435000000, 0)
        self.uhd_usrp_source_0.set_gain(80, 0)
        self.uhd_usrp_source_0.set_antenna('TX/RX', 0)
        self.uhd_usrp_source_0.set_bandwidth(100000, 0)
        # decimate the 2.5 Msps stream down to audio rate (44.1 kHz)
        self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(
                interpolation=44100,
                decimation=2500000,
                taps=None,
                fractional_bw=None,
        )
        # output WAV path comes from the first command-line argument
        self.blocks_wavfile_sink_0 = blocks.wavfile_sink(sys.argv[1], 1, 44100, 8)
        self.analog_am_demod_cf_0 = analog.am_demod_cf(
            channel_rate=44100,
            audio_decim=1,
            audio_pass=20000,
            audio_stop=21000,
        )
        self.analog_agc2_xx_0 = analog.agc2_cc(.1, 1e-6, 1.0, 0)
        self.analog_agc2_xx_0.set_max_gain(5)
        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_agc2_xx_0, 0), (self.rational_resampler_xxx_0, 0))
        self.connect((self.analog_am_demod_cf_0, 0), (self.blocks_wavfile_sink_0, 0))
        self.connect((self.rational_resampler_xxx_0, 0), (self.analog_am_demod_cf_0, 0))
        self.connect((self.uhd_usrp_source_0, 0), (self.analog_agc2_xx_0, 0))
    def get_samp_rate(self):
        """Return the configured sample rate in samples/second."""
        return self.samp_rate
    def set_samp_rate(self, samp_rate):
        """Set the sample rate and push it to the USRP source."""
        self.samp_rate = samp_rate
        self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
def main(top_block_cls=AMrx, options=None):
    """Run the flow graph until the user presses Enter, then stop it."""
    tb = top_block_cls()
    tb.start()
    print('Receiving on ' + str(tb.uhd_usrp_source_0.get_center_freq()) + 'Hz with a channel bandwidth of ' + str(tb.uhd_usrp_source_0.get_bandwidth()) + 'Hz')
    try:
        # raw_input: this script targets Python 2
        raw_input('Press Enter to quit: ')
    except EOFError:
        pass
    tb.stop()
    tb.wait()
    print('.wav file generated')
if __name__ == '__main__':
    main()
| mit |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/bigtable/clusters/create.py | 1 | 2807 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bigtable clusters create command."""
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
class CreateCluster(base.Command):
  """Create a new Bigtable cluster."""

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    util.AddClusterIdArgs(parser)
    util.AddClusterInfoArgs(parser)
    parser.add_argument(
        '--storage',
        choices=['HDD', 'SSD'],
        default='SSD',
        type=str.upper,
        help='Storage class for the cluster. Valid options are HDD or SSD.')

  @util.MapHttpError
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
    cli = self.context['clusteradmin']
    cluster = self.context['clusteradmin-msgs'].Cluster
    # map the --storage flag value onto the API's enum
    storage_options = {
        'HDD': cluster.DefaultStorageTypeValueValuesEnum.STORAGE_HDD,
        'SSD': cluster.DefaultStorageTypeValueValuesEnum.STORAGE_SSD}
    msg = self.context['clusteradmin-msgs'].CreateClusterRequest(
        name=util.ZoneUrl(args),
        clusterId=args.cluster,
        cluster=cluster(
            displayName=args.description,
            serveNodes=args.nodes,
            defaultStorageType=storage_options[args.storage]))
    result = cli.projects_zones_clusters.Create(msg)
    # NOTE(review): `args.async` is a reserved word from Python 3.7 on;
    # this code targets Python 2 and would need renaming to run on 3.x.
    if not args.async:
      # block until the long-running create operation finishes
      util.WaitForOp(
          self.context,
          result.currentOperation.name,
          'Creating cluster')
    return result

  def Display(self, args, result):
    """This method is called to print the result of the Run() method.

    Args:
      args: The arguments that command was run with.
      result: The value returned from the Run() method.
    """
    # Always use this log module for printing (never use print directly).
    # This allows us to control the verbosity of commands in a global way.
    writer = log.out
    writer.Print('Cluster [{0}] in zone [{1}] creat{2}.'.format(
        args.cluster, args.zone, 'ion in progress' if args.async else 'ed'))
| bsd-3-clause |
yannickcr/Sick-Beard | sickbeard/clients/requests/utils.py | 204 | 17497 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import os
import platform
import re
import sys
import zlib
from netrc import netrc, NetrcParseError
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse
from .cookies import RequestsCookieJar, cookiejar_from_dict
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Return *d* as a sequence of (key, value) pairs.

    Mappings are converted via .items(); anything else is passed through
    unchanged.
    """
    if hasattr(d, 'items'):
        return d.items()
    return d
def super_len(o):
    """Best-effort size of *o*: len(), a ``len`` attribute, or the
    on-disk size of a file object. Returns None if none apply."""
    if hasattr(o, '__len__'):
        return len(o)
    if hasattr(o, 'len'):
        return o.len
    if hasattr(o, 'fileno'):
        # file-like object: report its size on disk
        return os.fstat(o.fileno()).st_size
    return None
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for ~/.netrc then ~/_netrc; returns (login, password) for the
    URL's host, or None when no usable entry exists.
    """
    try:
        locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES)
        netrc_path = None
        for loc in locations:
            # first existing candidate wins
            if os.path.exists(loc) and not netrc_path:
                netrc_path = loc
        # Abort early if there isn't one.
        if netrc_path is None:
            return netrc_path
        ri = urlparse(url)
        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password; fall back to the account
                # field when the login field is empty
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass
    # AppEngine hackiness: os.path.expanduser can be unavailable there.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Return the basename of obj.name, or None for pseudo-names like
    '<stdin>' or objects without a usable name."""
    name = getattr(obj, 'name', None)
    if not name:
        return None
    if name[0] == '<' or name[-1] == '>':
        # pseudo file names such as '<stdin>' are not real paths
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Coerce *value* (a mapping or sequence of pairs) to an OrderedDict.

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    Returns None for None and raises ValueError for scalar types that
    cannot represent key/value pairs.
    """
    if value is None:
        return None
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')
    return OrderedDict(value)
def to_key_val_list(value):
    """Coerce *value* (a mapping or sequence of pairs) to a list of tuples.

    ::

        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]

    Returns None for None and raises ValueError for scalar types.
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    pairs = value.items() if isinstance(value, dict) else value
    return list(pairs)
# From mitsuhiko/werkzeug (used with permission).
# NOTE: these three helpers are kept byte-for-byte in sync with
# werkzeug's implementations; behaviour changes belong upstream.
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.
    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.
    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.
    The return value is a standard :class:`list`:
    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']
    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.
    :param value: a string with a list header.
    :return: :class:`list`
    """
    result = []
    for item in _parse_list_header(value):
        # strip surrounding quotes from quoted-strings
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:
    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]
    If there is no value for a key it will be `None`:
    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}
    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.
    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    result = {}
    for item in _parse_list_header(value):
        # bare token (no '='): key with no value
        if '=' not in item:
            result[item] = None
            continue
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.
    :param value: the header value to unquote.
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well.  IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]
        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes.  Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly.  See #458.
        if not is_filename or value[:2] != '\\\\':
            return value.replace('\\\\', '\\').replace('\\"', '"')
    return value
def dict_from_cookiejar(cj):
    """Return a name -> value dict of the cookies in CookieJar *cj*.

    :param cj: CookieJar object to extract cookies from.
    """
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Insert the key/value pairs of *cookie_dict* into CookieJar *cj*
    and return the jar.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Return every charset declared in <meta> tags of *content*.

    :param content: bytestring to extract encodings from.
    """
    meta_charset = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    return meta_charset.findall(content)
def get_encoding_from_headers(headers):
    """Return the encoding declared by the Content-Type header, if any.

    Falls back to ISO-8859-1 (the HTTP/1.1 default) for text types that
    carry no explicit charset; returns None when no Content-Type is set.

    :param headers: dictionary to extract encoding from.
    """
    content_type = headers.get('content-type')
    if not content_type:
        return None
    # Parse "type/subtype; key=value; ..." by hand instead of relying on
    # cgi.parse_header: the cgi module was deprecated and then removed
    # from the standard library (PEP 594).
    tokens = content_type.split(';')
    content_type, raw_params = tokens[0].strip(), tokens[1:]
    params = {}
    for param in raw_params:
        param = param.strip()
        if param:
            key, _, value = param.partition('=')
            params[key.strip().lower()] = value.strip()
    if 'charset' in params:
        return params['charset'].strip("'\"")
    if 'text' in content_type:
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Incrementally decode the byte chunks of *iterator* using r.encoding.

    If the response declares no encoding, the raw chunks are passed
    through unchanged.
    """
    if r.encoding is None:
        for item in iterator:
            yield item
        return
    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        rv = decoder.decode(chunk)
        if rv:
            yield rv
    # flush any partial multi-byte sequence; the final chunk must be an
    # empty *bytes* object (the previous '' str broke on Python 3)
    rv = decoder.decode(b'', final=True)
    if rv:
        yield rv
def iter_slices(string, slice_length):
    """Yield consecutive slices of *string*, each at most *slice_length*
    characters long."""
    pos = 0
    total = len(string)
    while pos < total:
        yield string[pos:pos + slice_length]
        pos += slice_length
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.
    :param r: Response object to get unicode content from.
    Tried:
    1. charset from content-type
    2. every encodings from ``<meta ... charset=XXX>``
    3. fall back and replace all unicode characters
    """
    # NOTE(review): tried_encodings is collected but never consulted.
    tried_encodings = []
    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)
    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)
    # Fall back: decode with replacement of bad characters. When
    # `encoding` is None this str() call raises TypeError (on Python 3)
    # and the raw bytes are returned unchanged.
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
def stream_decompress(iterator, mode='gzip'):
    """Stream decodes an iterator over compressed data
    :param iterator: An iterator over compressed data
    :param mode: 'gzip' or 'deflate'
    :return: An iterator over decompressed data
    """
    if mode not in ['gzip', 'deflate']:
        raise ValueError('stream_decompress mode must be gzip or deflate')
    # wbits: 16+MAX_WBITS selects gzip framing, -MAX_WBITS raw deflate
    zlib_mode = 16 + zlib.MAX_WBITS if mode == 'gzip' else -zlib.MAX_WBITS
    dec = zlib.decompressobj(zlib_mode)
    try:
        for chunk in iterator:
            rv = dec.decompress(chunk)
            if rv:
                yield rv
    except zlib.error:
        # If there was an error decompressing, just return the raw chunk
        yield chunk
        # Continue to return the rest of the raw data
        for chunk in iterator:
            yield chunk
    else:
        # no zlib error: make sure everything has been returned from the
        # decompression object before finishing the stream
        buf = dec.decompress(bytes())
        rv = buf + dec.flush()
        if rv:
            yield rv
def stream_untransfer(gen, resp):
    """Wrap *gen* with the decompressor implied by the response's
    Content-Encoding header, if any."""
    encoding = resp.headers.get('content-encoding', '').lower()
    if 'gzip' in encoding:
        return stream_decompress(gen, mode='gzip')
    if 'deflate' in encoding:
        return stream_decompress(gen, mode='deflate')
    return gen
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")
def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
    """
    parts = uri.split('%')
    for i in range(1, len(parts)):
        h = parts[i][0:2]
        if len(h) == 2 and h.isalnum():
            # isalnum() also admits non-hex pairs such as 'zz'; leave
            # those escapes untouched instead of raising ValueError
            try:
                c = chr(int(h, 16))
            except ValueError:
                parts[i] = '%' + parts[i]
                continue
            if c in UNRESERVED_SET:
                parts[i] = c + parts[i][2:]
            else:
                parts[i] = '%' + parts[i]
        else:
            parts[i] = '%' + parts[i]
    return ''.join(parts)
def requote_uri(uri):
    """Re-quote the given URI.
    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    # Unquote only the unreserved characters
    # Then quote only illegal characters (do not quote reserved, unreserved,
    # or '%'); the safe set keeps existing percent-escapes intact.
    return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
def get_environ_proxies(url):
    """Return a dict of environment proxies.

    Reads the conventional *_proxy environment variables (both lower and
    upper case) and honours no_proxy for the given URL.
    """
    proxy_keys = [
        'all',
        'http',
        'https',
        'ftp',
        'socks'
    ]
    # environment lookups accept either case
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        # NOTE(review): suffix matching means 'google.com' in no_proxy also
        # matches e.g. 'notgoogle.com'.
        no_proxy = no_proxy.split(',')
        netloc = urlparse(url).netloc
        for host in no_proxy:
            if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                # The URL does match something in no_proxy, so we don't want
                # to apply the proxies on this URL.
                return {}
    # If we get here, we either didn't have no_proxy set or we're not going
    # anywhere that no_proxy applies to.
    proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys]
    return dict([(key, val) for (key, val) in proxies if val])
def default_user_agent():
    """Return a string representing the default user agent.

    Format: "python-requests/<version> <implementation>/<version>
    <system>/<release>".
    """
    _implementation = platform.python_implementation()
    if _implementation == 'CPython':
        _implementation_version = platform.python_version()
    elif _implementation == 'PyPy':
        # PyPy's version is exposed via sys.pypy_version_info
        _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
                                                sys.pypy_version_info.minor,
                                                sys.pypy_version_info.micro)
        if sys.pypy_version_info.releaselevel != 'final':
            _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
    elif _implementation == 'Jython':
        _implementation_version = platform.python_version()  # Complete Guess
    elif _implementation == 'IronPython':
        _implementation_version = platform.python_version()  # Complete Guess
    else:
        _implementation_version = 'Unknown'
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        # platform queries can fail on restricted environments
        p_system = 'Unknown'
        p_release = 'Unknown'
    return " ".join(['python-requests/%s' % __version__,
                     '%s/%s' % (_implementation, _implementation_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Return the baseline headers sent with every request."""
    return {
        'User-Agent': default_user_agent(),
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept': '*/*'
    }
def parse_header_links(value):
    """Parse an RFC 5988 Link header into a list of dicts, each holding
    the link's 'url' plus its parameters.

    e.g. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg"
    """
    links = []
    strip_chars = " '\""
    for segment in value.split(","):
        if ";" in segment:
            url, raw_params = segment.split(";", 1)
        else:
            url, raw_params = segment, ''
        link = {"url": url.strip("<> '\"")}
        for param in raw_params.split(";"):
            # stop on anything that is not a single key=value pair
            if param.count("=") != 1:
                break
            key, val = param.split("=")
            link[key.strip(strip_chars)] = val.strip(strip_chars)
        links.append(link)
    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
    """Guess the UTF flavour of JSON bytes *data*.

    JSON always starts with two ASCII characters, so detection is as
    easy as counting the nulls and from their location and count
    determine the encoding. Also detect a BOM, if present.
    """
    sample = data[:4]
    # BUGFIX: was codecs.BOM32_BE, which is a historical alias for the
    # UTF-16-BE BOM and therefore never matched a real UTF-32-BE BOM.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.

    Does not replace a present scheme with the one provided as an argument.
    """
    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and swap the
    # netloc and path fields when that happens.
    pieces = list(urlparse(url, new_scheme))
    if not pieces[1]:  # index 1 == netloc, index 2 == path
        pieces[1], pieces[2] = pieces[2], pieces[1]
    return urlunparse(pieces)
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    Falsy input yields a pair of empty strings.
    """
    if not url:
        return ('', '')
    parsed = urlparse(url)
    return (parsed.username, parsed.password)
| gpl-3.0 |
Drooids/odoo | addons/base_import/test_models.py | 399 | 2289 | from openerp.osv import orm, fields
def name(n): return 'base_import.tests.models.%s' % n
# Fixture models exercising base_import against char fields with various
# modifier combinations (required / readonly / states overrides).


# Plain optional char field.
class char(orm.Model):
    _name = name('char')
    _columns = {
        'value': fields.char('unknown')
    }


# Required char field: an import row must supply a value.
class char_required(orm.Model):
    _name = name('char.required')
    _columns = {
        'value': fields.char('unknown', required=True)
    }


# Unconditionally readonly char field.
class char_readonly(orm.Model):
    _name = name('char.readonly')
    _columns = {
        'value': fields.char('unknown', readonly=True)
    }


# Readonly field that becomes writable in the 'draft' state.
class char_states(orm.Model):
    _name = name('char.states')
    _columns = {
        'value': fields.char('unknown', readonly=True, states={'draft': [('readonly', False)]})
    }


# Readonly field whose states override does NOT lift readonly (only
# toggles invisibility), so it stays readonly for imports.
class char_noreadonly(orm.Model):
    _name = name('char.noreadonly')
    _columns = {
        'value': fields.char('unknown', readonly=True, states={'draft': [('invisible', True)]})
    }


# Readonly field whose states override re-asserts readonly.
class char_stillreadonly(orm.Model):
    _name = name('char.stillreadonly')
    _columns = {
        'value': fields.char('unknown', readonly=True, states={'draft': [('readonly', True)]})
    }
# TODO: complex field (m2m, o2m, m2o)


# Optional many2one to a related fixture model.
class m2o(orm.Model):
    _name = name('m2o')
    _columns = {
        'value': fields.many2one(name('m2o.related'))
    }


# Target of the optional m2o; default value lets tests create rows cheaply.
class m2o_related(orm.Model):
    _name = name('m2o.related')
    _columns = {
        'value': fields.integer()
    }
    _defaults = {
        'value': 42
    }


# Required many2one: import rows must resolve the relation.
class m2o_required(orm.Model):
    _name = name('m2o.required')
    _columns = {
        'value': fields.many2one(name('m2o.required.related'), required=True)
    }


# Target of the required m2o.
class m2o_required_related(orm.Model):
    _name = name('m2o.required.related')
    _columns = {
        'value': fields.integer()
    }
    _defaults = {
        'value': 42
    }
# Parent side of a one2many relation.
class o2m(orm.Model):
    _name = name('o2m')
    _columns = {
        'value': fields.one2many(name('o2m.child'), 'parent_id')
    }


# Child side: carries the inverse many2one used by the o2m above.
class o2m_child(orm.Model):
    _name = name('o2m.child')
    _columns = {
        'parent_id': fields.many2one(name('o2m')),
        'value': fields.integer()
    }


# Model used by the import-preview tests (mix of required/optional fields).
class preview_model(orm.Model):
    _name = name('preview')
    _columns = {
        'name': fields.char('Name'),
        'somevalue': fields.integer('Some Value', required=True),
        'othervalue': fields.integer('Other Variable'),
    }
| agpl-3.0 |
PeterWangIntel/chromium-crosswalk | build/android/pylib/chrome_test_server_spawner.py | 52 | 15678 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
# pylint: disable=W0702
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
from pylib import constants
from pylib import ports
from pylib.forwarder import Forwarder
# Path that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.DIR_SOURCE_ROOT, 'third_party'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
'src'),
os.path.join(constants.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
os.path.join(constants.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))
# Maps each supported server type to the testserver.py command-line flag
# that selects it (empty string means no flag is needed).
SERVER_TYPES = {
    'http': '',
    'ftp': '-f',
    'sync': '', # Sync uses its own script, and doesn't take a server type arg.
    'tcpecho': '--tcp-echo',
    'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
"""Blocks until the provided predicate (function) is true.
Returns:
Whether the provided predicate was satisfied once (before the timeout).
"""
sleep_time_sec = 0.025
for _ in xrange(1, max_attempts):
if predicate():
return True
time.sleep(sleep_time_sec)
sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
return False
# The three helpers below poll (via _WaitUntil's backoff) rather than check
# once, since port state changes asynchronously after (un)binding.


def _CheckPortAvailable(port):
  """Returns True if |port| is available."""
  return _WaitUntil(lambda: ports.IsHostPortAvailable(port))


def _CheckPortNotAvailable(port):
  """Returns True if |port| is not available."""
  return _WaitUntil(lambda: not ports.IsHostPortAvailable(port))


def _CheckDevicePortStatus(device, port):
  """Returns whether the provided port is used."""
  return _WaitUntil(lambda: ports.IsDevicePortUsed(device, port))
def _GetServerTypeCommandLine(server_type):
  """Maps a server type name to its testserver command-line flag.

  Args:
    server_type: the server type to be used (e.g. 'http').

  Returns:
    A string containing the command-line argument.

  Raises:
    NotImplementedError: when the server type is not recognized.
    Exception: for the (known but unsupported) 'udpecho' type.
  """
  if server_type == 'udpecho':
    raise Exception('Please do not run UDP echo tests because we do not have '
                    'a UDP forwarder tool.')
  if server_type in SERVER_TYPES:
    return SERVER_TYPES[server_type]
  raise NotImplementedError('Unknown server type: %s' % server_type)
class TestServerThread(threading.Thread):
  """A thread to run the test server in a separate process."""

  def __init__(self, ready_event, arguments, device, tool):
    """Initialize TestServerThread with the following argument.

    Args:
      ready_event: event which will be set when the test server is ready.
      arguments: dictionary of arguments to run the test server.
      device: An instance of DeviceUtils.
      tool: instance of runtime error detection tool.
    """
    threading.Thread.__init__(self)
    self.wait_event = threading.Event()
    self.stop_flag = False
    self.ready_event = ready_event
    self.ready_event.clear()
    self.arguments = arguments
    self.device = device
    self.tool = tool
    self.test_server_process = None
    self.is_ready = False
    # Host port requested by the caller; 0 means "let the testserver pick"
    # and the actual port is then read back over the pipe below.
    self.host_port = self.arguments['port']
    assert isinstance(self.host_port, int)
    # The forwarder device port now is dynamically allocated.
    self.forwarder_device_port = 0
    # Anonymous pipe in order to get port info from test server.
    self.pipe_in = None
    self.pipe_out = None
    self.process = None
    self.command_line = []

  def _WaitToStartAndGetPortFromTestServer(self):
    """Waits for the Python test server to start and gets the port it is using.

    The port information is passed by the Python test server with a pipe given
    by self.pipe_out. It is written as a result to |self.host_port|.

    Returns:
      Whether the port used by the test server was successfully fetched.
    """
    assert self.host_port == 0 and self.pipe_out and self.pipe_in
    (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
                                   TEST_SERVER_STARTUP_TIMEOUT)
    if len(in_fds) == 0:
      logging.error('Failed to wait to the Python test server to be started.')
      return False
    # First read the data length as an unsigned 4-byte value.  This
    # is _not_ using network byte ordering since the Python test server packs
    # size as native byte order and all Chromium platforms so far are
    # configured to use little-endian.
    # TODO(jnd): Change the Python test server and local_test_server_*.cc to
    # use a unified byte order (either big-endian or little-endian).
    data_length = os.read(self.pipe_in, struct.calcsize('=L'))
    if data_length:
      (data_length,) = struct.unpack('=L', data_length)
      assert data_length
    if not data_length:
      logging.error('Failed to get length of server data.')
      return False
    # The payload is a JSON object; only the 'port' key is consumed here.
    port_json = os.read(self.pipe_in, data_length)
    if not port_json:
      logging.error('Failed to get server data.')
      return False
    logging.info('Got port json data: %s', port_json)
    port_json = json.loads(port_json)
    if port_json.has_key('port') and isinstance(port_json['port'], int):
      self.host_port = port_json['port']
      return _CheckPortNotAvailable(self.host_port)
    logging.error('Failed to get port information from the server data.')
    return False

  def _GenerateCommandLineArguments(self):
    """Generates the command line to run the test server.

    Note that all options are processed by following the definitions in
    testserver.py.
    """
    if self.command_line:
      return

    args_copy = dict(self.arguments)

    # Translate the server type.
    type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
    if type_cmd:
      self.command_line.append(type_cmd)

    # Use a pipe to get the port given by the instance of Python test server
    # if the test does not specify the port.
    assert self.host_port == args_copy['port']
    if self.host_port == 0:
      (self.pipe_in, self.pipe_out) = os.pipe()
      self.command_line.append('--startup-pipe=%d' % self.pipe_out)

    # Pass the remaining arguments as-is.
    for key, values in args_copy.iteritems():
      if not isinstance(values, list):
        values = [values]
      for value in values:
        if value is None:
          self.command_line.append('--%s' % key)
        else:
          self.command_line.append('--%s=%s' % (key, value))

  def _CloseUnnecessaryFDsForTestServerProcess(self):
    # This is required to avoid subtle deadlocks that could be caused by the
    # test server child process inheriting undesirable file descriptors such as
    # file lock file descriptors.  Only the startup pipe's write end is kept.
    for fd in xrange(0, 1024):
      if fd != self.pipe_out:
        try:
          os.close(fd)
        except:
          pass

  def run(self):
    """Thread body: spawns testserver.py, sets up forwarding, then idles
    until Stop() is requested, at which point everything is torn down."""
    logging.info('Start running the thread!')
    self.wait_event.clear()
    self._GenerateCommandLineArguments()
    command = constants.DIR_SOURCE_ROOT
    if self.arguments['server-type'] == 'sync':
      command = [os.path.join(command, 'sync', 'tools', 'testserver',
                              'sync_testserver.py')] + self.command_line
    else:
      command = [os.path.join(command, 'net', 'tools', 'testserver',
                              'testserver.py')] + self.command_line
    logging.info('Running: %s', command)
    # Pass DIR_SOURCE_ROOT as the child's working directory so that relative
    # paths in the arguments are resolved correctly.
    self.process = subprocess.Popen(
        command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
        cwd=constants.DIR_SOURCE_ROOT)
    if self.process:
      if self.pipe_out:
        self.is_ready = self._WaitToStartAndGetPortFromTestServer()
      else:
        self.is_ready = _CheckPortNotAvailable(self.host_port)
    if self.is_ready:
      # Map host_port to a dynamically allocated device port.
      Forwarder.Map([(0, self.host_port)], self.device, self.tool)
      # Check whether the forwarder is ready on the device.
      self.is_ready = False
      device_port = Forwarder.DevicePortForHostPort(self.host_port)
      if device_port and _CheckDevicePortStatus(self.device, device_port):
        self.is_ready = True
        self.forwarder_device_port = device_port
    # Wake up the request handler thread.
    self.ready_event.set()
    # Keep thread running until Stop() gets called.
    _WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
    if self.process.poll() is None:
      self.process.kill()
    Forwarder.UnmapDevicePort(self.forwarder_device_port, self.device)
    self.process = None
    self.is_ready = False
    if self.pipe_out:
      os.close(self.pipe_in)
      os.close(self.pipe_out)
      self.pipe_in = None
      self.pipe_out = None
    logging.info('Test-server has died.')
    self.wait_event.set()

  def Stop(self):
    """Blocks until the loop has finished.

    Note that this must be called in another thread.
    """
    if not self.process:
      return
    self.stop_flag = True
    self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler used to process http GET/POST request."""

  def _SendResponse(self, response_code, response_reason, additional_headers,
                    contents):
    """Generates a response sent to the client from the provided parameters.

    Args:
      response_code: number of the response status.
      response_reason: string of reason description of the response.
      additional_headers: dict of additional headers. Each key is the name of
                          the header, each value is the content of the header.
      contents: string of the contents we want to send to client.
    """
    self.send_response(response_code, response_reason)
    self.send_header('Content-Type', 'text/html')
    # Specify the content-length as without it the http(s) response will not
    # be completed properly (and the browser keeps expecting data).
    self.send_header('Content-Length', len(contents))
    for header_name in additional_headers:
      self.send_header(header_name, additional_headers[header_name])
    self.end_headers()
    self.wfile.write(contents)
    self.wfile.flush()

  def _StartTestServer(self):
    """Starts the test server thread.

    Expects a JSON request body describing the testserver arguments; replies
    200 with the forwarded device port on success, 500 on failure.
    """
    logging.info('Handling request to spawn a test server.')
    content_type = self.headers.getheader('content-type')
    if content_type != 'application/json':
      raise Exception('Bad content-type for start request.')
    content_length = self.headers.getheader('content-length')
    if not content_length:
      content_length = 0
    try:
      content_length = int(content_length)
    except:
      raise Exception('Bad content-length for start request.')
    logging.info(content_length)
    test_server_argument_json = self.rfile.read(content_length)
    logging.info(test_server_argument_json)
    # Only one testserver may run at a time per spawner.
    assert not self.server.test_server_instance
    ready_event = threading.Event()
    self.server.test_server_instance = TestServerThread(
        ready_event,
        json.loads(test_server_argument_json),
        self.server.device,
        self.server.tool)
    self.server.test_server_instance.setDaemon(True)
    self.server.test_server_instance.start()
    # Block until TestServerThread.run() signals success or failure.
    ready_event.wait()
    if self.server.test_server_instance.is_ready:
      self._SendResponse(200, 'OK', {}, json.dumps(
          {'port': self.server.test_server_instance.forwarder_device_port,
           'message': 'started'}))
      logging.info('Test server is running on port: %d.',
                   self.server.test_server_instance.host_port)
    else:
      self.server.test_server_instance.Stop()
      self.server.test_server_instance = None
      self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encounter problem during starting a test server.')

  def _KillTestServer(self):
    """Stops the test server instance."""
    # There should only ever be one test server at a time. This may do the
    # wrong thing if we try and start multiple test servers.
    if not self.server.test_server_instance:
      return
    port = self.server.test_server_instance.host_port
    logging.info('Handling request to kill a test server on port: %d.', port)
    self.server.test_server_instance.Stop()
    # Make sure the status of test server is correct before sending response.
    if _CheckPortAvailable(port):
      self._SendResponse(200, 'OK', {}, 'killed')
      logging.info('Test server on port %d is killed', port)
    else:
      self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encounter problem during killing a test server.')
    self.server.test_server_instance = None

  def do_POST(self):
    """Dispatches POST requests; only /start is supported."""
    parsed_path = urlparse.urlparse(self.path)
    action = parsed_path.path
    logging.info('Action for POST method is: %s.', action)
    if action == '/start':
      self._StartTestServer()
    else:
      self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encounter unknown request: %s.', action)

  def do_GET(self):
    """Dispatches GET requests: /kill and /ping are supported."""
    parsed_path = urlparse.urlparse(self.path)
    action = parsed_path.path
    params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
    logging.info('Action for GET method is: %s.', action)
    for param in params:
      logging.info('%s=%s', param, params[param][0])
    if action == '/kill':
      self._KillTestServer()
    elif action == '/ping':
      # The ping handler is used to check whether the spawner server is ready
      # to serve the requests. We don't need to test the status of the test
      # server when handling ping request.
      self._SendResponse(200, 'OK', {}, 'ready')
      logging.info('Handled ping request and sent response.')
    else:
      self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encounter unknown request: %s.', action)
class SpawningServer(object):
  """The class used to start/stop a http server."""

  def __init__(self, test_server_spawner_port, device, tool):
    logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
    self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
                                            SpawningServerRequestHandler)
    # Stash collaborators on the HTTPServer so the request handler (which is
    # instantiated per-request by BaseHTTPServer) can reach them.
    self.server.device = device
    self.server.tool = tool
    self.server.test_server_instance = None
    self.server.build_type = constants.GetBuildType()

  def _Listen(self):
    logging.info('Starting test server spawner')
    self.server.serve_forever()

  def Start(self):
    """Starts the test server spawner."""
    # Daemon thread so the spawner never blocks process exit.
    listener_thread = threading.Thread(target=self._Listen)
    listener_thread.setDaemon(True)
    listener_thread.start()

  def Stop(self):
    """Stops the test server spawner.

    Also cleans the server state.
    """
    self.CleanupState()
    self.server.shutdown()

  def CleanupState(self):
    """Cleans up the spawning server state.

    This should be called if the test server spawner is reused,
    to avoid sharing the test server instance.
    """
    if self.server.test_server_instance:
      self.server.test_server_instance.Stop()
      self.server.test_server_instance = None
| bsd-3-clause |
kybriainfotech/iSocioCRM | addons/product/_common.py | 316 | 1448 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
def rounding(f, r):
# TODO for trunk: log deprecation warning
# _logger.warning("Deprecated rounding method, please use tools.float_round to round floats.")
return tools.float_round(f, precision_rounding=r)
# TODO for trunk: add rounding method parameter to tools.float_round and use this method as hook
def ceiling(f, r):
if not r:
return f
return tools.float_round(f, precision_rounding=r, rounding_method='UP')
| agpl-3.0 |
unomena/django-photologue | photologue/management/commands/plcache.py | 25 | 1406 | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from photologue.models import PhotoSize, ImageModel
class Command(BaseCommand):
    # Django management command: delegates to create_cache() below.
    option_list = BaseCommand.option_list + (
        make_option('--reset', '-r', action='store_true', dest='reset', help='Reset photo cache before generating'),
    )
    help = ('Manages Photologue cache file for the given sizes.')
    args = '[sizes]'
    requires_model_validation = True
    can_import_settings = True

    def handle(self, *args, **options):
        # Positional args are the photo-size names to (re)generate.
        return create_cache(args, options)
def create_cache(sizes, options):
"""
Creates the cache for the given files
"""
reset = options.get('reset', None)
size_list = [size.strip(' ,') for size in sizes]
if len(size_list) < 1:
sizes = PhotoSize.objects.filter(pre_cache=True)
else:
sizes = PhotoSize.objects.filter(name__in=size_list)
if not len(sizes):
raise CommandError('No photo sizes were found.')
print 'Caching photos, this may take a while...'
for cls in ImageModel.__subclasses__():
for photosize in sizes:
print 'Cacheing %s size images' % photosize.name
for obj in cls.objects.all():
if reset:
obj.remove_size(photosize)
obj.create_size(photosize)
| bsd-3-clause |
shellderp/sublime-robot-plugin | lib/robot/utils/asserts.py | 4 | 9599 | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience functions for testing both in unit and higher levels.
Benefits:
- Integrates 100% with unittest (see example below)
- Can be easily used without unittest (using unittest.TestCase when you
only need convenient asserts is not so nice)
- Saved typing and shorter lines because no need to have 'self.' before
asserts. These are static functions after all so that is OK.
- All 'equals' methods (by default) report given values even if optional
message given. This behavior can be controlled with the optional values
argument.
Drawbacks:
- unittest is not able to filter as much non-interesting traceback away
as with its own methods because AssertionErrors occur outside
Most of the functions are copied more or less directly from unittest.TestCase
which comes with the following license. Further information about unittest in
general can be found from http://pyunit.sourceforge.net/. This module can be
used freely in same terms as unittest.
unittest license::
Copyright (c) 1999-2003 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Examples:
.. code-block:: python
import unittest
from robot.util.asserts import *
class MyTests(unittest.TestCase):
def test_old_style(self):
self.assertEquals(1, 2, 'my msg')
def test_new_style(self):
assert_equals(1, 2, 'my msg')
Example output::
FF
======================================================================
FAIL: test_old_style (__main__.MyTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 7, in test_old_style
self.assertEquals(1, 2, 'my msg')
AssertionError: my msg
======================================================================
FAIL: test_new_style (__main__.MyTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "example.py", line 10, in test_new_style
assert_equals(1, 2, 'my msg')
File "/path/to/robot/asserts.py", line 142, in fail_unless_equal
_report_unequality_failure(first, second, msg, values, '!=')
File "/path/to/robot/src/robot/asserts.py", line 209, in _report_unequality_failure
raise _report_failure(msg)
File "/path/to/robot/src/robot/asserts.py", line 200, in _report_failure
raise AssertionError(msg)
AssertionError: my msg: 1 != 2
----------------------------------------------------------------------
Ran 2 tests in 0.000s
FAILED (failures=2)
"""
from .unic import unic
def fail(msg=None):
    """Fail test immediately with the given message."""
    _report_failure(msg)


def error(msg=None):
    """Error test immediately with the given message."""
    _report_error(msg)


def fail_if(expr, msg=None):
    """Fail the test if the expression is True."""
    if expr: _report_failure(msg)


def fail_unless(expr, msg=None):
    """Fail the test unless the expression is True."""
    if not expr: _report_failure(msg)
def fail_if_none(obj, msg=None, values=True):
    """Fail the test if given object is None.

    When `values` is True the default "is None" detail is appended to a
    caller-supplied `msg`.
    """
    _msg = 'is None'
    if obj is None:
        if msg is None:
            msg = _msg
        elif values is True:
            msg = '%s: %s' % (msg, _msg)
        _report_failure(msg)


def fail_unless_none(obj, msg=None, values=True):
    """Fail the test if given object is not None.

    When `values` is True the offending value's repr is appended to a
    caller-supplied `msg`.
    """
    _msg = '%r is not None' % obj
    if obj is not None:
        if msg is None:
            msg = _msg
        elif values is True:
            msg = '%s: %s' % (msg, _msg)
        _report_failure(msg)
def fail_unless_raises(exc_class, callable_obj, *args, **kwargs):
    """Fail unless an exception of class exc_class is thrown by callable_obj.

    callable_obj is invoked with arguments args and keyword arguments
    kwargs. If a different type of exception is thrown, it will not be
    caught, and the test case will be deemed to have suffered an
    error, exactly as for an unexpected exception.

    If a correct exception is raised, the exception instance is returned
    by this method.
    """
    try:
        callable_obj(*args, **kwargs)
    except exc_class, err:
        return err
    else:
        # exc_class may be a tuple or an old-style class without __name__.
        if hasattr(exc_class,'__name__'):
            exc_name = exc_class.__name__
        else:
            exc_name = str(exc_class)
        _report_failure('%s not raised' % exc_name)


def fail_unless_raises_with_msg(exc_class, expected_msg, callable_obj, *args,
                                **kwargs):
    """Similar to fail_unless_raises but also checks the exception message."""
    try:
        callable_obj(*args, **kwargs)
    except exc_class, err:
        assert_equal(expected_msg, unic(err), 'Correct exception but wrong message')
    else:
        if hasattr(exc_class,'__name__'):
            exc_name = exc_class.__name__
        else:
            exc_name = str(exc_class)
        _report_failure('%s not raised' % exc_name)
def fail_unless_equal(first, second, msg=None, values=True):
    """Fail if given objects are unequal as determined by the '==' operator."""
    # 'not first == second' (rather than '!=') so only __eq__ is consulted.
    if not first == second:
        _report_unequality_failure(first, second, msg, values, '!=')


def fail_if_equal(first, second, msg=None, values=True):
    """Fail if given objects are equal as determined by the '==' operator."""
    if first == second:
        _report_unequality_failure(first, second, msg, values, '==')
def fail_unless_almost_equal(first, second, places=7, msg=None, values=True):
    """Fail if the two objects are unequal after rounded to given places.

    Unequality is determined by object's difference rounded to the
    given number of decimal places (default 7) and comparing to zero.

    Note that decimal places (from zero) are usually not the same as
    significant digits (measured from the most signficant digit).
    """
    if round(second - first, places) != 0:
        extra = 'within %r places' % places
        _report_unequality_failure(first, second, msg, values, '!=', extra)


def fail_if_almost_equal(first, second, places=7, msg=None, values=True):
    """Fail if the two objects are unequal after rounded to given places.

    Equality is determined by object's difference rounded to to the
    given number of decimal places (default 7) and comparing to zero.

    Note that decimal places (from zero) are usually not the same as
    significant digits (measured from the most signficant digit).
    """
    if round(second-first, places) == 0:
        extra = 'within %r places' % places
        _report_unequality_failure(first, second, msg, values, '==', extra)
# Synonyms for assertion methods
# These mirror unittest.TestCase's naming so tests can switch to the
# module-level functions without renaming calls.

assert_equal = assert_equals = fail_unless_equal
assert_not_equal = assert_not_equals = fail_if_equal
assert_almost_equal = assert_almost_equals = fail_unless_almost_equal
assert_not_almost_equal = assert_not_almost_equals = fail_if_almost_equal
assert_raises = fail_unless_raises
assert_raises_with_msg = fail_unless_raises_with_msg
assert_ = assert_true = fail_unless
assert_false = fail_if
assert_none = fail_unless_none
assert_not_none = fail_if_none
# Helpers
def _report_failure(msg):
if msg is None:
raise AssertionError()
raise AssertionError(msg)
def _report_error(msg):
if msg is None:
raise Exception()
raise Exception(msg)
def _report_unequality_failure(obj1, obj2, msg, values, delim, extra=None):
    """Compose and raise the failure message for (in)equality asserts.

    `delim` is '!=' or '==' depending on which comparison failed; when
    `values` is true the compared values (and `extra`) are appended to
    any caller-supplied `msg`.
    """
    if not msg:
        msg = _get_default_message(obj1, obj2, delim)
    elif values:
        msg = '%s: %s' % (msg, _get_default_message(obj1, obj2, delim))
    if values and extra:
        msg += ' ' + extra
    _report_failure(msg)
def _get_default_message(obj1, obj2, delim):
    """Format '<v1> <delim> <v2>' for failure messages."""
    str1 = unic(obj1)
    str2 = unic(obj2)
    if delim == '!=' and str1 == str2:
        # The values stringify identically (e.g. 1 vs '1'); include the
        # type names so the actual difference is visible.
        return '%s (%s) != %s (%s)' % (str1, _type_name(obj1),
                                       str2, _type_name(obj2))
    return '%s %s %s' % (str1, delim, str2)
def _type_name(val):
    """Return a friendly type name; falls back to the class name.

    Note: uses the Python 2 `long`/`unicode` built-ins.
    """
    known_types = {int: 'number', long: 'number', float: 'number',
                   str: 'string', unicode: 'string', bool: 'boolean'}
    return known_types.get(type(val), type(val).__name__)
| apache-2.0 |
NeCTAR-RC/neutron | neutron/plugins/sriovnicagent/sriov_nic_agent.py | 2 | 14524 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_constants
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.i18n import _LE, _LI
from neutron.openstack.common import loopingcall
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """RPC endpoint consumed by the SR-IOV NIC switch agent."""

    # Set RPC API version to 1.0 by default.
    # history
    #   1.1 Support Security Group RPC
    target = oslo_messaging.Target(version='1.1')

    def __init__(self, context, agent, sg_agent):
        super(SriovNicSwitchRpcCallbacks, self).__init__()
        self.context = context
        self.agent = agent
        self.sg_agent = sg_agent

    def port_update(self, context, **kwargs):
        """Record a port-update notification for the agent's main loop."""
        LOG.debug("port_update received")
        port = kwargs.get('port')
        # Put the port mac address in the updated_devices set.
        # Do not store port details, as if they're used for processing
        # notifications there is no guarantee the notifications are
        # processed in the same order as the relevant API requests.
        self.agent.updated_devices.add(port['mac_address'])
        LOG.debug("port_update RPC received for port: %s", port['id'])
class SriovNicSwitchAgent(object):
    """L2 agent managing SR-IOV virtual functions on a compute node.

    Periodically scans the embedded switch for VFs assigned to instances,
    reconciles the result with the Neutron plugin over RPC, and applies
    admin-state and security-group changes to the devices.
    """

    def __init__(self, physical_devices_mappings, exclude_devices,
                 polling_interval):
        # Seconds between device-scan iterations of daemon_loop().
        self.polling_interval = polling_interval
        self.setup_eswitch_mgr(physical_devices_mappings,
                               exclude_devices)
        configurations = {'device_mappings': physical_devices_mappings}
        # State periodically pushed to the server by _report_state();
        # 'start_flag' is dropped after the first successful report.
        self.agent_state = {
            'binary': 'neutron-sriov-nic-agent',
            'host': cfg.CONF.host,
            'topic': q_constants.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': q_constants.AGENT_TYPE_NIC_SWITCH,
            'start_flag': True}
        # Stores port update notifications for processing in the main loop
        self.updated_devices = set()
        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
                                                     self.sg_plugin_rpc)
        self._setup_rpc()
        # Initialize iteration counter
        self.iter_num = 0

    def _setup_rpc(self):
        """Wire up RPC: agent id, endpoints, consumers and state reports."""
        self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
        self.topic = topics.AGENT
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        # Handle updates from service
        self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
                                                     self.sg_agent)]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            # Heartbeat so the server keeps showing this agent as alive.
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Report agent state (incl. device count) to the server.

        Exceptions are logged, never propagated, so a failed report does
        not kill the heartbeat loop.
        """
        try:
            devices = len(self.eswitch_mgr.get_assigned_devices())
            self.agent_state.get('configurations')['devices'] = devices
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # The start flag is only meaningful on the first report.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))

    def setup_eswitch_mgr(self, device_mappings, exclude_devices={}):
        """Create the embedded-switch manager for the given mappings."""
        # NOTE(review): mutable default argument; harmless as long as
        # ESwitchManager does not mutate it -- confirm before relying on it.
        self.eswitch_mgr = esm.ESwitchManager(device_mappings, exclude_devices)

    def scan_devices(self, registered_devices, updated_devices):
        """Diff currently assigned devices against the known sets.

        Returns a dict with 'current', 'added', 'updated' and 'removed'
        sets of device MAC addresses.
        """
        curr_devices = self.eswitch_mgr.get_assigned_devices()
        device_info = {}
        device_info['current'] = curr_devices
        device_info['added'] = curr_devices - registered_devices
        # we don't want to process updates for devices that don't exist
        device_info['updated'] = updated_devices & curr_devices
        # we need to clean up after devices are removed
        device_info['removed'] = registered_devices - curr_devices
        return device_info

    def _device_info_has_changes(self, device_info):
        # True when the scan produced anything that needs processing.
        return (device_info.get('added')
                or device_info.get('updated')
                or device_info.get('removed'))

    def process_network_devices(self, device_info):
        """Apply firewall/admin-state changes; return True if resync needed."""
        resync_a = False
        resync_b = False
        self.sg_agent.prepare_devices_filter(device_info.get('added'))
        if device_info.get('updated'):
            self.sg_agent.refresh_firewall()
        # Updated devices are processed the same as new ones, as their
        # admin_state_up may have changed. The set union prevents duplicating
        # work when a device is new and updated in the same polling iteration.
        devices_added_updated = (set(device_info.get('added'))
                                 | set(device_info.get('updated')))
        if devices_added_updated:
            resync_a = self.treat_devices_added_updated(devices_added_updated)
        if device_info.get('removed'):
            resync_b = self.treat_devices_removed(device_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

    def treat_device(self, device, pci_slot, admin_state_up):
        """Set a VF's link state and notify the plugin of the port status."""
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.SriovNicError:
                LOG.exception(_LE("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)

    def treat_devices_added_updated(self, devices):
        """Fetch port details for *devices* and apply them.

        Returns True when the plugin could not be reached (full resync
        required), False otherwise.
        """
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC address %(devices)s: %(e)s",
                      {'devices': devices, 'e': e})
            # resync is needed
            return True
        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port with MAC address %s is added", device)
            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                profile = device_details['profile']
                self.treat_device(device_details['device'],
                                  profile.get('pci_slot'),
                                  device_details['admin_state_up'])
            else:
                # Device known to the agent but not bound to a port.
                LOG.info(_LI("Device with MAC %s not defined on plugin"),
                         device)
        return False

    def treat_devices_removed(self, devices):
        """Mark removed devices down on the plugin; True if any call failed."""
        resync = False
        for device in devices:
            LOG.info(_LI("Removing device with mac_address %s"), device)
            try:
                dev_details = self.plugin_rpc.update_device_down(self.context,
                                                                 device,
                                                                 self.agent_id,
                                                                 cfg.CONF.host)
            except Exception as e:
                LOG.debug("Removing port failed for device %(device)s "
                          "due to %(exc)s", {'device': device, 'exc': e})
                resync = True
                continue
            if dev_details['exists']:
                LOG.info(_LI("Port %s updated."), device)
            else:
                LOG.debug("Device %s not defined on plugin", device)
        return resync

    def daemon_loop(self):
        """Main loop: scan for device changes once per polling interval."""
        sync = True
        devices = set()
        LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
        while True:
            start = time.time()
            LOG.debug("Agent rpc_loop - iteration:%d started",
                      self.iter_num)
            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                # Clearing forces the next scan to treat every current
                # device as newly added.
                devices.clear()
                sync = False
            device_info = {}
            # Save updated devices dict to perform rollback in case
            # resync would be needed, and then clear self.updated_devices.
            # As the greenthread should not yield between these
            # two statements, this will should be thread-safe.
            updated_devices_copy = self.updated_devices
            self.updated_devices = set()
            try:
                device_info = self.scan_devices(devices, updated_devices_copy)
                if self._device_info_has_changes(device_info):
                    LOG.debug("Agent loop found changes! %s", device_info)
                    # If treat devices fails - indicates must resync with
                    # plugin
                    sync = self.process_network_devices(device_info)
                    devices = device_info['current']
            except Exception:
                LOG.exception(_LE("Error in agent loop. Devices info: %s"),
                              device_info)
                sync = True
                # Restore devices that were removed from this set earlier
                # without overwriting ones that may have arrived since.
                self.updated_devices |= updated_devices_copy
            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
            self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
    """Parses and cross-validates the SR-IOV NIC agent config options."""

    def __init__(self):
        self.device_mappings = {}
        self.exclude_devices = {}

    def parse(self):
        """Parses device_mappings and exclude_devices.

        Parse and validate the consistency in both mappings
        """
        self.device_mappings = q_utils.parse_mappings(
            cfg.CONF.SRIOV_NIC.physical_device_mappings)
        self.exclude_devices = config.parse_exclude_devices(
            cfg.CONF.SRIOV_NIC.exclude_devices)
        self._validate()

    def _validate(self):
        """Validate configuration.

        Validate that every network device named in exclude_devices
        exists in the device mappings; raises ValueError otherwise.
        """
        dev_net_set = set(self.device_mappings.values())
        # Iterate the dict directly instead of the Python 2-only
        # iterkeys(); behavior is identical and stays Python 3 compatible.
        for dev_name in self.exclude_devices:
            if dev_name not in dev_net_set:
                raise ValueError(_("Device name %(dev_name)s is missing from "
                                   "physical_device_mappings") % {'dev_name':
                                                                  dev_name})
def main():
    """Entry point: parse config, build the agent and run its main loop."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging()

    try:
        parser = SriovNicAgentConfigParser()
        parser.parse()
        device_mappings = parser.device_mappings
        exclude_devices = parser.exclude_devices
    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)

    polling_interval = cfg.CONF.AGENT.polling_interval
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()


if __name__ == '__main__':
    main()
| apache-2.0 |
40223244/cdb-1 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/xmlreader.py | 824 | 12612 | """An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
from . import handler
from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
    """Interface for reading an XML document using callbacks.

    XMLReader is the interface that an XML parser's SAX2 driver must
    implement.  It lets an application set and query features and
    properties of the parser, register event handlers for document
    processing, and initiate a document parse.

    All SAX interfaces are assumed to be synchronous: the parse methods
    must not return until parsing is complete, and readers must wait for
    an event-handler callback to return before reporting the next event.
    """

    def __init__(self):
        # Default no-op handlers; replaced via the setters below.
        self._cont_handler = handler.ContentHandler()
        self._dtd_handler = handler.DTDHandler()
        self._ent_handler = handler.EntityResolver()
        self._err_handler = handler.ErrorHandler()

    def parse(self, source):
        """Parse an XML document from a system identifier or an InputSource."""
        raise NotImplementedError("This method must be implemented!")

    def setContentHandler(self, handler):
        """Registers a new object to receive document content events."""
        self._cont_handler = handler

    def getContentHandler(self):
        """Returns the current ContentHandler."""
        return self._cont_handler

    def setDTDHandler(self, handler):
        """Register an object to receive basic DTD-related events."""
        self._dtd_handler = handler

    def getDTDHandler(self):
        """Returns the current DTD handler."""
        return self._dtd_handler

    def setEntityResolver(self, resolver):
        """Register an object to resolve external entities."""
        self._ent_handler = resolver

    def getEntityResolver(self):
        """Returns the current EntityResolver."""
        return self._ent_handler

    def setErrorHandler(self, handler):
        """Register an object to receive error-message events."""
        self._err_handler = handler

    def getErrorHandler(self):
        """Returns the current ErrorHandler."""
        return self._err_handler

    def setLocale(self, locale):
        """Allow an application to set the locale for errors and warnings.

        Parsers are not required to provide localization; if they cannot
        support the requested locale they must raise a SAX exception.
        Applications may request a locale change mid-parse.
        """
        raise SAXNotSupportedException("Locale support not implemented")

    def getFeature(self, name):
        """Looks up and returns the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        """Sets the state of a SAX2 feature."""
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def getProperty(self, name):
        """Looks up and returns the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        """Sets the value of a SAX2 property."""
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
    """XMLReader extended with feed/close/reset incremental parsing.

    Support for this interface is optional, since not all underlying XML
    parsers can parse incrementally.  A fresh parser accepts data through
    feed() immediately; after close() it must be reset() before it can
    accept new data, either via feed or via the parse method.

    These methods must not be called while a parse is in progress, i.e.
    after parse has been called and before it returns.

    As a convenience to SAX 2.0 driver writers, parse() is implemented
    here in terms of prepareParser(), feed() and close().
    """

    def __init__(self, bufsize=2**16):
        self._bufsize = bufsize
        XMLReader.__init__(self)

    def parse(self, source):
        from . import saxutils
        source = saxutils.prepare_input_source(source)

        self.prepareParser(source)
        stream = source.getByteStream()
        data = stream.read(self._bufsize)
        while data:
            self.feed(data)
            data = stream.read(self._bufsize)
        self.close()

    def feed(self, data):
        """Hand raw XML data to the parser, emitting the matching events.

        XML constructs may be split across several calls to feed.
        May raise SAXException.
        """
        raise NotImplementedError("This method must be implemented!")

    def prepareParser(self, source):
        """Called by the parse implementation so the SAX 2.0 driver can
        prepare itself for parsing."""
        raise NotImplementedError("prepareParser must be overridden!")

    def close(self):
        """Signal that the entire document has been fed.

        Lets the parser run its final checks and flush its internal
        buffer.  The parser cannot parse again until reset() is called.
        May raise SAXException.
        """
        raise NotImplementedError("This method must be implemented!")

    def reset(self):
        """Make the parser ready for a new document after close().

        Calling parse or feed after close without reset is undefined.
        """
        raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
    """Associates a SAX event with a document location.

    A locator object returns valid results only during calls to
    DocumentHandler methods; at any other time the results are
    unpredictable.
    """

    def getLineNumber(self):
        """Return the line number where the current event ends."""
        return -1

    def getColumnNumber(self):
        """Return the column number where the current event ends."""
        return -1

    def getPublicId(self):
        """Return the public identifier for the current event."""
        return None

    def getSystemId(self):
        """Return the system identifier for the current event."""
        return None
# ===== INPUTSOURCE =====
class InputSource:
    """Encapsulates the information an XMLReader needs to read entities.

    An InputSource may carry a public identifier, a system identifier, a
    byte stream (possibly with character-encoding information) and/or a
    character stream for an entity.

    Applications create InputSource objects for use with
    XMLReader.parse and return them from EntityResolver.resolveEntity.

    An InputSource belongs to the application; the XMLReader must not
    modify InputSource objects passed to it, although it may make copies
    and modify those.
    """

    def __init__(self, system_id=None):
        self.__public_id = None
        self.__system_id = system_id
        self.__encoding = None
        self.__bytefile = None
        self.__charfile = None

    def setPublicId(self, public_id):
        """Sets the public identifier of this InputSource."""
        self.__public_id = public_id

    def getPublicId(self):
        """Returns the public identifier of this InputSource."""
        return self.__public_id

    def setSystemId(self, system_id):
        """Sets the system identifier of this InputSource."""
        self.__system_id = system_id

    def getSystemId(self):
        """Returns the system identifier of this InputSource."""
        return self.__system_id

    def setEncoding(self, encoding):
        """Sets the character encoding of this InputSource.

        The encoding must be a string acceptable for an XML encoding
        declaration (see section 4.3.3 of the XML recommendation).  It
        is ignored if the InputSource also contains a character stream.
        """
        self.__encoding = encoding

    def getEncoding(self):
        """Get the character encoding of this InputSource."""
        return self.__encoding

    def setByteStream(self, bytefile):
        """Set the byte stream (a Python file-like object that performs
        no byte-to-character conversion) for this input source.

        The parser ignores this if a character stream is also specified,
        but prefers a byte stream to opening a URI connection itself.
        If the character encoding of the byte stream is known, set it
        with the setEncoding method.
        """
        self.__bytefile = bytefile

    def getByteStream(self):
        """Get the byte stream for this input source.

        The getEncoding method returns the character encoding for this
        byte stream, or None if unknown.
        """
        return self.__bytefile

    def setCharacterStream(self, charfile):
        """Set the character stream for this input source (a file-like
        object that produces Unicode strings).

        If a character stream is specified, the SAX parser ignores any
        byte stream and will not open a URI connection to the system
        identifier.
        """
        self.__charfile = charfile

    def getCharacterStream(self):
        """Get the character stream for this input source."""
        return self.__charfile
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:

    def __init__(self, attrs):
        """Non-NS-aware implementation.

        attrs should be of the form {name : value}.
        """
        self._attrs = attrs

    def getLength(self):
        return len(self._attrs)

    def getType(self, name):
        # Attribute types are not tracked; everything reports as CDATA.
        return "CDATA"

    def getValue(self, name):
        return self._attrs[name]

    def getValueByQName(self, name):
        # Without namespace processing, qualified names equal plain names.
        return self._attrs[name]

    def getNameByQName(self, name):
        if name in self._attrs:
            return name
        raise KeyError(name)

    def getQNameByName(self, name):
        if name in self._attrs:
            return name
        raise KeyError(name)

    def getNames(self):
        return list(self._attrs)

    def getQNames(self):
        return list(self._attrs)

    def __len__(self):
        return len(self._attrs)

    def __getitem__(self, name):
        return self._attrs[name]

    def keys(self):
        return list(self._attrs)

    def __contains__(self, name):
        return name in self._attrs

    def get(self, name, alternative=None):
        return self._attrs.get(name, alternative)

    def copy(self):
        return self.__class__(self._attrs)

    def items(self):
        return list(self._attrs.items())

    def values(self):
        return list(self._attrs.values())
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):

    def __init__(self, attrs, qnames):
        """NS-aware implementation.

        attrs should be of the form {(ns_uri, lname): value, ...}.
        qnames of the form {(ns_uri, lname): qname, ...}.
        """
        self._attrs = attrs
        self._qnames = qnames

    def getValueByQName(self, name):
        # Reverse-map the qualified name to its (ns_uri, lname) key.
        for nskey, qname in self._qnames.items():
            if qname == name:
                return self._attrs[nskey]
        raise KeyError(name)

    def getNameByQName(self, name):
        for nskey, qname in self._qnames.items():
            if qname == name:
                return nskey
        raise KeyError(name)

    def getQNameByName(self, name):
        return self._qnames[name]

    def getQNames(self):
        return list(self._qnames.values())

    def copy(self):
        return self.__class__(self._attrs, self._qnames)
def _test():
    # Smoke test: the interface classes must be instantiable.
    for cls in (XMLReader, IncrementalParser, Locator):
        cls()
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| gpl-3.0 |
dawnpower/nova | nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.py | 52 | 1528 | # Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
    """Restore project_user_quotas.resource to its intended 255-char width."""
    meta = MetaData(bind=migrate_engine)
    quotas = Table('project_user_quotas', meta, autoload=True)
    resource_col = quotas.c.resource
    if resource_col.type.length == 25:
        # The resource of project_user_quotas table had been changed to
        # invalid length(25) since I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
        # The following code fixes the length for the environments which are
        # deployed after I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
        resource_col.alter(type=String(255))
        # Repair values that were truncated while the column was too narrow.
        quotas.update().where(
            quotas.c.resource == 'injected_file_content_byt').values(
            resource='injected_file_content_bytes').execute()
def downgrade(migrate_engine):
    """Intentional no-op: the resource-length fix is not reverted."""
    # This migration fixes the resource of project_user_quotas table.
    # No need to go back and reverse this change.
    pass
| apache-2.0 |
cristiana214/cristianachavez214-cristianachavez | python/gdata/tests/gdata_tests/auth_test.py | 126 | 26859 | #!/usr/bin/env python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import re
import unittest
import urllib
import gdata.auth
# Dummy OAuth consumer credentials shared by the test cases below.
CONSUMER_KEY = 'www.yourwebapp.com'
CONSUMER_SECRET = 'qB1P2kCFDpRjF+/Iww4'
# Throwaway RSA private key (PEM) used only to exercise RSA-SHA1 request
# signing in these tests; it is not a real secret.
RSA_KEY = """-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDVbOaFW+KXecfFJn1PIzYHnNXFxhaQ36QM0K5uSb0Y8NeQUlD2
6t8aKgnm6mcb4vaopHjjdIGWgAzM5Dt0oPIiDXo+jSQbvCIXRduuAt+0cFGb2d+L
hALk4AwB8IVIkDJWwgo5Z2OLsP2r/wQlUYKm/tnvQaevK24jNYMLWVJl2QIDAQAB
AoGAU93ERBlUVEPFjaJPUX67p4gotNvfWDSZiXOjZ7FQPnG9s3e1WyH2Y5irZXMs
61dnp+NhobfRiGtvHEB/YJgyLRk/CJDnMKslo95e7o65IE9VkcyY6Yvt7YTslsRX
Eu7T0xLEA7ON46ypCwNLeWxpJ9SWisEKu2yZJnWauCXEsgUCQQD7b2ZuhGx3msoP
YEnwvucp0UxneCvb68otfERZ1J6NfNP47QJw6OwD3r1sWCJ27QZmpvtQH1f8sCk9
t22anGG7AkEA2UzXdtQ8H1uLAN/XXX2qoLuvJK5jRswHS4GeOg4pnnDSiHg3Vbva
AxmMIL93ufvIy/xdoENwDPfcI4CbYlrDewJAGWy7W+OSIEoLsqBW+bwkHetnIXNa
ZAOkzxKoyrigS8hamupEe+xhqUaFuwXyfjobkpfCA+kXeZrKoM4CjEbR7wJAHMbf
Vd4/ZAu0edYq6DenLAgO5rWtcge9A5PTx25utovMZcQ917273mM4unGAwoGEkvcF
0x57LUx5u73hVgIdFwJBAKWGuHRwGPgTWYvhpHM0qveH+8KdU9BUt/kV4ONxIVDB
ftetEmJirqOGLECbImoLcUwQrgfMW4ZCxOioJMz/gY0=
-----END RSA PRIVATE KEY-----
"""
class AuthModuleUtilitiesTest(unittest.TestCase):
    """Tests for the module-level ClientLogin helper functions."""

    def testGenerateClientLoginRequestBody(self):
        # Default account type plus the optional captcha parameters.
        body = gdata.auth.GenerateClientLoginRequestBody('jo@gmail.com',
            'password', 'test service', 'gdata.auth test')
        expected_parameters = {'Email':r'jo%40gmail.com', 'Passwd':'password',
        'service':'test+service', 'source':'gdata.auth+test',
        'accountType':'HOSTED_OR_GOOGLE'}
        self.__matchBody(body, expected_parameters)
        body = gdata.auth.GenerateClientLoginRequestBody('jo@gmail.com',
        'password', 'test service', 'gdata.auth test', account_type='A TEST',
        captcha_token='12345', captcha_response='test')
        expected_parameters['accountType'] = 'A+TEST'
        expected_parameters['logintoken'] = '12345'
        expected_parameters['logincaptcha'] = 'test'
        self.__matchBody(body, expected_parameters)

    def __matchBody(self, body, expected_name_value_pairs):
        # Helper: assert every name=value pair in the urlencoded body
        # matches the expected mapping.
        parameters = body.split('&')
        for param in parameters:
            (name, value) = param.split('=')
            self.assert_(expected_name_value_pairs[name] == value)

    def testGenerateClientLoginAuthToken(self):
        # The Auth line of the ClientLogin response becomes the header value.
        http_body = ('SID=DQAAAGgA7Zg8CTN\r\n'
        'LSID=DQAAAGsAlk8BBbG\r\n'
        'Auth=DQAAAGgAdk3fA5N')
        self.assert_(gdata.auth.GenerateClientLoginAuthToken(http_body) ==
        'GoogleLogin auth=DQAAAGgAdk3fA5N')
class GenerateClientLoginRequestBodyTest(unittest.TestCase):
    """Checks request bodies against the documented ClientLogin examples."""

    def testPostBodyShouldMatchShortExample(self):
        auth_body = gdata.auth.GenerateClientLoginRequestBody('johndoe@gmail.com',
        'north23AZ', 'cl', 'Gulp-CalGulp-1.05')
        self.assert_(-1 < auth_body.find('Email=johndoe%40gmail.com'))
        self.assert_(-1 < auth_body.find('Passwd=north23AZ'))
        self.assert_(-1 < auth_body.find('service=cl'))
        self.assert_(-1 < auth_body.find('source=Gulp-CalGulp-1.05'))

    def testPostBodyShouldMatchLongExample(self):
        auth_body = gdata.auth.GenerateClientLoginRequestBody('johndoe@gmail.com',
        'north23AZ', 'cl', 'Gulp-CalGulp-1.05',
        captcha_token='DQAAAGgA...dkI1', captcha_response='brinmar')
        self.assert_(-1 < auth_body.find('logintoken=DQAAAGgA...dkI1'))
        self.assert_(-1 < auth_body.find('logincaptcha=brinmar'))

    def testEquivalenceWithOldLogic(self):
        # The helper must emit every parameter the previous
        # urlencode-based implementation produced.
        email = 'jo@gmail.com'
        password = 'password'
        account_type = 'HOSTED'
        service = 'test'
        source = 'auth test'
        old_request_body = urllib.urlencode({'Email': email,
        'Passwd': password,
        'accountType': account_type,
        'service': service,
        'source': source})
        new_request_body = gdata.auth.GenerateClientLoginRequestBody(email,
        password, service, source, account_type=account_type)
        for parameter in old_request_body.split('&'):
            self.assert_(-1 < new_request_body.find(parameter))
class GenerateAuthSubUrlTest(unittest.TestCase):
    """Checks the query parameters produced by GenerateAuthSubUrl."""

    def testDefaultParameters(self):
        url = gdata.auth.GenerateAuthSubUrl('http://example.com/xyz?x=5',
            'http://www.google.com/test/feeds')
        self.assertTrue(-1 < url.find(
            r'scope=http%3A%2F%2Fwww.google.com%2Ftest%2Ffeeds'))
        self.assertTrue(-1 < url.find(
            r'next=http%3A%2F%2Fexample.com%2Fxyz%3Fx%3D5'))
        # By default the token is single-use (session=1) and not secure.
        self.assertTrue(-1 < url.find('secure=0'))
        self.assertTrue(-1 < url.find('session=1'))

    def testAllParameters(self):
        url = gdata.auth.GenerateAuthSubUrl('http://example.com/xyz?x=5',
            'http://www.google.com/test/feeds', secure=True, session=False,
            request_url='https://example.com/auth')
        self.assertTrue(-1 < url.find(
            r'scope=http%3A%2F%2Fwww.google.com%2Ftest%2Ffeeds'))
        self.assertTrue(-1 < url.find(
            r'next=http%3A%2F%2Fexample.com%2Fxyz%3Fx%3D5'))
        self.assertTrue(-1 < url.find('secure=1'))
        self.assertTrue(-1 < url.find('session=0'))
        self.assertTrue(url.startswith('https://example.com/auth'))
class GenerateOAuthRequestTokenUrlTest(unittest.TestCase):
    """Checks URL construction for the OAuth request-token step."""

    def testDefaultParameters(self):
        # RSA-SHA1 signing with the default Google endpoint.
        oauth_input_params = gdata.auth.OAuthInputParams(
            gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
            rsa_key=RSA_KEY)
        scopes = [
            'http://abcd.example.com/feeds',
            'http://www.example.com/abcd/feeds'
            ]
        url = gdata.auth.GenerateOAuthRequestTokenUrl(
            oauth_input_params, scopes=scopes)
        self.assertEquals('https', url.protocol)
        self.assertEquals('www.google.com', url.host)
        self.assertEquals('/accounts/OAuthGetRequestToken', url.path)
        self.assertEquals('1.0', url.params['oauth_version'])
        self.assertEquals('RSA-SHA1', url.params['oauth_signature_method'])
        self.assert_(url.params['oauth_nonce'])
        self.assert_(url.params['oauth_timestamp'])
        # Scopes are space-delimited in the 'scope' query parameter.
        actual_scopes = url.params['scope'].split(' ')
        self.assertEquals(2, len(actual_scopes))
        for scope in actual_scopes:
            self.assert_(scope in scopes)
        self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
        self.assert_(url.params['oauth_signature'])

    def testAllParameters(self):
        # HMAC-SHA1 signing with a custom endpoint and extra parameters;
        # extra_parameters may override defaults such as oauth_version.
        oauth_input_params = gdata.auth.OAuthInputParams(
            gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
            consumer_secret=CONSUMER_SECRET)
        scopes = ['http://abcd.example.com/feeds']
        url = gdata.auth.GenerateOAuthRequestTokenUrl(
            oauth_input_params, scopes=scopes,
            request_token_url='https://www.example.com/accounts/OAuthRequestToken',
            extra_parameters={'oauth_version': '2.0', 'my_param': 'my_value'})
        self.assertEquals('https', url.protocol)
        self.assertEquals('www.example.com', url.host)
        self.assertEquals('/accounts/OAuthRequestToken', url.path)
        self.assertEquals('2.0', url.params['oauth_version'])
        self.assertEquals('HMAC-SHA1', url.params['oauth_signature_method'])
        self.assert_(url.params['oauth_nonce'])
        self.assert_(url.params['oauth_timestamp'])
        actual_scopes = url.params['scope'].split(' ')
        self.assertEquals(1, len(actual_scopes))
        for scope in actual_scopes:
            self.assert_(scope in scopes)
        self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
        self.assert_(url.params['oauth_signature'])
        self.assertEquals('my_value', url.params['my_param'])
class GenerateOAuthAuthorizationUrlTest(unittest.TestCase):
    """Checks URL construction for the OAuth user-authorization step."""

    def testDefaultParameters(self):
        token_key = 'ABCDDSFFDSG'
        token_secret = 'SDFDSGSDADADSAF'
        request_token = gdata.auth.OAuthToken(key=token_key, secret=token_secret)
        url = gdata.auth.GenerateOAuthAuthorizationUrl(request_token)
        self.assertEquals('https', url.protocol)
        self.assertEquals('www.google.com', url.host)
        self.assertEquals('/accounts/OAuthAuthorizeToken', url.path)
        self.assertEquals(token_key, url.params['oauth_token'])

    def testAllParameters(self):
        # With include_scopes_in_callback=True the token's scopes are
        # appended to the callback URL under scopes_param_prefix.
        token_key = 'ABCDDSFFDSG'
        token_secret = 'SDFDSGSDADADSAF'
        scopes = [
            'http://abcd.example.com/feeds',
            'http://www.example.com/abcd/feeds'
            ]
        request_token = gdata.auth.OAuthToken(key=token_key, secret=token_secret,
            scopes=scopes)
        url = gdata.auth.GenerateOAuthAuthorizationUrl(
            request_token,
            authorization_url='https://www.example.com/accounts/OAuthAuthToken',
            callback_url='http://www.yourwebapp.com/print',
            extra_params={'permission': '1'},
            include_scopes_in_callback=True, scopes_param_prefix='token_scope')
        self.assertEquals('https', url.protocol)
        self.assertEquals('www.example.com', url.host)
        self.assertEquals('/accounts/OAuthAuthToken', url.path)
        self.assertEquals(token_key, url.params['oauth_token'])
        expected_callback_url = ('http://www.yourwebapp.com/print?'
                                 'token_scope=http%3A%2F%2Fabcd.example.com%2Ffeeds'
                                 '+http%3A%2F%2Fwww.example.com%2Fabcd%2Ffeeds')
        self.assertEquals(expected_callback_url, url.params['oauth_callback'])
class GenerateOAuthAccessTokenUrlTest(unittest.TestCase):
    """Checks URL construction for the OAuth access-token exchange step."""

    def testDefaultParameters(self):
        # HMAC-SHA1 signing with the default Google endpoint.
        token_key = 'ABCDDSFFDSG'
        token_secret = 'SDFDSGSDADADSAF'
        authorized_request_token = gdata.auth.OAuthToken(key=token_key,
            secret=token_secret)
        oauth_input_params = gdata.auth.OAuthInputParams(
            gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
            consumer_secret=CONSUMER_SECRET)
        url = gdata.auth.GenerateOAuthAccessTokenUrl(authorized_request_token,
            oauth_input_params)
        self.assertEquals('https', url.protocol)
        self.assertEquals('www.google.com', url.host)
        self.assertEquals('/accounts/OAuthGetAccessToken', url.path)
        self.assertEquals(token_key, url.params['oauth_token'])
        self.assertEquals('1.0', url.params['oauth_version'])
        self.assertEquals('HMAC-SHA1', url.params['oauth_signature_method'])
        self.assert_(url.params['oauth_nonce'])
        self.assert_(url.params['oauth_timestamp'])
        self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
        self.assert_(url.params['oauth_signature'])

    def testAllParameters(self):
        # RSA-SHA1 signing with a custom endpoint and OAuth version.
        token_key = 'ABCDDSFFDSG'
        authorized_request_token = gdata.auth.OAuthToken(key=token_key)
        oauth_input_params = gdata.auth.OAuthInputParams(
            gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
            rsa_key=RSA_KEY)
        url = gdata.auth.GenerateOAuthAccessTokenUrl(
            authorized_request_token, oauth_input_params,
            access_token_url='https://www.example.com/accounts/OAuthGetAccessToken',
            oauth_version= '2.0')
        self.assertEquals('https', url.protocol)
        self.assertEquals('www.example.com', url.host)
        self.assertEquals('/accounts/OAuthGetAccessToken', url.path)
        self.assertEquals(token_key, url.params['oauth_token'])
        self.assertEquals('2.0', url.params['oauth_version'])
        self.assertEquals('RSA-SHA1', url.params['oauth_signature_method'])
        self.assert_(url.params['oauth_nonce'])
        self.assert_(url.params['oauth_timestamp'])
        self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
        self.assert_(url.params['oauth_signature'])
class ExtractAuthSubTokensTest(unittest.TestCase):
    """Covers extraction of AuthSub tokens from URLs and HTTP bodies."""

    def testGetTokenFromUrl(self):
        url = 'http://www.yourwebapp.com/showcalendar.html?token=CKF50YzIH'
        self.assertEqual(gdata.auth.AuthSubTokenFromUrl(url),
                         'AuthSub token=CKF50YzIH')
        self.assertEqual(gdata.auth.TokenFromUrl(url), 'CKF50YzIH')
        # Leading/trailing '=' characters inside the token are preserved.
        url = 'http://www.yourwebapp.com/showcalendar.html?token==tokenCKF50YzIH='
        self.assertEqual(gdata.auth.AuthSubTokenFromUrl(url),
                         'AuthSub token==tokenCKF50YzIH=')
        self.assertEqual(gdata.auth.TokenFromUrl(url), '=tokenCKF50YzIH=')

    def testGetTokenFromHttpResponse(self):
        response_body = ('Token=DQAA...7DCTN\r\n'
                         'Expiration=20061004T123456Z')
        self.assertEqual(gdata.auth.AuthSubTokenFromHttpBody(response_body),
                         'AuthSub token=DQAA...7DCTN')
class CreateAuthSubTokenFlowTest(unittest.TestCase):
# Covers the full AuthSub request flow: building the AuthSubRequest URL
# and later recovering the token (plus its scopes) from the 'next' URL
# that Google redirects back to.
def testGenerateRequest(self):
request_url = gdata.auth.generate_auth_sub_url(next='http://example.com',
scopes=['http://www.blogger.com/feeds/',
'http://www.google.com/base/feeds/'])
self.assertEquals(request_url.protocol, 'https')
self.assertEquals(request_url.host, 'www.google.com')
self.assertEquals(request_url.params['scope'],
'http://www.blogger.com/feeds/ http://www.google.com/base/feeds/')
self.assertEquals(request_url.params['hd'], 'default')
# The requested scopes are folded into the 'next' URL (as
# 'auth_sub_scopes') so they can be recovered after the redirect.
self.assert_(request_url.params['next'].find('auth_sub_scopes') > -1)
self.assert_(request_url.params['next'].startswith('http://example.com'))
# Use a more complicated 'next' URL.
request_url = gdata.auth.generate_auth_sub_url(
next='http://example.com/?token_scope=http://www.blogger.com/feeds/',
scopes=['http://www.blogger.com/feeds/',
'http://www.google.com/base/feeds/'])
self.assert_(request_url.params['next'].find('auth_sub_scopes') > -1)
self.assert_(request_url.params['next'].find('token_scope') > -1)
self.assert_(request_url.params['next'].startswith('http://example.com/'))
def testParseNextUrl(self):
# Scopes arrive URL-encoded and space-joined inside 'auth_sub_scopes'.
url = ('http://example.com/?auth_sub_scopes=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fwww.google.com%2Fbase%2Ffeeds%2F&'
'token=my_nifty_token')
token = gdata.auth.extract_auth_sub_token_from_url(url)
self.assertEquals(token.get_token_string(), 'my_nifty_token')
self.assert_(isinstance(token, gdata.auth.AuthSubToken))
self.assert_(token.valid_for_scope('http://www.blogger.com/feeds/'))
self.assert_(token.valid_for_scope('http://www.google.com/base/feeds/'))
self.assert_(
not token.valid_for_scope('http://www.google.com/calendar/feeds/'))
# Parse a more complicated response.
url = ('http://example.com/?auth_sub_scopes=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fwww.google.com%2Fbase%2Ffeeds%2F&'
'token_scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F&'
'token=second_token')
token = gdata.auth.extract_auth_sub_token_from_url(url)
self.assertEquals(token.get_token_string(), 'second_token')
self.assert_(isinstance(token, gdata.auth.AuthSubToken))
self.assert_(token.valid_for_scope('http://www.blogger.com/feeds/'))
self.assert_(token.valid_for_scope('http://www.google.com/base/feeds/'))
self.assert_(
not token.valid_for_scope('http://www.google.com/calendar/feeds/'))
def testParseNextWithNoToken(self):
# URLs without a 'token' query parameter must yield None, not raise.
token = gdata.auth.extract_auth_sub_token_from_url('http://example.com/')
self.assert_(token is None)
token = gdata.auth.extract_auth_sub_token_from_url(
'http://example.com/?no_token=foo&other=1')
self.assert_(token is None)
class ExtractClientLoginTokenTest(unittest.TestCase):
    """Extraction of a ClientLoginToken from a ClientLogin response body."""

    def testExtractFromBodyWithScopes(self):
        # A ClientLogin response carries SID, LSID and Auth lines; only the
        # Auth value should become the token string.
        response_body = ('SID=DQAAAGgA7Zg8CTN\r\n'
                         'LSID=DQAAAGsAlk8BBbG\r\n'
                         'Auth=DQAAAGgAdk3fA5N')
        granted_scopes = ['http://docs.google.com/feeds/']
        token = gdata.auth.extract_client_login_token(response_body,
                                                      granted_scopes)
        self.assert_(isinstance(token, gdata.auth.ClientLoginToken))
        self.assertEquals('DQAAAGgAdk3fA5N', token.get_token_string())
        # The token is only valid for the scopes it was created with.
        self.assert_(token.valid_for_scope('http://docs.google.com/feeds/'))
        self.assert_(not token.valid_for_scope('http://www.blogger.com/feeds'))
class ExtractOAuthTokensTest(unittest.TestCase):
# Tests extracting OAuth tokens (and their scopes) from callback URLs
# and from token-exchange HTTP response bodies.
def testOAuthTokenFromUrl(self):
scope_1 = 'http://docs.google.com/feeds/'
scope_2 = 'http://www.blogger.com/feeds/'
# Case 1: token and scopes both are present.
url = ('http://dummy.com/?oauth_token_scope=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&'
'oauth_token=CMns6t7MCxDz__8B')
token = gdata.auth.OAuthTokenFromUrl(url)
self.assertEquals('CMns6t7MCxDz__8B', token.key)
self.assertEquals(2, len(token.scopes))
self.assert_(scope_1 in token.scopes)
self.assert_(scope_2 in token.scopes)
# Case 2: token and scopes both are present but scope_param_prefix
# passed does not match the one present in the URL.
url = ('http://dummy.com/?oauth_token_scope=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&'
'oauth_token=CMns6t7MCxDz__8B')
token = gdata.auth.OAuthTokenFromUrl(url,
scopes_param_prefix='token_scope')
# The key is still found but no scopes are attached.
self.assertEquals('CMns6t7MCxDz__8B', token.key)
self.assert_(not token.scopes)
# Case 3: None present.
url = ('http://dummy.com/?no_oauth_token_scope=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&'
'no_oauth_token=CMns6t7MCxDz__8B')
token = gdata.auth.OAuthTokenFromUrl(url)
self.assert_(token is None)
def testOAuthTokenFromHttpBody(self):
token_key = 'ABCD'
token_secret = 'XYZ'
# Case 1: token key and secret both present single time.
http_body = 'oauth_token=%s&oauth_token_secret=%s' % (token_key,
token_secret)
token = gdata.auth.OAuthTokenFromHttpBody(http_body)
self.assertEquals(token_key, token.key)
self.assertEquals(token_secret, token.secret)
class OAuthInputParametersTest(unittest.TestCase):
# Verifies OAuthInputParams for both supported signature methods:
# HMAC-SHA1 (needs a consumer secret) and RSA-SHA1 (needs an RSA key).
def setUp(self):
self.oauth_input_parameters_hmac = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET)
self.oauth_input_parameters_rsa = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
rsa_key=RSA_KEY)
def testGetSignatureMethod(self):
self.assertEquals(
'HMAC-SHA1',
self.oauth_input_parameters_hmac.GetSignatureMethod().get_name())
rsa_signature_method = self.oauth_input_parameters_rsa.GetSignatureMethod()
self.assertEquals('RSA-SHA1', rsa_signature_method.get_name())
# The RSA method should hand back the configured private key
# regardless of the request object passed in (hence None here).
self.assertEquals(RSA_KEY, rsa_signature_method._fetch_private_cert(None))
def testGetConsumer(self):
self.assertEquals(CONSUMER_KEY,
self.oauth_input_parameters_hmac.GetConsumer().key)
self.assertEquals(CONSUMER_KEY,
self.oauth_input_parameters_rsa.GetConsumer().key)
self.assertEquals(CONSUMER_SECRET,
self.oauth_input_parameters_hmac.GetConsumer().secret)
# RSA consumers carry no shared secret; the key material lives in the
# signature method instead.
self.assert_(self.oauth_input_parameters_rsa.GetConsumer().secret is None)
class TokenClassesTest(unittest.TestCase):
# Round-trip (set_token_string / get_token_string) and auth-header tests
# for every token class: ClientLogin, AuthSub, SecureAuthSub and OAuth.
def testClientLoginToAndFromString(self):
token = gdata.auth.ClientLoginToken()
token.set_token_string('foo')
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(token.auth_header, '%s%s' % (
gdata.auth.PROGRAMMATIC_AUTH_LABEL, 'foo'))
# Round-tripping the token string must be lossless.
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
def testAuthSubToAndFromString(self):
token = gdata.auth.AuthSubToken()
token.set_token_string('foo')
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(token.auth_header, '%s%s' % (
gdata.auth.AUTHSUB_AUTH_LABEL, 'foo'))
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
def testSecureAuthSubToAndFromString(self):
# Case 1: no token.
token = gdata.auth.SecureAuthSubToken(RSA_KEY)
token.set_token_string('foo')
self.assertEquals(token.get_token_string(), 'foo')
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(str(token), 'foo')
# Case 2: token is a string
token = gdata.auth.SecureAuthSubToken(RSA_KEY, token_string='foo')
self.assertEquals(token.get_token_string(), 'foo')
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(str(token), 'foo')
def testOAuthToAndFromString(self):
token_key = 'ABCD'
token_secret = 'XYZ'
# Case 1: token key and secret both present single time.
token_string = 'oauth_token=%s&oauth_token_secret=%s' % (token_key,
token_secret)
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
# The serialized form may reorder parameters, so only check that both
# original key=value pairs appear somewhere in the output.
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[0]))
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[1]))
self.assertEquals(token_key, token.key)
self.assertEquals(token_secret, token.secret)
# Case 2: token key and secret both present multiple times with unwanted
# parameters.
token_string = ('oauth_token=%s&oauth_token_secret=%s&'
'oauth_token=%s&ExtraParams=GarbageString' % (token_key,
token_secret,
'LMNO'))
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[0]))
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[1]))
self.assertEquals(token_key, token.key)
self.assertEquals(token_secret, token.secret)
# Case 3: Only token key present.
token_string = 'oauth_token=%s' % (token_key,)
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assertEquals(token_string, token.get_token_string())
self.assertEquals(token_key, token.key)
self.assert_(not token.secret)
# Case 4: Only token secret present.
token_string = 'oauth_token_secret=%s' % (token_secret,)
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assertEquals(token_string, token.get_token_string())
self.assertEquals(token_secret, token.secret)
self.assert_(not token.key)
# Case 5: None present.
token_string = ''
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assert_(token.get_token_string() is None)
self.assert_(not token.key)
self.assert_(not token.secret)
def testSecureAuthSubGetAuthHeader(self):
# Case 1: Presence of OAuth token (in case of 3-legged OAuth)
url = 'http://dummy.com/?q=notebook&s=true'
token = gdata.auth.SecureAuthSubToken(RSA_KEY, token_string='foo')
auth_header = token.GetAuthHeader('GET', url)
self.assert_('Authorization' in auth_header)
header_value = auth_header['Authorization']
self.assert_(header_value.startswith(r'AuthSub token="foo"'))
self.assert_(-1 < header_value.find(r'sigalg="rsa-sha1"'))
self.assert_(-1 < header_value.find(r'data="'))
self.assert_(-1 < header_value.find(r'sig="'))
# The signed 'data' blob must cover the HTTP method and request URL.
m = re.search(r'data="(.*?)"', header_value)
self.assert_(m is not None)
data = m.group(1)
self.assert_(data.startswith('GET'))
self.assert_(-1 < data.find(url))
def testOAuthGetAuthHeader(self):
# Case 1: Presence of OAuth token (in case of 3-legged OAuth)
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
rsa_key=RSA_KEY)
token = gdata.auth.OAuthToken(key='ABCDDSFFDSG',
oauth_input_params=oauth_input_params)
auth_header = token.GetAuthHeader('GET',
'http://dummy.com/?q=notebook&s=true',
realm='http://dummy.com')
self.assert_('Authorization' in auth_header)
header_value = auth_header['Authorization']
self.assert_(-1 < header_value.find(r'OAuth realm="http://dummy.com"'))
self.assert_(-1 < header_value.find(r'oauth_version="1.0"'))
self.assert_(-1 < header_value.find(r'oauth_token="ABCDDSFFDSG"'))
self.assert_(-1 < header_value.find(r'oauth_nonce="'))
self.assert_(-1 < header_value.find(r'oauth_timestamp="'))
self.assert_(-1 < header_value.find(r'oauth_signature="'))
self.assert_(-1 < header_value.find(
r'oauth_consumer_key="%s"' % CONSUMER_KEY))
self.assert_(-1 < header_value.find(r'oauth_signature_method="RSA-SHA1"'))
# Case 2: Absence of OAuth token (in case of 2-legged OAuth)
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET)
token = gdata.auth.OAuthToken(oauth_input_params=oauth_input_params)
auth_header = token.GetAuthHeader(
'GET', 'http://dummy.com/?xoauth_requestor_id=user@gmail.com&q=book')
self.assert_('Authorization' in auth_header)
header_value = auth_header['Authorization']
# 2-legged requests carry an empty realm and no oauth_token parameter.
self.assert_(-1 < header_value.find(r'OAuth realm=""'))
self.assert_(-1 < header_value.find(r'oauth_version="1.0"'))
self.assertEquals(-1, header_value.find(r'oauth_token='))
self.assert_(-1 < header_value.find(r'oauth_nonce="'))
self.assert_(-1 < header_value.find(r'oauth_timestamp="'))
self.assert_(-1 < header_value.find(r'oauth_signature="'))
self.assert_(-1 < header_value.find(
r'oauth_consumer_key="%s"' % CONSUMER_KEY))
self.assert_(-1 < header_value.find(r'oauth_signature_method="HMAC-SHA1"'))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
JamesShaeffer/QGIS | python/plugins/processing/algs/gdal/polygonize.py | 15 | 5397 | # -*- coding: utf-8 -*-
"""
***************************************************************************
polygonize.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessing,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterVectorDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class polygonize(GdalAlgorithm):
# QGIS Processing wrapper around GDAL's gdal_polygonize utility:
# converts connected regions of equal raster value into vector polygons.
# Parameter-name constants shared between initAlgorithm and
# getConsoleCommands.
INPUT = 'INPUT'
BAND = 'BAND'
FIELD = 'FIELD'
EIGHT_CONNECTEDNESS = 'EIGHT_CONNECTEDNESS'
EXTRA = 'EXTRA'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
# Declare the algorithm's inputs/outputs shown in the Processing UI.
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
1,
parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterString(self.FIELD,
self.tr('Name of the field to create'),
defaultValue='DN'))
self.addParameter(QgsProcessingParameterBoolean(self.EIGHT_CONNECTEDNESS,
self.tr('Use 8-connectedness'),
defaultValue=False))
# Free-form extra CLI flags, hidden behind the "advanced" toggle.
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
self.addParameter(QgsProcessingParameterVectorDestination(self.OUTPUT,
self.tr('Vectorized'),
QgsProcessing.TypeVectorPolygon))
def name(self):
return 'polygonize'
def displayName(self):
return self.tr('Polygonize (raster to vector)')
def group(self):
return self.tr('Raster conversion')
def groupId(self):
return 'rasterconversion'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'polygonize.png'))
def commandName(self):
return 'gdal_polygonize'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
# Translate the Processing parameter values into the gdal_polygonize
# command line. Argument order matters: positional input/output first,
# then options, with the layer name and field name last.
arguments = []
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
arguments.append(inLayer.source())
outFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, outFile)
output, outFormat = GdalUtils.ogrConnectionStringAndFormat(outFile, context)
arguments.append(output)
if self.parameterAsBoolean(parameters, self.EIGHT_CONNECTEDNESS, context):
arguments.append('-8')
arguments.append('-b')
arguments.append(str(self.parameterAsInt(parameters, self.BAND, context)))
if outFormat:
arguments.append('-f {}'.format(outFormat))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
layerName = GdalUtils.ogrOutputLayerName(output)
if layerName:
arguments.append(layerName)
arguments.append(self.parameterAsString(parameters, self.FIELD, context))
# gdal_polygonize ships as a .bat on Windows and a .py elsewhere.
return [self.commandName() + ('.bat' if isWindows() else '.py'), GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
pfnet/chainer | examples/wavenet/train.py | 6 | 5955 | import argparse
import os
import pathlib
import warnings
import numpy
import chainer
from chainer.training import extensions
import chainerx
from net import EncoderDecoderModel
from net import UpsampleNet
from net import WaveNet
from utils import Preprocess
import matplotlib
matplotlib.use('Agg')
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Chainer example: WaveNet')
parser.add_argument('--batchsize', '-b', type=int, default=4,
help='Numer of audio clips in each mini-batch')
parser.add_argument('--length', '-l', type=int, default=7680,
help='Number of samples in each audio clip')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='./VCTK-Corpus',
help='Directory of dataset')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--n_loop', type=int, default=4,
help='Number of residual blocks')
parser.add_argument('--n_layer', type=int, default=10,
help='Number of layers in each residual block')
parser.add_argument('--a_channels', type=int, default=256,
help='Number of channels in the output layers')
parser.add_argument('--r_channels', type=int, default=64,
help='Number of channels in residual layers and embedding')
parser.add_argument('--s_channels', type=int, default=256,
help='Number of channels in the skip layers')
parser.add_argument('--use_embed_tanh', type=bool, default=True,
help='Use tanh after an initial 2x1 convolution')
parser.add_argument('--seed', type=int, default=0,
help='Random seed to split dataset into train and test')
parser.add_argument('--snapshot_interval', type=int, default=10000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
parser.add_argument('--process', type=int, default=1,
help='Number of parallel processes')
parser.add_argument('--prefetch', type=int, default=8,
help='Number of prefetch samples')
# Legacy --gpu flag kept for backwards compatibility; it writes into the
# same 'device' destination as --device.
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('GPU: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# cuDNN autotune speeds up convolutions when running on CuPy.
if device.xp is chainer.backends.cuda.cupy:
chainer.global_config.autotune = True
# ---- Datasets ---------------------------------------------------------------
# Expects the VCTK corpus layout: <dataset>/wav48/<speaker>/<clip>.wav
if not os.path.isdir(args.dataset):
raise RuntimeError('Dataset directory not found: {}'.format(args.dataset))
paths = sorted([
str(path) for path in pathlib.Path(args.dataset).glob('wav48/*/*.wav')])
preprocess = Preprocess(
sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
length=args.length, quantize=args.a_channels)
dataset = chainer.datasets.TransformDataset(paths, preprocess)
# 90/10 train/validation split, deterministic for a given --seed.
train, valid = chainer.datasets.split_dataset_random(
dataset, int(len(dataset) * 0.9), args.seed)
# ---- Networks ---------------------------------------------------------------
# The encoder upsamples mel-spectrogram conditioning features to sample rate;
# the decoder is the autoregressive WaveNet itself.
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
args.n_loop, args.n_layer,
args.a_channels, args.r_channels, args.s_channels,
args.use_embed_tanh)
model = chainer.links.Classifier(EncoderDecoderModel(encoder, decoder))
# ---- Optimizer --------------------------------------------------------------
optimizer = chainer.optimizers.Adam(1e-4)
optimizer.setup(model)
# ---- Iterators --------------------------------------------------------------
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize,
n_processes=args.process, n_prefetch=args.prefetch)
valid_iter = chainer.iterators.MultiprocessIterator(
valid, args.batchsize, repeat=False, shuffle=False,
n_processes=args.process, n_prefetch=args.prefetch)
# ---- Updater and Trainer ----------------------------------------------------
updater = chainer.training.StandardUpdater(
train_iter, optimizer, device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
# ---- Extensions -------------------------------------------------------------
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'main/accuracy',
'validation/main/loss', 'validation/main/accuracy']),
trigger=display_interval)
trainer.extend(extensions.PlotReport(
['main/loss', 'validation/main/loss'],
'iteration', file_name='loss.png', trigger=display_interval))
trainer.extend(extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'iteration', file_name='accuracy.png', trigger=display_interval))
trainer.extend(extensions.ProgressBar(update_interval=10))
# ---- Resume -----------------------------------------------------------------
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
# ---- Run --------------------------------------------------------------------
trainer.run()
| mit |
R-bob/R-bob.github.io | ardupilot-ArduCopter-3.2.1/Tools/LogAnalyzer/tests/TestBrownout.py | 74 | 1401 | from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestBrownout(Test):
'''test for a log that has been truncated in flight'''
def __init__(self):
Test.__init__(self)
self.name = "Brownout"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if "EV" in logdata.channels:
# step through the arm/disarm events in order, to see if they're symmetrical
# note: it seems landing detection isn't robust enough to rely upon here, so we'll only consider arm+disarm, not takeoff+land
isArmed = False
# Event ids 10/11 appear to mean armed/disarmed respectively
# (assumption based on usage here -- TODO confirm against log spec).
for line,ev in logdata.channels["EV"]["Id"].listData:
if ev == 10:
isArmed = True
elif ev == 11:
isArmed = False
if "CTUN" not in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No CTUN log data"
return
# check for relative altitude at end
if "CTUN" in logdata.channels and "BarAlt" in logdata.channels["CTUN"]:
# A log that ends while still armed and well above ground suggests
# the log was cut short mid-flight (brownout / power loss).
(finalAlt,finalAltLine) = logdata.channels["CTUN"]["BarAlt"].getNearestValue(logdata.lineCount, lookForwards=False)
finalAltMax = 3.0 # max alt offset that we'll still consider to be on the ground
if isArmed and finalAlt > finalAltMax:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Truncated Log? Ends while armed at altitude %.2fm" % finalAlt
borysf/SublimeGerrit | core/change_view.py | 1 | 35778 | """
SublimeGerrit - full-featured Gerrit Code Review for Sublime Text
Copyright (C) 2015 Borys Forytarz <borys.forytarz@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import sublime
import os
import re
import webbrowser
from urllib.parse import urlparse
from .base_view import BaseView
from .template import Template
from .utils import quick_panel, capwords, sort, sort_alpha, error_message, info_message, fix_download_command, git_root, mkdate, get_reviewer_name, ellipsis
from .resources import GerritResources
from .settings import Settings
from .reader import DataReader
from .diff_view import DiffView
from .reloader import Reloader
from .git import Git
class ChangeView(BaseView):
# Shared Sublime window hosting all change views (created on demand).
window = None
# True when change views open in their own window (see
# 'change.separate_window' setting).
detached = False
@classmethod
def get_instance_for_change_id(self, change_id):
    """Return the open ChangeView showing the given change id, or None."""
    for instances in BaseView.instances.values():
        for candidate in instances:
            if candidate.change_id() == change_id:
                return candidate
    return None
def __init__(self, change):
# 'change' is a (change_data, ...) sequence; element 0 holds the Gerrit
# change dict (project/branch/change_id/...).
self.opener = sublime.active_window()
# Reuse the shared change window if it is still open; otherwise create
# a new one (or fall back to the opener window, per settings).
if ChangeView.window is None or ChangeView.window not in sublime.windows():
if Settings.get('change.separate_window'):
sublime.run_command('new_window')
ChangeView.window = sublime.active_window()
ChangeView.detached = True
else:
ChangeView.window = self.opener
self.view = ChangeView.window.new_file()
self.view.settings().set('is_sublimegerrit_change_view', True)
# When detached, return focus to the opener window shortly after the
# new window steals it.
if ChangeView.detached:
sublime.set_timeout(lambda: self.opener.focus_view(self.view), 200)
BaseView.__init__(self)
syntax_file = "/".join(['Packages', 'SublimeGerrit', 'syntax', 'SublimeGerrit.tmLanguage'])
self.view.set_scratch(True)
self.view.set_read_only(True)
self.view.set_syntax_file(syntax_file)
self.diff_view = None
self.destroying = False
self.draft_comments = None
self.comments = None
self.resources = GerritResources()
self.links = []
self.loading = False
self.change = None
# Apply view cosmetics; per-user overrides from 'change_view' settings
# are applied last so they win over the fixed defaults.
view_settings = self.view.settings()
settings_to_apply = Settings.get('change_view')
view_settings.set('line_numbers', False)
view_settings.set('highlight_line', False)
view_settings.set('gutter', False)
view_settings.set('caret_extra_top', 0)
view_settings.set('caret_extra_bottom', 0)
view_settings.set('caret_extra_width', 0)
view_settings.set('sublimerge_off', True)
for name in settings_to_apply:
view_settings.set(name, settings_to_apply[name])
self.git = Git(
change[0]['project'],
change[0]['branch'],
change[0]['change_id'],
self.opener,
ChangeView.window
)
# Kick off rendering: placeholder first, then render once both comment
# requests have completed.
self.prepare(change)
self.get_comments(self.render)
def views(self):
    """Expose the Sublime views owned by this instance (just one here)."""
    owned = [self.view]
    return owned
def get_comments(self, callback):
# Fetch published and draft inline-comment counts for the current
# revision in parallel; invoke 'callback' only once BOTH requests have
# completed (each result resets one of the two None sentinels).
self.comments = self.draft_comments = None
def done(data, regular):
# 'regular' distinguishes published comments from drafts.
if regular:
store = self.comments = {}
else:
store = self.draft_comments = {}
if data is not None:
# Map filename -> number of comments on that file.
for fname in data:
store.update({fname: len(data[fname])})
if self.comments is not None and self.draft_comments is not None:
callback()
revision = self.get_current_rev()
self.resources.get_comments(self.change['_number'], revision['_number']).then(lambda data: done(data, True))
self.resources.get_draft_comments(self.change['_number'], revision['_number']).then(lambda data: done(data, False))
def focus(self):
    """Bring this change view to the front, if it still exists."""
    if not self.view:
        return
    owning_window = self.view.window()
    if owning_window:
        owning_window.focus_view(self.view)
def change_id(self):
# Gerrit Change-Id of the change displayed by this view.
return self.change['id']
def prepare(self, change):
# Initialize render state from fresh change data and precompute the
# current user's existing votes for each permitted review label.
self.render_loader()
self.loading = True
self.change = change[0]
self.change['created'] = mkdate(self.change['created'])
self.change['updated'] = mkdate(self.change['updated'])
# '__MSG__' holds the optional review cover message.
self.review = {'__MSG__': None}
self.review_selected = {}
self_data = self.resources.get_self()
for label_name in self.change['permitted_labels'].keys():
# Default every label to a neutral ' 0' vote.
self.review.update({label_name: ' 0'})
self.review_selected.update({label_name: True})
if label_name in self.change['labels'] and 'all' in self.change['labels'][label_name]:
for vote in self.change['labels'][label_name]['all']:
# Pick up the logged-in user's own vote, formatted with an
# explicit sign ('+1', '-1') or ' 0' for neutral.
if self_data is not None and vote['_account_id'] == self_data['_account_id']:
value = vote['value']
if value > 0:
value = '+%d' % value
elif value < 0:
value = '%d' % value
else:
value = ' 0'
self.review.update({label_name: value})
# Buffers filled by insert() and flushed by finish_render().
self.to_render = []
self.longest_line = 0
def render_loader(self):
    """Show a 'Loading...' placeholder until change data has arrived."""
    if self.change is not None:
        # Real data is already present; nothing to do.
        return
    self.view.set_name('*GERRIT* Loading...')
    self.view.run_command('sublime_gerrit_clear')
    self.view.run_command('sublime_gerrit_insert', {
        'content': 'Loading...',
        'pos': 0
    })
def render(self):
# Render the full change view. The submit type is fetched
# asynchronously first; everything else renders in its callback.
def do_render(submit_type):
if submit_type is not None:
# e.g. 'MERGE_IF_NECESSARY' -> 'Merge If Necessary'
self.change.update({'submit_type': capwords(submit_type, '_')})
else:
self.change.update({'submit_type': ''})
self.view.set_name(Template('change_title').applystr(self.change))
current_rev = self.get_current_rev()
# A 'rebase' action from Gerrit means the change is outdated.
if self.has_action('rebase'):
self.insert(['NOTICE: THIS CHANGE IS OUTDATED AND NEEDS REBASE!', '', ''])
self.render_header()
self.render_reviewers()
self.render_depends()
self.render_current_rev()
self.render_comments()
self.finish_render()
self.loading = False
self.resources.submit_type(self.change['_number'], self.get_current_rev()['_number']).then(do_render)
def render_header(self):
    """Render the commit message followed by the change summary section."""
    # Gerrit reports merge-pending changes simply as SUBMITTED; expand
    # that into a friendlier status string.
    if self.change['status'] == 'SUBMITTED':
        self.change['status'] = 'SUBMITTED, Merge Pending'
    self.insert(Template('change_commit_message').apply(self.get_current_rev()))
    self.insert(Template('change_summary').apply(self.change))
def render_reviewers(self):
# Render a table of reviewers (rows) vs. labels (columns), with each
# cell holding that reviewer's vote. Column widths are computed from
# the longest name / vote so everything lines up in the plain-text view.
items = {}
longest = {'name': 0}
for label_name in self.change['labels']:
if not label_name in longest:
longest.update({label_name: 0})
if 'all' in self.change['labels'][label_name]:
for reviewer in self.change['labels'][label_name]['all']:
name = get_reviewer_name(reviewer)
if name not in items:
items.update({name: {}})
longest['name'] = max(longest['name'], len(name))
# Positive votes get an explicit '+' sign; missing votes render
# as an empty cell.
if 'value' in reviewer:
value = ('+' if reviewer['value'] > 0 else '') + str(reviewer['value'])
else:
value = ''
items[name].update({label_name: value})
longest[label_name] = max(longest[label_name], len(value))
# Only render the table when at least one reviewer exists.
if longest['name'] > 0:
lines = ['Reviewer' + (' ' * (longest['name'] - 8))]
for label_name in self.change['labels']:
lines[0] += ' ' + label_name
for name in items:
line = name + (' ' * (longest['name'] - len(name)))
for label_name in items[name]:
value = items[name][label_name]
# Right-align the vote, then center it under the label header.
value = (' ' * (longest[label_name] - len(value))) + value
pad_left = int((len(label_name) - longest[label_name]) / 2)
line += ' ' + (' ' * pad_left)
line += value
line += ' ' * (len(label_name) - len(value) - pad_left)
lines.append(line)
lines.append('')
self.insert(lines)
def render_depends(self):
    """List the parent commits the current revision depends on, if any."""
    revision = self.get_current_rev()
    commit = revision.get('commit')
    if not commit:
        return
    parents = commit.get('parents') or []
    if not parents:
        return
    self.insert(Template('change_depends_header').apply({}))
    for parent in parents:
        self.insert(Template('change_depends_item').apply(parent))
def render_current_rev(self):
# Render the patch-set list (oldest first) and, for the current
# revision, its commit details plus a per-file change summary.
ordered = sort(self.change['revisions'], lambda a, b: self.change['revisions'][a]['_number'] - self.change['revisions'][b]['_number'])
current_rev = self.get_current_rev()
self.insert(Template('change_patch_sets_header').apply({}))
for rev in ordered:
self.change['revisions'][rev].update({'revision': rev})
self.insert(Template('change_patch_set_header').apply(self.change['revisions'][rev]))
# Only the current revision carries full commit data worth showing.
if 'commit' in current_rev and self.change['revisions'][rev] is current_rev:
current_rev['commit']['author']['date'] = mkdate(current_rev['commit']['author']['date'])
current_rev['commit']['committer']['date'] = mkdate(current_rev['commit']['committer']['date'])
self.insert(Template('change_patch_set_commit').apply(current_rev))
# First pass: find the longest display name so the file column
# can be padded uniformly.
longest = 0
ordered_files = sort_alpha(current_rev['files'])
for filename in current_rev['files']:
item = current_rev['files'][filename]
# Renames render as 'old_path -> new_path'.
filename = ((item['old_path'] + ' -> ') if 'old_path' in item else '') + filename
longest = max(longest, len(filename))
for filename in ordered_files:
item = current_rev['files'][filename]
lines = []
# Gerrit omits 'status' for plain modifications.
if 'status' not in item:
item.update({'status': 'M'})
if 'lines_inserted' in item:
lines.append('+%d' % item['lines_inserted'])
else:
lines.append('+0')
if 'lines_deleted' in item:
lines.append('-%d' % item['lines_deleted'])
else:
lines.append('-0')
name = ((item['old_path'] + ' -> ') if 'old_path' in item else '') + filename
comments = self.comments_count_text(filename)
item.update({
'lines_total': ', '.join(lines),
'filename': name + (' ' * (longest - len(name))),
'draft_comments': '(' + comments + ')' if comments else ''
})
self.insert(Template('change_patch_set_file').apply(item))
def comments_count_text(self, filename):
    """Return e.g. '2 drafts, 1 comment' for *filename*, or '' when none."""
    fragments = []
    drafts = self.draft_comments
    if drafts is not None and filename in drafts:
        count = drafts[filename]
        fragments.append('%d draft%s' % (count, 's' if count > 1 else ''))
    published = self.comments
    if published is not None and filename in published:
        count = published[filename]
        fragments.append('%d comment%s' % (count, 's' if count > 1 else ''))
    return ', '.join(fragments)
    def render_comments(self):
        """Render the change's message history into the output buffer.

        Messages are shown newest-first when the `change.reverse_comments`
        setting is enabled. Missing author fields fall back through
        name -> username -> email -> a generic Gerrit label.
        """
        self.insert(Template('change_comments_header').apply({}))
        for msg in reversed(self.change['messages']) if Settings.get('change.reverse_comments') else self.change['messages']:
            # Pick the first non-empty identity field for display.
            msg['author_name'] = msg['author_name'] or msg['author_username'] or msg['author_email'] or 'Gerrit Code Review'
            msg['date'] = mkdate(msg['date'])
            self.insert(Template('change_comment_message').apply(msg))
def insert(self, content, pos=None):
for part in content:
for line in part.split('\n'):
self.longest_line = max(self.longest_line, len(line))
self.to_render += content
    def finish_render(self):
        """Flush all queued fragments into the view in a single insert.

        '--' placeholders queued via insert() are expanded into horizontal
        rules sized to the longest rendered line; afterwards link regions
        are detected and underlined by find_links().
        """
        self.view.run_command('sublime_gerrit_clear')
        for i in range(0, len(self.to_render)):
            if self.to_render[i] == '--':
                # Expand the separator placeholder to a full-width rule.
                self.to_render[i] = '-' * self.longest_line
        self.view.run_command('sublime_gerrit_insert', {
            'pos': 0,
            'content': "\n".join(self.to_render)
        })
        self.find_links()
    def display_download_menu(self):
        """Show a quick panel of checkout/download options for this change.

        Offers a 'Quick Checkout' entry when a fetch command can be resolved
        (preferring the configured protocol, then git/ssh/http variants) plus
        one 'Download via ...' entry per protocol advertised by the server.
        """
        current_rev = self.get_current_rev()
        items = []
        if 'fetch' in current_rev:
            command = None
            # Protocol preference order; the user-configured default wins.
            tries = [
                Settings.get('git.quick_checkout_default_protocol'),
                'git',
                'ssh',
                'http',
                'anonymous http'
            ]
            for proto in tries:
                if proto in current_rev['fetch']:
                    commands = current_rev['fetch'][proto]['commands']
                    # Prefer a plain checkout command; fall back to pull.
                    if 'Checkout' in commands:
                        command = commands['Checkout']
                    elif 'Pull' in commands:
                        command = commands['Pull']
                    if command is not None:
                        break
            if command is not None:
                items = [{
                    'caption': ['Quick Checkout'],
                    'command': fix_download_command(command),
                    'on_select': lambda selected: self.git.checkout(selected['command'])
                }]
            for via in self.get_current_rev()['fetch']:
                items.append({
                    'caption': ['Download via %s' % via.upper()],
                    'via': via,
                    'on_select': lambda selected: self.display_download_menu_via(selected['via'])
                })
        if len(items) == 0:
            # No fetch info at all: server lacks the download-commands plugin.
            error_message('No download commands could be found. Check if Gerrit server has installed `download-commands` plugin.');
            return
        quick_panel(items)
    def display_download_menu_via(self, via):
        """List every download command for protocol `via`; selecting one
        copies it to the system clipboard."""
        def set_clipboard(data):
            sublime.set_clipboard(data)
            sublime.status_message('Copied to clipboard: `%s`' % data)
            # Clear the status message again after a few seconds.
            sublime.set_timeout(lambda: sublime.status_message(''), 4000)
        def sorter(a, b):
            # Case-insensitive cmp-style comparison for command names.
            a = a.upper()
            b = b.upper()
            return (a > b) - (a < b)
        current_rev = self.get_current_rev()
        items = []
        for command_name in sort(current_rev['fetch'][via]['commands'].keys(), sorter):
            command = fix_download_command(current_rev['fetch'][via]['commands'][command_name])
            items.append({
                'caption': [command_name, command],
                'command_name': command_name.lower(),
                'command': command,
                'on_select': lambda selected: set_clipboard(selected['command'])
            })
        quick_panel(items)
    def display_switch_ps_menu(self):
        """Offer every other patch set of this change for display."""
        def switch_ps(change, revision_id):
            # Pin the requested revision before re-rendering.
            change[0]['current_revision'] = revision_id
            self.refresh(change)
        items = []
        ordered = sort(self.change['revisions'], lambda a, b: self.change['revisions'][a]['_number'] - self.change['revisions'][b]['_number'])
        for rev in ordered:
            revision = self.change['revisions'][rev]
            # The currently displayed revision is omitted from the menu.
            if revision is not self.get_current_rev():
                items.append({
                    'caption': ['Patch Set %d' % revision['_number']],
                    'rev': rev,
                    'on_select': lambda item: self.resources.change(self.change['_number']).then(lambda data: switch_ps(data, item['rev']))
                })
        quick_panel(items)
def is_current_ps(self):
if 'current_revision' not in self.change or not self.change['current_revision']:
return True
ordered = sort(self.change['revisions'], lambda a, b: self.change['revisions'][a]['_number'] - self.change['revisions'][b]['_number'])
return self.change['current_revision'] == ordered[-1]
    def display_review_menu(self, only_label=None):
        """Build and show the review quick panel (labels, cover message,
        publish/submit actions).

        The menu re-opens itself after each selection: picking a label value
        collapses that label to a summary row, and choosing a summary row
        expands it back into its value choices (`only_label` limits that
        expansion to the one label). Voting rows are only offered on the
        newest patch set.
        """
        if self.destroying:
            return
        items = []
        def on_select(item, erase):
            # Record the chosen value; `erase` re-expands the label choices
            # on the next menu round instead of showing the summary row.
            self.review[item['label']] = item['value']
            self.review_selected[item['label']] = not erase
            self.display_review_menu(
                only_label=item['label'] if erase else None
            )
        def add_cover_message(text):
            self.review['__MSG__'] = text
            self.display_review_menu()
        show_submission = True
        if self.is_current_ps():
            for label_name in self.review:
                if label_name != '__MSG__' and (only_label is None or only_label == label_name):
                    if self.review_selected[label_name]:
                        try:
                            current_label_text = self.change['labels'][label_name]['values'][self.review[label_name]]
                        except:
                            # Stored vote is no longer permitted; fall back
                            # to the neutral ' 0' value.
                            self.review[label_name] = ' 0'
                            current_label_text = self.change['labels'][label_name]['values'][self.review[label_name]]
                        items.append({
                            'caption': [
                                '%s: %s' % (label_name, self.review[label_name].strip()),
                                current_label_text
                            ],
                            'value': self.review[label_name],
                            'label': label_name,
                            'on_select': lambda item: on_select(item, True)
                        })
                    else:
                        # While a label is expanded the publish entries are
                        # hidden so a value must be picked first.
                        show_submission = False
                        for value in reversed(self.change['permitted_labels'][label_name]):
                            items.append({
                                'caption': [
                                    '%s: %s' % (label_name, value.strip()),
                                    self.change['labels'][label_name]['values'][value]
                                ],
                                'selected': (
                                    label_name in self.review and value == self.review[label_name]
                                ),
                                'value': value,
                                'label': label_name,
                                'on_select': lambda item: on_select(item, False)
                            })
                        self.review_selected[label_name] = True
        if show_submission:
            items.append({
                'caption': [
                    'Add Cover Message' if self.review['__MSG__'] is None else 'Change Cover Message',
                    ellipsis(self.review['__MSG__']) or '<Message not set>'
                ],
                'on_select': lambda item:
                    sublime.set_timeout(
                        lambda: ChangeView.window.show_input_panel(
                            'Cover Message',
                            self.review['__MSG__'] or '',
                            add_cover_message,
                            None,
                            self.display_review_menu
                        )
                    , 100)
            })
            items.append({
                'caption': ['Publish Comments', 'Publishes comments only, does not merge into repository'],
                'on_select': self.publish_comments
            })
            if self.has_action('submit'): # and self.change['status'] in ['NEW']:
                items.append({
                    'caption': ['Publish and Submit', 'Publishes comments and merges into repository'],
                    'on_select': self.publish_and_submit
                })
        quick_panel(items)
    def publish_comments(self, item, then=None):
        """Post the collected votes, cover message and draft comments as a
        review on the current revision.

        `then` runs after the review is posted; defaults to reloading the
        view.
        """
        review = {'labels': {}}
        for label in self.review:
            if label != '__MSG__':
                # Votes are transmitted as integers.
                review['labels'].update({label: int(self.review[label].strip())})
            elif self.review[label] and self.review[label].strip():
                review['message'] = self.review[label].strip()
        if then is None:
            then = lambda data: self.reload()
        def submit(comments):
            # Attach all local draft comments so they are published together
            # with the votes.
            review.update({'comments': comments})
            self.resources.review(
                self.change['_number'],
                self.get_current_rev()['_number'],
                review
            ).then(then)
        self.resources.get_draft_comments(
            self.change['_number'],
            self.get_current_rev()['_number']
        ).then(submit)
    def publish_and_submit(self, item):
        """Publish the pending review, then submit (merge) the change."""
        def on_done(data):
            self.reload()
            if data is not None and 'status' in data:
                info_message('Change status: ' + data['status'])
        # Chain: publish comments first, then issue the submit request.
        self.publish_comments(item, lambda data: self.resources.submit(
            self.change['_number'],
            self.get_current_rev()['_number']
        ).then(on_done))
    def rebase(self):
        """Rebase the current revision onto its target branch, then reload."""
        self.resources.rebase(
            self.change['_number'],
            self.get_current_rev()['_number'],
        ).then(lambda data: self.reload())
    def abandon(self):
        """Abandon the change (after confirmation), then reload."""
        if sublime.ok_cancel_dialog('SublimeGerrit\n\nAre you sure you want to abandon this change?'):
            self.resources.abandon(
                self.change['_number']
            ).then(lambda data: self.reload())
    def publish(self):
        """Publish a draft change, then reload."""
        self.resources.publish(
            self.change['_number']
        ).then(lambda data: self.reload())
    def delete(self):
        """Delete a draft change (after confirmation) and close the view."""
        if sublime.ok_cancel_dialog('SublimeGerrit\n\nAre you sure you want to delete this draft change?'):
            self.resources.delete(
                self.change['_number']
            ).then(lambda data: self.destroy())
    def restore(self):
        """Restore an abandoned change, then reload."""
        self.resources.restore(
            self.change['_number']
        ).then(lambda data: self.reload())
    def reload(self):
        """Re-fetch the change from the server and re-render the view."""
        if self.loading:
            return
        self.render_loader()
        self.resources.change(self.change['_number']).then(self.refresh)
    def refresh(self, data=None):
        """Re-render from already-fetched data.

        `data` is a one-element list wrapping the change dict (the shape the
        REST layer delivers); when omitted, the cached change is reused.
        """
        if self.loading:
            return
        if data is None:
            data = [self.change]
        self.prepare(data)
        self.get_comments(self.render)
    def get_current_rev_id(self):
        """Return the revision id (commit hash) the view displays.

        NOTE(review): when no 'current_revision' is pinned this falls back
        to ordered[0], i.e. the LOWEST-numbered patch set -- confirm that is
        intended, given is_current_ps() compares against ordered[-1].
        """
        if self.change['current_revision']:
            return self.change['current_revision']
        else:
            ordered = sort(self.change['revisions'],
                lambda a, b: self.change['revisions'][a]['_number'] - self.change['revisions'][b]['_number']
            )
            return ordered[0]
def get_current_rev(self):
if self.change['current_revision']:
return self.change['revisions'][self.change['current_revision']]
else:
ordered = sort(self.change['revisions'],
lambda a, b: self.change['revisions'][a]['_number'] - self.change['revisions'][b]['_number']
)
return self.change['revisions'][ordered[0]]
    def has_action(self, action):
        """Return the action descriptor dict when `action` is enabled for
        this change, else False.

        Checks the current revision's actions first, then the change-level
        actions list (only when that list has exactly one entry).
        """
        current_rev = self.get_current_rev()
        if 'actions' in current_rev and action in current_rev['actions'] and 'enabled' in current_rev['actions'][action] and current_rev['actions'][action]['enabled']:
            return current_rev['actions'][action]
        if (
            len(self.change['actions']) == 1 and
            action in self.change['actions'][0] and
            self.change['actions'][0][action] and
            'enabled' in self.change['actions'][0][action] and
            self.change['actions'][0][action]['enabled']
        ):
            return self.change['actions'][0][action]
        return False
    def delete_topic(self):
        # NOTE(review): unused stub -- topic removal is handled inside
        # set_topic() via resources.delete_topic(); confirm whether this
        # placeholder can be dropped.
        pass
    def set_topic(self):
        """Prompt for a topic; an empty string deletes the current topic."""
        def submit(topic):
            if topic:
                self.resources.set_topic(self.change['_number'], topic).then(
                    lambda data: self.reload()
                )
            else:
                # Empty input means "remove the topic".
                self.resources.delete_topic(self.change['_number']).then(
                    lambda data: self.reload()
                )
        ChangeView.window.show_input_panel('Topic', self.change['topic'], submit, None, None)
    def display_remove_reviewers_menu(self):
        """Show the removable reviewers; selecting one removes them."""
        items = []
        for reviewer in self.change['removable_reviewers']:
            item = [get_reviewer_name(reviewer)]
            # Group reviewers carry no email address.
            if 'email' in reviewer:
                item.append(reviewer['email'])
            items.append({
                'caption': item,
                'account_id': reviewer['_account_id'],
                'on_select': lambda selected: self.remove_reviewer(selected['account_id'])
            })
        quick_panel(items)
def remove_reviewer(self, account_id):
self.resources.remove_reviewer(self.change['_number'], account_id).then(lambda data: self.reload())
    def display_add_reviewer_menu(self):
        """Prompt for a name/email/group, suggest matches, add the pick.

        The input panel and the suggestion quick panel call back into each
        other, so cancelling the suggestions returns to the (pre-filled)
        prompt.
        """
        def menu(data, text):
            if len(data) > 0:
                items = []
                for reviewer in data:
                    items.append({
                        'caption': [reviewer['group_name'] or reviewer['account_name'], reviewer['email']],
                        'id': reviewer['group_id'] or reviewer['account_id'],
                        'on_select': lambda selected: self.add_reviewer(selected['id'])
                    })
                quick_panel(items, on_cancel=lambda: prompt(text))
            else:
                error_message('No reviewer match your query.')
                prompt(text)
        def get_suggestions(text):
            if text:
                self.resources.suggest_reviewers(self.change['id'], text, 10).then(lambda data: menu(data, text))
        def prompt(text=''):
            ChangeView.window.show_input_panel('Name or Email or Group', text, get_suggestions, None, None)
        prompt()
def add_reviewer(self, account_id):
self.resources.add_reviewer(self.change['_number'], account_id).then(lambda data: self.reload())
    def display_changes_menu(self):
        """List the current revision's changed files; selecting one opens
        its diff. Binary files are skipped (no text diff available)."""
        current_rev = self.get_current_rev()
        items = []
        ordered = sort_alpha(current_rev['files'])
        for filename in ordered:
            f = current_rev['files'][filename]
            if 'binary' in f:
                continue
            comments = self.comments_count_text(filename)
            # 'lines_total' is attached to each file entry while rendering
            # the patch set list -- assumes render ran before this menu.
            items.append({
                'caption': [filename, f['status'] + ' ' + f['lines_total'] + ((', ' + comments) if comments else '')],
                'filename': filename,
                'status': f['status'],
                'on_select': lambda selected: self.open_diff(selected['status'], selected['filename'])
            })
        quick_panel(items)
def open_diff(self, status, filename):
if self.diff_view is not None:
self.diff_view.destroy()
ordered = sort(self.change['revisions'], lambda a, b:
self.change['revisions'][a]['_number'] - self.change['revisions'][b]['_number']
)
revisions = [self.change['revisions'][rev] for rev in ordered]
self.diff_view = DiffView(
self.view,
self.change['_number'],
self.get_current_rev(),
filename,
revisions
)
    def edit_commit_message(self):
        """Open an input panel pre-filled with the current commit message
        and save the edited text back to the revision."""
        current_rev = self.get_current_rev()
        text = current_rev['commit']['message'] if 'commit' in current_rev and 'message' in current_rev['commit'] else ''
        ChangeView.window.show_input_panel(
            'Commit Message',
            text,
            lambda text:
                self.resources.edit_commit_message(
                    self.change['_number'],
                    current_rev['revision'],
                    text
                ).then(lambda data: self.reload()),
            None,
            None
        )
    def cherry_pick(self):
        """Cherry-pick the current revision onto another branch.

        Flow: pick a target branch -> edit the commit message -> POST the
        cherry-pick -> optionally open the newly created change.
        """
        current_rev = self.get_current_rev()
        def on_done(data):
            if data is not None:
                if sublime.ok_cancel_dialog('SublimeGerrit\n\nCherry Pick successful!\nWould you like to view the cherry picked change now?'):
                    self.resources.change(data[0]['_number']).then(lambda change: ChangeView(change))
        def submit(ref, message):
            self.resources.cherry_pick_to(self.change['_number'], current_rev['_number'], ref, message).then(on_done)
        def display_branches_menu(data):
            items = []
            for branch in data:
                matches = re.match('^refs/heads/(.+)$', branch['ref'])
                # Offer every branch except the change's own target branch.
                if matches and matches.group(1) != self.change['branch']:
                    items.append({
                        'caption': [matches.group(1), branch['revision']],
                        'ref': matches.group(1),
                        'on_select': prompt
                    })
            if len(items) > 0:
                quick_panel(items)
            else:
                error_message('No other branches than `%s` known to Gerrit.' % self.change['branch'])
        def prompt(selected):
            text = current_rev['commit']['message'] if 'commit' in current_rev and 'message' in current_rev['commit'] else ''
            ChangeView.window.show_input_panel('Cherry Pick Commit Message', text, lambda text: submit(selected['ref'], text), None, None)
        self.resources.project_branches(self.change['project']).then(display_branches_menu)
def find_links(self):
self.links = []
regex = re.compile("\\bhttps?://[-a-z0-9+&@#/%?=~_()|!:,.;]*[-a-z0-9+&@#/%=~_(|]", re.IGNORECASE)
for match in regex.finditer(self.view.substr(sublime.Region(0, self.view.size()))):
link = {
'region': sublime.Region(match.start(), match.start() + len(match.group())),
'link': match.group()
}
self.links.append(link)
self.view.add_regions(
'link-%d' % match.start(),
[link['region']],
self.view.scope_name(match.start()),
'',
sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.HIDE_ON_MINIMAP
)
    def destroy(self, unloading=False):
        """Tear the view down exactly once: close panels, close the view,
        undo any quick checkout and destroy the attached diff view.

        `unloading` is set when the plugin itself unloads, in which case the
        working-copy checkout is deliberately left in place.
        """
        if self.destroying:
            return
        self.destroying = True
        # forces all panels that could be left by the view to be closed
        ChangeView.window.get_output_panel('fake-close')
        ChangeView.window.run_command('show_panel', {'panel': 'output.fake-close'})
        ChangeView.window.run_command('hide_panel', {'panel': 'output.fake-close'})
        if self.view.window():
            w = self.view.window()
            w.focus_view(self.view)
            w.run_command('close')
        if not unloading and self.git.is_applied():
            # Roll back a quick checkout made from this view.
            self.git.revert()
        if self.diff_view is not None:
            self.diff_view.destroy()
            self.diff_view = None
        BaseView.destroy(self)
        if ChangeView.detached:
            if len(ChangeView.window.views()) == 0:
                ChangeView.window.run_command('close_window')
    # --- Command availability predicates --------------------------------
    # Each *_cmd method backs one menu/command entry: it returns the bound
    # callback when the command currently applies, or False to disable it.
    def download_cmd(self):
        return self.display_download_menu if not self.git.is_applied() else False
    def revert_checkout_cmd(self):
        return self.git.revert if self.git.is_applied() else False
    def review_cmd(self):
        return self.display_review_menu if self.has_action('submit') or len(self.change['permitted_labels'].keys()) > 0 else False
    def view_changes_cmd(self):
        # Enabled only when at least one non-binary file is present.
        cr = self.get_current_rev()
        return self.display_changes_menu if len([f for f in cr['files'] if 'binary' not in cr['files'][f] or not cr['files'][f]['binary']]) > 0 else False
    def switch_patch_set_cmd(self):
        return self.display_switch_ps_menu if len(self.change['revisions'].keys()) > 1 else False
    def add_reviewer_cmd(self):
        return self.display_add_reviewer_menu if self.change['status'] not in ['ABANDONED', 'MERGED'] else False
    def remove_reviewer_cmd(self):
        return self.display_remove_reviewers_menu if self.change['status'] not in ['ABANDONED', 'MERGED'] and len(self.change['removable_reviewers']) > 0 else False
    def rebase_cmd(self):
        return self.rebase if self.has_action('rebase') else False
    def abandon_cmd(self):
        return self.abandon if self.has_action('abandon') else False
    def publish_draft_cmd(self):
        return self.publish if self.has_action('publish') and self.change['status'] not in ['ABANDONED'] else False
    def delete_draft_cmd(self):
        # Draft deletion is exposed as a DELETE action keyed '/': recognise
        # it by HTTP method and title rather than by action name.
        del_action = self.has_action('/')
        return self.delete if del_action and del_action['method'] == 'DELETE' and re.match('^Delete draft change', del_action['title']) else False
    def restore_cmd(self):
        return self.restore if self.has_action('restore') else False
    def edit_commit_message_cmd(self):
        return self.edit_commit_message if self.has_action('message') else False
    def edit_topic_cmd(self):
        # Topic editing is always available.
        return self.set_topic
    def cherry_pick_cmd(self):
        return self.cherry_pick if self.has_action('cherrypick') else False
    def refresh_cmd(self):
        return self.reload
    def on_selection_modified(self, view):
        """React to selections in the rendered buffer.

        A click inside a link region opens the URL. Selecting a full commit
        hash switches to that patch set when it belongs to this change, and
        otherwise runs a Gerrit search for the selected text.
        """
        def switch_ps(change, revision_id):
            change[0]['current_revision'] = revision_id
            self.refresh(change)
        sel = self.view.sel()
        if len(sel) == 1:
            text = self.view.substr(sel[0])
            if sel[0].size() > 0:
                for link in self.links:
                    if link['region'].contains(sel[0]):
                        sel.clear()
                        webbrowser.open(link['link'], autoraise=True)
                        return
                # A selection of 40+ alphanumerics is treated as a commit id.
                if re.match('^[a-zA-Z0-9]{40,}$', text):
                    if text in self.change['revisions']:
                        if not self.loading:
                            self.resources.change(self.change['_number']).then(lambda data: switch_ps(data, text))
                        return
                    ChangeView.window.run_command('sublime_gerrit_search', {
                        'text': text,
                        'autorun': True
                    })
    def on_modified(self, view):
        # Rendered buffer is not user-editable; nothing to do.
        pass
    def on_activated(self, view):
        """Hand focus to the diff view when one is open."""
        if self.diff_view is not None:
            self.diff_view.focus()
    def on_deactivated(self, view):
        # No state to save on focus loss.
        pass
    def on_close(self, view):
        """Closing the underlying view tears the whole ChangeView down."""
        self.destroy()
| gpl-2.0 |
ionelmc/sphinx-py3doc-enhanced-theme | ci/bootstrap.py | 12 | 2886 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import subprocess
import sys
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
base_path = dirname(dirname(abspath(__file__)))
def check_call(args):
    """Echo a command (make-style '+' prefix) and run it, raising on failure."""
    display = ["+"] + list(args)
    print(*display)
    subprocess.check_call(args)
def exec_in_env():
    """Re-execute this script inside a dedicated bootstrap virtualenv.

    Creates .tox/bootstrap on first use (trying ``python -m venv``, then
    ``python -m virtualenv``, then a bare ``virtualenv`` binary), installs
    jinja2 and tox into it, and finally replaces the current process via
    os.execv so main() runs with jinja2 importable.
    """
    env_path = join(base_path, ".tox", "bootstrap")
    if sys.platform == "win32":
        bin_path = join(env_path, "Scripts")
    else:
        bin_path = join(env_path, "bin")
    if not exists(env_path):
        # (The redundant function-local ``import subprocess`` was removed;
        # the module-level import already provides it.)
        print("Making bootstrap env in: {0} ...".format(env_path))
        try:
            check_call([sys.executable, "-m", "venv", env_path])
        except subprocess.CalledProcessError:
            try:
                check_call([sys.executable, "-m", "virtualenv", env_path])
            except subprocess.CalledProcessError:
                check_call(["virtualenv", env_path])
        print("Installing `jinja2` into bootstrap environment...")
        check_call([join(bin_path, "pip"), "install", "jinja2", "tox"])
    python_executable = join(bin_path, "python")
    if not os.path.exists(python_executable):
        # Windows virtualenvs expose python.exe.
        python_executable += '.exe'
    print("Re-executing with: {0}".format(python_executable))
    print("+ exec", python_executable, __file__, "--no-env")
    # execv never returns: the interpreter process is replaced wholesale.
    os.execv(python_executable, [python_executable, __file__, "--no-env"])
def main():
    """Regenerate config files from the ci/templates directory.

    Renders every template in ci/templates with Jinja2, passing the list of
    py* tox environments, and writes each result to the project root under
    the same file name. Must run inside the bootstrap env (see
    exec_in_env()) so that jinja2 is importable.
    """
    import jinja2
    print("Project path: {0}".format(base_path))
    jinja = jinja2.Environment(
        loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")),
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True
    )
    tox_environments = [
        line.strip()
        # 'tox' need not be installed globally, but must be importable
        # by the Python that is running this script.
        # This uses sys.executable the same way that the call in
        # cookiecutter-pylibrary/hooks/post_gen_project.py
        # invokes this bootstrap.py itself.
        for line in subprocess.check_output([sys.executable, '-m', 'tox', '--listenvs'], universal_newlines=True).splitlines()
    ]
    # Only interpreter environments (py27, py36, ...) feed the templates.
    tox_environments = [line for line in tox_environments if line.startswith('py')]
    for name in os.listdir(join("ci", "templates")):
        with open(join(base_path, name), "w") as fh:
            fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
        print("Wrote {}".format(name))
    print("DONE.")
if __name__ == "__main__":
    args = sys.argv[1:]
    if args == ["--no-env"]:
        # Second pass: we are already running inside the bootstrap env.
        main()
    elif not args:
        # First pass: create/enter the bootstrap env and re-exec ourselves.
        exec_in_env()
    else:
        print("Unexpected arguments {0}".format(args), file=sys.stderr)
        sys.exit(1)
| bsd-2-clause |
trafi/djinni | test-suite/generated-src/python/foo_primitives.py | 1 | 7411 | # AUTOGENERATED FILE - DO NOT MODIFY!
# This file generated by Djinni from foo_primitives.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyBinary, CPyDate, CPyPrimitive, CPyString
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class FooPrimitives(with_metaclass(ABCMeta)):
    """Abstract accessor interface covering every primitive Djinni type.

    NOTE(review): this module is autogenerated by Djinni (see the file
    header); do not hand-edit -- regenerate from foo_primitives.djinni.
    """
    @abstractmethod
    def set_int8(self, private_int):
        raise NotImplementedError
    @abstractmethod
    def get_int8(self):
        raise NotImplementedError
    @abstractmethod
    def set_int16(self, private_int):
        raise NotImplementedError
    @abstractmethod
    def get_int16(self):
        raise NotImplementedError
    @abstractmethod
    def set_int32(self, private_int):
        raise NotImplementedError
    @abstractmethod
    def get_int32(self):
        raise NotImplementedError
    @abstractmethod
    def set_int64(self, private_int):
        raise NotImplementedError
    @abstractmethod
    def get_int64(self):
        raise NotImplementedError
    @abstractmethod
    def set_float(self, private_float):
        raise NotImplementedError
    @abstractmethod
    def get_float(self):
        raise NotImplementedError
    @abstractmethod
    def set_double(self, private_double):
        raise NotImplementedError
    @abstractmethod
    def get_double(self):
        raise NotImplementedError
    @abstractmethod
    def set_bool(self, private_bool):
        raise NotImplementedError
    @abstractmethod
    def get_bool(self):
        raise NotImplementedError
    @abstractmethod
    def set_binary(self, private_binary):
        raise NotImplementedError
    @abstractmethod
    def get_binary(self):
        raise NotImplementedError
    @abstractmethod
    def set_string(self, private_string):
        raise NotImplementedError
    @abstractmethod
    def get_string(self):
        raise NotImplementedError
    @abstractmethod
    def set_date(self, private_date):
        raise NotImplementedError
    @abstractmethod
    def get_date(self):
        raise NotImplementedError
    @staticmethod
    def create():
        # Factory: returns the C++-backed proxy implementation.
        return FooPrimitivesCppProxy.create()
class FooPrimitivesCppProxy(FooPrimitives):
    """Concrete FooPrimitives backed by a C++ object via the cffi bridge.

    NOTE(review): autogenerated by Djinni -- do not hand-edit; regenerate
    from the .djinni IDL instead.
    """
    def __init__(self, proxy):
        # `proxy` is the opaque cffi handle to the C++ instance.
        self._is_cpp_proxy = True
        self._cpp_impl = proxy
    def __del__(self):
        # `lib` may already be torn down during interpreter shutdown.
        if not lib:
            return
        lib.foo_primitives___wrapper_dec_ref(self._cpp_impl)
    def set_int8(self, private_int):
        lib.cw__foo_primitives_set_int8(self._cpp_impl, CPyPrimitive.fromPy(private_int))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_int8(self):
        _ret_c = lib.cw__foo_primitives_get_int8(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_int16(self, private_int):
        lib.cw__foo_primitives_set_int16(self._cpp_impl, CPyPrimitive.fromPy(private_int))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_int16(self):
        _ret_c = lib.cw__foo_primitives_get_int16(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_int32(self, private_int):
        lib.cw__foo_primitives_set_int32(self._cpp_impl, CPyPrimitive.fromPy(private_int))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_int32(self):
        _ret_c = lib.cw__foo_primitives_get_int32(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_int64(self, private_int):
        lib.cw__foo_primitives_set_int64(self._cpp_impl, CPyPrimitive.fromPy(private_int))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_int64(self):
        _ret_c = lib.cw__foo_primitives_get_int64(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_float(self, private_float):
        lib.cw__foo_primitives_set_float(self._cpp_impl, CPyPrimitive.fromPy(private_float))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_float(self):
        _ret_c = lib.cw__foo_primitives_get_float(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_double(self, private_double):
        lib.cw__foo_primitives_set_double(self._cpp_impl, CPyPrimitive.fromPy(private_double))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_double(self):
        _ret_c = lib.cw__foo_primitives_get_double(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_bool(self, private_bool):
        lib.cw__foo_primitives_set_bool(self._cpp_impl, CPyPrimitive.fromPy(private_bool))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_bool(self):
        _ret_c = lib.cw__foo_primitives_get_bool(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyPrimitive.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_binary(self, private_binary):
        # Ownership of the marshalled binary is released to the C side.
        with CPyBinary.fromPy(private_binary) as pybin_private_binary:
            lib.cw__foo_primitives_set_binary(self._cpp_impl, pybin_private_binary.release_djinni_binary())
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_binary(self):
        _ret_c = lib.cw__foo_primitives_get_binary(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyBinary.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_string(self, private_string):
        # Ownership of the marshalled string is released to the C side.
        with CPyString.fromPy(private_string) as pys_private_string:
            lib.cw__foo_primitives_set_string(self._cpp_impl, pys_private_string.release_djinni_string())
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_string(self):
        _ret_c = lib.cw__foo_primitives_get_string(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyString.toPy(_ret_c)
        assert _ret is not None
        return _ret
    def set_date(self, private_date):
        lib.cw__foo_primitives_set_date(self._cpp_impl, CPyDate.fromPy(private_date))
        CPyException.toPyCheckAndRaise(ffi.NULL)
    def get_date(self):
        _ret_c = lib.cw__foo_primitives_get_date(self._cpp_impl)
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = CPyDate.toPy(_ret_c)
        assert _ret is not None
        return _ret
    @staticmethod
    def create():
        _ret_c = lib.cw__foo_primitives_create()
        CPyException.toPyCheckAndRaise(_ret_c)
        _ret = FooPrimitivesHelper.toPy(_ret_c)
        assert _ret is not None
        return _ret
class FooPrimitivesHelper:
    """Marshalling helper: wraps a C handle in a Python proxy (generated)."""
    # Keeps proxied handles alive while Python references exist.
    c_data_set = MultiSet()
    @staticmethod
    def toPy(obj):
        # NULL from the C side maps to Python None.
        if obj == ffi.NULL:
            return None
        return FooPrimitivesCppProxy(obj)
| apache-2.0 |
saashimi/code_guild | wk1/scripts/compress/compressor.py | 2 | 3164 |
def groupby_char(lst):
    """Return a list of strings, one per run of identical adjacent items.

    Groups runs (in-order sequences) of identical characters from a list of
    single-character strings into joined string elements.

    Example
    -------
    ['A', 'A', 'B'] --> ['AA', 'B']

    Parameters
    ----------
    lst : list
        A list of single character strings.

    Returns
    -------
    grouped : list
        A list of strings containing grouped characters. An empty input
        yields [] (the previous hand-rolled version raised IndexError).
    """
    from itertools import groupby
    # itertools.groupby batches consecutive equal items for us, removing
    # the manual index/count bookkeeping and the empty-list crash.
    return [''.join(run) for _, run in groupby(lst)]
def compress_group(string):
    """Collapse a run of identical characters into '<char><count>'.

    Example
    -------
    "AAA" --> "A3"

    Parameters
    ----------
    string : str
        A non-empty string whose characters are all identical.

    Returns
    -------
    str
        The first character followed by the length of the run.
    """
    first_char = string[0]
    run_length = len(string)
    return "{0}{1}".format(first_char, run_length)
def compress(string):
    """Return a run-length-encoded representation of a string.

    Maps each run of identical characters to the character followed by the
    run length, e.g. compress('AAABBCDDD') --> 'A3B2C1D3'. The original
    string is returned unchanged whenever the encoding would not actually
    be shorter (so compress('A') --> 'A', not 'A1').

    Parameters
    ----------
    string : str
        The string to compress.

    Returns
    -------
    str or None
        The compressed (or original) string, or None when the input is not
        iterable (e.g. None or an int), matching the previous
        implementation's behaviour for bad input.
    """
    from itertools import groupby
    try:
        # Stream over runs of equal characters instead of materialising the
        # intermediate per-character and per-group lists the old code built.
        encoded = ''.join(
            '%s%d' % (char, sum(1 for _ in run))
            for char, run in groupby(string)
        )
    except TypeError:
        # Non-iterable input (including None) compresses to None.
        return None
    # Empty input falls through naturally: '' is not shorter than ''.
    return encoded if len(encoded) < len(string) else string
if __name__ == "__main__":
import sys
print(sys.argv[0])
string = sys.argv[1]
print("string is", string)
print("compression is", compress(string))
| mit |
Perferom/android_external_chromium_org | chrome/common/extensions/docs/server2/permissions_data_source.py | 23 | 3485 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from itertools import ifilter
from operator import itemgetter
from data_source import DataSource
from extensions_paths import PRIVATE_TEMPLATES
import features_utility as features
from future import Gettable, Future
def _ListifyPermissions(permissions):
'''Filter out any permissions that do not have a description or with a name
that ends with Private then sort permissions features by name into a list.
'''
def filter_permissions(perm):
return 'description' in perm and not perm['name'].endswith('Private')
return sorted(
ifilter(filter_permissions, permissions.values()),
key=itemgetter('name'))
def _AddDependencyDescriptions(permissions, api_features):
'''Use |api_features| to determine the dependencies APIs have on permissions.
Add descriptions to |permissions| based on those dependencies.
'''
for name, permission in permissions.iteritems():
# Don't overwrite the description created by expanding a partial template.
if 'partial' in permission or not permission['platforms']:
continue
has_deps = False
if name in api_features:
for dependency in api_features[name].get('dependencies', ()):
if dependency.startswith('permission:'):
has_deps = True
if has_deps:
permission['partial'] = 'permissions/generic_description.html'
class PermissionsDataSource(DataSource):
  '''Load and format permissions features to be used by templates.
  '''
  def __init__(self, server_instance, request):
    self._features_bundle = server_instance.features_bundle
    # Per-class object store used to memoize the computed permissions data.
    self._object_store = server_instance.object_store_creator.Create(
        PermissionsDataSource)
    self._template_cache = server_instance.compiled_fs_factory.ForTemplates(
        server_instance.host_file_system_provider.GetTrunk())
  def _CreatePermissionsData(self):
    # Returns a Future yielding {'declare_apps': [...],
    # 'declare_extensions': [...]}: the filtered, sorted permission lists
    # for each platform.
    api_features_future = self._features_bundle.GetAPIFeatures()
    permission_features_future = self._features_bundle.GetPermissionFeatures()
    def resolve():
      permission_features = permission_features_future.Get()
      _AddDependencyDescriptions(permission_features, api_features_future.Get())
      # Turn partial templates into descriptions, ensure anchors are set.
      for permission in permission_features.values():
        if not 'anchor' in permission:
          permission['anchor'] = permission['name']
        if 'partial' in permission:
          # Expand the partial template into an inline description.
          permission['description'] = self._template_cache.GetFromFile('%s/%s' %
              (PRIVATE_TEMPLATES, permission['partial'])).Get()
          del permission['partial']
      def filter_for_platform(permissions, platform):
        return _ListifyPermissions(features.Filtered(permissions, platform))
      return {
        'declare_apps': filter_for_platform(permission_features, 'apps'),
        'declare_extensions': filter_for_platform(
            permission_features, 'extensions')
      }
    return Future(delegate=Gettable(resolve))
  def _GetCachedPermissionsData(self):
    # Lazily compute the data and memoize it in the object store.
    data = self._object_store.Get('permissions_data').Get()
    if data is None:
      data = self._CreatePermissionsData().Get()
      self._object_store.Set('permissions_data', data)
    return data
  def Cron(self):
    # Cron runs recompute the data so the cache stays warm.
    return self._CreatePermissionsData()
  def get(self, key):
    return self._GetCachedPermissionsData().get(key)
| bsd-3-clause |
lewisodriscoll/sasview | src/examples/test_copy_print.py | 3 | 3324 | """
Test application that uses plottools
An application required by the REFL group that mainly tests copy and print.
The following is a checklist of functionality to look for while testing:
1- Start the application:
the graph should have theory curve, experimental data, chisq
with a white background.
2- Hovering over any plotted data will highlight the whole data set
or line in yellow.
3- Left-clicking on the graph and dragging will drag the graph.
4- Using the mouse wheel will zoom in and out of the graph.
5- Right-clicking on the graph when no curve is highlighted will
pop up the context menu:
- 'copy image': copy the bitmap of figure to system clipboard
- 'print Setup': setup the size of figure for printing
- 'print preview': preview printer page
- 'print': send figure to system printer.
"""
import wx
from sas.sasgui.plottools.PlotPanel import PlotPanel
from sas.sasgui.plottools.plottables import Graph, Data1D, Theory1D
import sys
sys.platform = 'win95'
import numpy as np
class TestPlotPanel(PlotPanel):
    """Plot panel whose context menu offers only copy and print actions."""

    def __init__(self, parent, id=-1, color=None, dpi=None, **kwargs):
        PlotPanel.__init__(self, parent, id=id, color=color,
                           dpi=dpi, **kwargs)
        # Remember the owning frame.
        self.parent = parent
        # Internal map of plottable name -> handle, since the graph itself
        # keeps no dictionary of handles for its plottables.
        self.plots = {}

    def onContextMenu(self, event):
        """Build and pop up the copy/print context menu at the click point."""
        id_copy = wx.NewId()
        id_print = wx.NewId()
        id_preview = wx.NewId()
        id_setup = wx.NewId()

        menu = wx.Menu()
        menu.Append(id_copy, '&Copy Image', 'Copy image to Clipboard')
        wx.EVT_MENU(self, id_copy, self.OnCopyFigureMenu)
        menu.AppendSeparator()
        menu.Append(id_setup, '&Print setup')
        wx.EVT_MENU(self, id_setup, self.onPrinterSetup)
        menu.Append(id_preview, '&Print preview ')
        wx.EVT_MENU(self, id_preview, self.onPrinterPreview)
        menu.Append(id_print, '&Print ')
        wx.EVT_MENU(self, id_print, self.onPrint)

        # Convert the screen-space click position into client coordinates
        # before showing the menu.
        position = self.ScreenToClient(event.GetPosition())
        self.PopupMenu(menu, position)
# ---------------------------------------------------------------
def sample_graph():
    """Build a demo Graph: noisy sine data plus a matching theory curve."""
    abscissa = np.linspace(0, 2.0, 50)
    ordinate = np.sin(2 * np.pi * abscissa * 2.8)
    uncertainty = np.sqrt(100 * np.abs(ordinate)) / 100

    measured = Data1D(abscissa, ordinate, dy=uncertainty)
    measured.xaxis('distance', 'm')
    measured.yaxis('time', 's')

    graph = Graph()
    graph.add(measured)
    graph.add(Theory1D(abscissa, ordinate, dy=uncertainty))
    graph.title('Test Copy and Print Image')
    return graph
def demo_plotter(graph):
    """Open a wx frame containing a TestPlotPanel and render *graph* in it."""
    app = wx.PySimpleApp()
    frame = wx.Frame(None, -1, 'Plottables')
    panel = TestPlotPanel(frame)
    frame.Show()
    # Render the graph onto the panel, then hand control to the wx loop.
    graph.render(panel)
    app.MainLoop()
if __name__ == "__main__":
    # Launch the interactive copy/print demo when run as a script.
    # (A stray dead `pass` statement preceding this call was removed.)
    demo_plotter(sample_graph())
| bsd-3-clause |
lanbing510/GTDWeb | django/views/generic/detail.py | 103 | 6696 | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
class SingleObjectMixin(ContextMixin):
    """
    Mixin that looks up a single object (by pk and/or slug from the URL)
    and exposes it to the template context.
    """
    model = None
    queryset = None
    slug_field = 'slug'
    context_object_name = None
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
    query_pk_and_slug = False

    def get_object(self, queryset=None):
        """
        Return the object the view is displaying.

        By default this needs `self.queryset` plus a `pk` or `slug` URLconf
        argument; subclasses may override to return any object.
        """
        # A caller-supplied queryset takes precedence; subclasses such as
        # DateDetailView rely on this.
        if queryset is None:
            queryset = self.get_queryset()

        pk = self.kwargs.get(self.pk_url_kwarg, None)
        slug = self.kwargs.get(self.slug_url_kwarg, None)

        # Narrow by primary key when one was captured from the URL.
        if pk is not None:
            queryset = queryset.filter(pk=pk)

        # Narrow by slug when captured, unless a pk already matched and the
        # view does not explicitly require matching both.
        if slug is not None and (pk is None or self.query_pk_and_slug):
            queryset = queryset.filter(**{self.get_slug_field(): slug})

        # Having neither identifier is a misconfiguration, not a 404.
        if pk is None and slug is None:
            raise AttributeError(
                "Generic detail view %s must be called with either an "
                "object pk or a slug." % self.__class__.__name__)

        try:
            # Resolve the filtered queryset down to the single object.
            return queryset.get()
        except queryset.model.DoesNotExist:
            raise Http404(_("No %(verbose_name)s found matching the query") %
                          {'verbose_name': queryset.model._meta.verbose_name})

    def get_queryset(self):
        """
        Return the `QuerySet` used to look up the object.

        Called by the default `get_object`; may never run when `get_object`
        is overridden.
        """
        if self.queryset is not None:
            return self.queryset.all()
        if self.model:
            return self.model._default_manager.all()
        raise ImproperlyConfigured(
            "%(cls)s is missing a QuerySet. Define "
            "%(cls)s.model, %(cls)s.queryset, or override "
            "%(cls)s.get_queryset()." % {
                'cls': self.__class__.__name__
            }
        )

    def get_slug_field(self):
        """Return the name of the model field used for slug lookups."""
        return self.slug_field

    def get_context_object_name(self, obj):
        """Return the context variable name to use for ``obj``, or None."""
        if self.context_object_name:
            return self.context_object_name
        if isinstance(obj, models.Model):
            return obj._meta.model_name
        return None

    def get_context_data(self, **kwargs):
        """Insert the single object into the context dict."""
        context = {}
        if self.object:
            context['object'] = self.object
            alias = self.get_context_object_name(self.object)
            if alias:
                context[alias] = self.object
        context.update(kwargs)
        return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
    """Base view for displaying a single object (no template machinery)."""

    def get(self, request, *args, **kwargs):
        # Resolve the object first so get_context_data can see self.object.
        self.object = self.get_object()
        payload = self.get_context_data(object=self.object)
        return self.render_to_response(payload)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
    template_name_field = None
    template_name_suffix = '_detail'

    def get_template_names(self):
        """
        Return candidate template names for this request, in priority order:

        * the view's ``template_name``, if provided
        * the value of the object's ``template_name_field`` field, if any
        * ``<app_label>/<model_name><template_name_suffix>.html``

        May not be called if render_to_response is overridden.
        """
        try:
            candidates = super(SingleObjectTemplateResponseMixin, self).get_template_names()
        except ImproperlyConfigured:
            # No template_name configured on the view; start from scratch.
            candidates = []

        # Most specific option: a template name stored on the object itself.
        if self.object and self.template_name_field:
            per_object_name = getattr(self.object, self.template_name_field, None)
            if per_object_name:
                candidates.insert(0, per_object_name)

        # Least specific option: <app>/<model><suffix>.html, derived from
        # the object when it is a model instance, otherwise from the view's
        # model attribute.
        meta = None
        if isinstance(self.object, models.Model):
            meta = self.object._meta
        elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
            meta = self.model._meta
        if meta is not None:
            candidates.append("%s/%s%s.html" % (
                meta.app_label,
                meta.model_name,
                self.template_name_suffix
            ))

        # Still nothing? Re-raise the ImproperlyConfigured caught above to
        # alert the user.
        if not candidates:
            raise

        return candidates
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
    """
    Render a "detail" view of an object.

    The object defaults to a model instance looked up from `self.queryset`,
    but *any* object can be displayed by overriding `self.get_object()`.
    """
3quarterstack/simple_blog | django/contrib/auth/tests/__init__.py | 101 | 1191 | from django.contrib.auth.tests.custom_user import *
from django.contrib.auth.tests.auth_backends import *
from django.contrib.auth.tests.basic import *
from django.contrib.auth.tests.context_processors import *
from django.contrib.auth.tests.decorators import *
from django.contrib.auth.tests.forms import *
from django.contrib.auth.tests.remote_user import *
from django.contrib.auth.tests.management import *
from django.contrib.auth.tests.models import *
from django.contrib.auth.tests.handlers import *
from django.contrib.auth.tests.hashers import *
from django.contrib.auth.tests.signals import *
from django.contrib.auth.tests.tokens import *
from django.contrib.auth.tests.views import *
# The password for the fixture data users is 'password'
from django.dispatch import receiver
from django.test.signals import setting_changed
@receiver(setting_changed)
def user_model_swapped(**kwargs):
    """Reset the User manager whenever AUTH_USER_MODEL is swapped in tests."""
    if kwargs['setting'] != 'AUTH_USER_MODEL':
        return
    # Imported lazily: auth models may not be importable at signal-hookup time.
    from django.db.models.manager import ensure_default_manager
    from django.contrib.auth.models import User
    # Re-point User.objects at the default manager and re-register it.
    User.objects = User._default_manager
    ensure_default_manager(User)
| mit |
reeshupatel/demo | keystone/contrib/admin_crud/core.py | 13 | 8493 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone import assignment
from keystone import catalog
from keystone.common import extension
from keystone.common import wsgi
from keystone import identity
# Register the OS-KSADM extension's metadata so it is advertised through the
# admin API's /extensions discovery endpoint.
extension.register_admin_extension(
    'OS-KSADM', {
        'name': 'OpenStack Keystone Admin',
        'namespace': 'http://docs.openstack.org/identity/api/ext/'
                     'OS-KSADM/v1.0',
        'alias': 'OS-KSADM',
        'updated': '2013-07-11T17:14:00-00:00',
        'description': 'OpenStack extensions to Keystone v2.0 API '
                       'enabling Administrative Operations.',
        'links': [
            {
                'rel': 'describedby',
                # TODO(dolph): link needs to be revised after
                #              bug 928059 merges
                'type': 'text/html',
                'href': 'https://github.com/openstack/identity-api',
            }
        ]})
class CrudExtension(wsgi.ExtensionRouter):
    """Previously known as the OS-KSADM extension.

    Provides a bunch of CRUD operations for internal data types
    (tenants, users, roles, services, and endpoints) on the v2 admin API.
    """

    def add_routes(self, mapper):
        # Map every OS-KSADM route onto the corresponding v2 controller.
        tenant_controller = assignment.controllers.Tenant()
        user_controller = identity.controllers.User()
        role_controller = assignment.controllers.Role()
        service_controller = catalog.controllers.Service()
        endpoint_controller = catalog.controllers.Endpoint()

        # Tenant Operations
        mapper.connect(
            '/tenants',
            controller=tenant_controller,
            action='create_project',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/tenants/{tenant_id}',
            controller=tenant_controller,
            action='update_project',
            conditions=dict(method=['PUT', 'POST']))
        mapper.connect(
            '/tenants/{tenant_id}',
            controller=tenant_controller,
            action='delete_project',
            conditions=dict(method=['DELETE']))
        mapper.connect(
            '/tenants/{tenant_id}/users',
            controller=tenant_controller,
            action='get_project_users',
            conditions=dict(method=['GET']))

        # User Operations
        mapper.connect(
            '/users',
            controller=user_controller,
            action='get_users',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/users',
            controller=user_controller,
            action='create_user',
            conditions=dict(method=['POST']))
        # NOTE(termie): not in diablo
        mapper.connect(
            '/users/{user_id}',
            controller=user_controller,
            action='update_user',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/users/{user_id}',
            controller=user_controller,
            action='delete_user',
            conditions=dict(method=['DELETE']))

        # COMPAT(diablo): the copy with no OS-KSADM is from diablo
        mapper.connect(
            '/users/{user_id}/password',
            controller=user_controller,
            action='set_user_password',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/users/{user_id}/OS-KSADM/password',
            controller=user_controller,
            action='set_user_password',
            conditions=dict(method=['PUT']))

        # COMPAT(diablo): the copy with no OS-KSADM is from diablo
        mapper.connect(
            '/users/{user_id}/tenant',
            controller=user_controller,
            action='update_user',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/users/{user_id}/OS-KSADM/tenant',
            controller=user_controller,
            action='update_user',
            conditions=dict(method=['PUT']))

        # COMPAT(diablo): the copy with no OS-KSADM is from diablo
        mapper.connect(
            '/users/{user_id}/enabled',
            controller=user_controller,
            action='set_user_enabled',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/users/{user_id}/OS-KSADM/enabled',
            controller=user_controller,
            action='set_user_enabled',
            conditions=dict(method=['PUT']))

        # User Roles
        mapper.connect(
            '/users/{user_id}/roles/OS-KSADM/{role_id}',
            controller=role_controller,
            action='add_role_to_user',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/users/{user_id}/roles/OS-KSADM/{role_id}',
            controller=role_controller,
            action='remove_role_from_user',
            conditions=dict(method=['DELETE']))

        # COMPAT(diablo): User Roles
        mapper.connect(
            '/users/{user_id}/roleRefs',
            controller=role_controller,
            action='get_role_refs',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/users/{user_id}/roleRefs',
            controller=role_controller,
            action='create_role_ref',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/users/{user_id}/roleRefs/{role_ref_id}',
            controller=role_controller,
            action='delete_role_ref',
            conditions=dict(method=['DELETE']))

        # User-Tenant Roles
        mapper.connect(
            '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
            controller=role_controller,
            action='add_role_to_user',
            conditions=dict(method=['PUT']))
        mapper.connect(
            '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
            controller=role_controller,
            action='remove_role_from_user',
            conditions=dict(method=['DELETE']))

        # Service Operations
        mapper.connect(
            '/OS-KSADM/services',
            controller=service_controller,
            action='get_services',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/OS-KSADM/services',
            controller=service_controller,
            action='create_service',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/OS-KSADM/services/{service_id}',
            controller=service_controller,
            action='delete_service',
            conditions=dict(method=['DELETE']))
        mapper.connect(
            '/OS-KSADM/services/{service_id}',
            controller=service_controller,
            action='get_service',
            conditions=dict(method=['GET']))

        # Endpoint Templates
        mapper.connect(
            '/endpoints',
            controller=endpoint_controller,
            action='get_endpoints',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/endpoints',
            controller=endpoint_controller,
            action='create_endpoint',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/endpoints/{endpoint_id}',
            controller=endpoint_controller,
            action='delete_endpoint',
            conditions=dict(method=['DELETE']))

        # Role Operations
        mapper.connect(
            '/OS-KSADM/roles',
            controller=role_controller,
            action='create_role',
            conditions=dict(method=['POST']))
        mapper.connect(
            '/OS-KSADM/roles',
            controller=role_controller,
            action='get_roles',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/OS-KSADM/roles/{role_id}',
            controller=role_controller,
            action='get_role',
            conditions=dict(method=['GET']))
        mapper.connect(
            '/OS-KSADM/roles/{role_id}',
            controller=role_controller,
            action='delete_role',
            conditions=dict(method=['DELETE']))
lduarte1991/edx-platform | openedx/core/djangoapps/content/block_structure/block_structure.py | 1 | 30213 | """
Module with family of classes for block structures.
BlockStructure - responsible for block existence and relations.
BlockStructureBlockData - responsible for block & transformer data.
BlockStructureModulestoreData - responsible for xBlock data.
The following internal data structures are implemented:
_BlockRelations - Data structure for a single block's relations.
_BlockData - Data structure for a single block's data.
"""
from copy import deepcopy
from functools import partial
from logging import getLogger
from openedx.core.lib.graph_traversals import traverse_topologically, traverse_post_order
from .exceptions import TransformerException
logger = getLogger(__name__) # pylint: disable=invalid-name
# A dictionary key value for storing a transformer's version number.
TRANSFORMER_VERSION_KEY = '_version'
class _BlockRelations(object):
"""
Data structure to encapsulate relationships for a single block,
including its children and parents.
"""
def __init__(self):
# List of usage keys of this block's parents.
# list [UsageKey]
self.parents = []
# List of usage keys of this block's children.
# list [UsageKey]
self.children = []
class BlockStructure(object):
    """
    Base class for block structures.  Instances are built by the
    BlockStructureFactory and passed between Transformers.

    Tracks the root block's usage key, which blocks exist, and the
    parent/child edges between them (graph nodes and edges).
    """

    def __init__(self, root_block_usage_key):
        # Usage key of the structure's root block.
        self.root_block_usage_key = root_block_usage_key
        # Maps each block's usage key to its _BlockRelations; presence in
        # this dict is what defines membership in the structure.
        # dict {UsageKey: _BlockRelations}
        self._block_relations = {}
        self._add_block(self._block_relations, root_block_usage_key)

    def __iter__(self):
        """
        Iterate over block usage keys; equivalent to get_block_keys().
        (A topological traversal can be used instead to support DAGs.)
        """
        return self.get_block_keys()

    def __len__(self):
        return len(self._block_relations)

    #--- Block structure relation methods ---#

    def get_parents(self, usage_key):
        """
        Return the list of parent usage keys for the given block, or []
        when the block is not in the structure.
        """
        if usage_key in self:
            return self._block_relations[usage_key].parents
        return []

    def get_children(self, usage_key):
        """
        Return the list of child usage keys for the given block, or []
        when the block is not in the structure.
        """
        if usage_key in self:
            return self._block_relations[usage_key].children
        return []

    def set_root_block(self, usage_key):
        """
        Make the given block the structure's new root and clear its
        parent links.

        Note: does *not* prune the rest of the structure; for performance
        reasons the caller decides when to prune.
        """
        self.root_block_usage_key = usage_key
        self._block_relations[usage_key].parents = []

    def __contains__(self, usage_key):
        """Return whether a block with the given usage key is present."""
        return usage_key in self._block_relations

    def get_block_keys(self):
        """Return an iterator over the usage keys of all blocks."""
        return self._block_relations.iterkeys()

    #--- Block structure traversal methods ---#

    def topological_traversal(
            self,
            filter_func=None,
            yield_descendants_of_unyielded=False,
            start_node=None,
    ):
        """
        Yield usage keys in topological order; argument semantics are
        documented in openedx.core.lib.graph_traversals.traverse_topologically.
        """
        return traverse_topologically(
            start_node=start_node or self.root_block_usage_key,
            get_parents=self.get_parents,
            get_children=self.get_children,
            filter_func=filter_func,
            yield_descendants_of_unyielded=yield_descendants_of_unyielded,
        )

    def post_order_traversal(
            self,
            filter_func=None,
            start_node=None,
    ):
        """
        Yield usage keys in post-order; argument semantics are documented
        in openedx.core.lib.graph_traversals.traverse_post_order.
        """
        return traverse_post_order(
            start_node=start_node or self.root_block_usage_key,
            get_children=self.get_children,
            filter_func=filter_func,
        )

    #--- Internal methods ---#
    # To be used within the block_structure framework or by tests.

    def _prune_unreachable(self):
        """
        Mutate the structure, dropping every block no longer reachable
        from the root.
        """
        surviving = {}
        previous = self._block_relations
        # Build leaves-first via a post-order walk, which by construction
        # only visits reachable blocks.
        for block_key in self.post_order_traversal():
            if block_key not in previous:
                continue
            self._add_block(surviving, block_key)
            # Reconnect only those former children that also survived.
            for child_key in previous[block_key].children:
                if child_key in surviving:
                    self._add_to_relations(surviving, block_key, child_key)
        self._block_relations = surviving

    def _add_relation(self, parent_key, child_key):
        """
        Record a parent -> child edge in this structure.

        Arguments:
            parent_key (UsageKey) - Usage key of the parent block.
            child_key (UsageKey) - Usage key of the child block.
        """
        self._add_to_relations(self._block_relations, parent_key, child_key)

    @staticmethod
    def _add_to_relations(block_relations, parent_key, child_key):
        """
        Record a parent -> child edge in the given relations map,
        creating entries for either block as needed.
        """
        BlockStructure._add_block(block_relations, parent_key)
        BlockStructure._add_block(block_relations, child_key)
        block_relations[child_key].parents.append(parent_key)
        block_relations[parent_key].children.append(child_key)

    @staticmethod
    def _add_block(block_relations, usage_key):
        """Ensure the given usage key has an entry in the relations map."""
        if usage_key not in block_relations:
            block_relations[usage_key] = _BlockRelations()
class FieldData(object):
    """
    Data structure that stores arbitrary named fields in a ``fields``
    dict while exposing them to callers as plain attributes.
    """

    def class_field_names(self):
        """
        Return the names of attributes stored directly on the instance
        (can be overridden by subclasses).  All other attributes are
        assumed to live in the ``fields`` dict.
        """
        return ['fields']

    def __init__(self):
        # Map of field name to the field's value for this block.
        # dict {string: any picklable type}
        self.fields = {}

    def __getattr__(self, field_name):
        if self._is_own_field(field_name):
            # __getattr__ only runs after normal lookup fails, so an "own"
            # field reaching here is genuinely missing.  The previous code
            # delegated to super().__getattr__, which object does not
            # define, producing a misleading AttributeError about
            # '__getattr__' itself; raise a direct, accurate error instead.
            raise AttributeError(field_name)
        try:
            return self.fields[field_name]
        except KeyError:
            raise AttributeError("Field {0} does not exist".format(field_name))

    def __setattr__(self, field_name, field_value):
        if self._is_own_field(field_name):
            # Own fields (e.g. 'fields') are stored as real attributes.
            return super(FieldData, self).__setattr__(field_name, field_value)
        else:
            self.fields[field_name] = field_value

    def __delattr__(self, field_name):
        if self._is_own_field(field_name):
            return super(FieldData, self).__delattr__(field_name)
        else:
            del self.fields[field_name]

    def _is_own_field(self, field_name):
        """
        Return whether field_name names an actual attribute of this class
        rather than an entry in the ``fields`` dict.
        """
        return field_name in self.class_field_names()
class TransformerData(FieldData):
    """
    Data structure to encapsulate collected data for a transformer.

    Behaves exactly like FieldData; the subclass exists to give
    transformer-scoped data its own distinct type.
    """
    pass
class TransformerDataMap(dict):
    """
    Dict of Transformer name -> TransformerData.

    Entries may be addressed either by the transformer's name (string)
    or by its class; both forms resolve to the same key.
    """

    def __getitem__(self, key):
        return dict.__getitem__(self, self._translate_key(key))

    def __setitem__(self, key, value):
        dict.__setitem__(self, self._translate_key(key), value)

    def __delitem__(self, key):
        dict.__delitem__(self, self._translate_key(key))

    def get_or_create(self, key):
        """
        Return the TransformerData mapped to the given key, creating,
        storing, and returning a fresh one when none exists yet.
        """
        try:
            return self[key]
        except KeyError:
            created = TransformerData()
            self[key] = created
            return created

    def _translate_key(self, key):
        """
        Normalize a key to the transformer's name, accepting either the
        transformer class (map[TransformerClass]) or its name
        (map['transformer_name']).
        """
        try:
            return key.name()
        except AttributeError:
            return key
class BlockData(FieldData):
    """
    Collected data for a single block: its location plus per-transformer
    data, on top of the xBlock fields dict inherited from FieldData.
    """

    def class_field_names(self):
        # 'location' and 'transformer_data' are stored on the instance
        # itself rather than inside the fields dict.
        return super(BlockData, self).class_field_names() + ['location', 'transformer_data']

    def __init__(self, usage_key):
        super(BlockData, self).__init__()
        # Location (usage key) of the block.
        self.location = usage_key
        # Map of transformer name to its block-specific data.
        self.transformer_data = TransformerDataMap()
class BlockStructureBlockData(BlockStructure):
"""
Subclass of BlockStructure that is responsible for managing block
and transformer data.
"""
# The latest version of the data structure of this class. Incrementally
# update this value whenever the data structure changes. Dependent storage
# layers can then use this value when serializing/deserializing block
# structures, and invalidating any previously cached/stored data.
VERSION = 2
def __init__(self, root_block_usage_key):
super(BlockStructureBlockData, self).__init__(root_block_usage_key)
# Map of a block's usage key to its collected data, including
# its xBlock fields and block-specific transformer data.
# dict {UsageKey: BlockData}
self._block_data_map = {}
# Map of a transformer's name to its non-block-specific data.
self.transformer_data = TransformerDataMap()
def copy(self):
"""
Returns a new instance of BlockStructureBlockData with a
deep-copy of this instance's contents.
"""
from .factory import BlockStructureFactory
return BlockStructureFactory.create_new(
self.root_block_usage_key,
deepcopy(self._block_relations),
deepcopy(self.transformer_data),
deepcopy(self._block_data_map),
)
def iteritems(self):
"""
Returns iterator of (UsageKey, BlockData) pairs for all
blocks in the BlockStructure.
"""
return self._block_data_map.iteritems()
def itervalues(self):
"""
Returns iterator of BlockData for all blocks in the
BlockStructure.
"""
return self._block_data_map.itervalues()
def __getitem__(self, usage_key):
"""
Returns the BlockData associated with the given key.
"""
return self._block_data_map[usage_key]
def get_xblock_field(self, usage_key, field_name, default=None):
"""
Returns the collected value of the xBlock field for the
requested block for the requested field_name; returns default if
not found.
Arguments:
usage_key (UsageKey) - Usage key of the block whose xBlock
field is requested.
field_name (string) - The name of the field that is
requested.
default (any type) - The value to return if a field value is
not found.
"""
block_data = self._block_data_map.get(usage_key)
return getattr(block_data, field_name, default) if block_data else default
def get_transformer_data(self, transformer, key, default=None):
"""
Returns the value associated with the given key from the given
transformer's data dictionary; returns default if not found.
Arguments:
transformer (BlockStructureTransformer) - The transformer
whose collected data is requested.
key (string) - A dictionary key to the transformer's data
that is requested.
"""
try:
return getattr(self.transformer_data[transformer], key, default)
except KeyError:
return default
def set_transformer_data(self, transformer, key, value):
"""
Updates the given transformer's data dictionary with the given
key and value.
Arguments:
transformer (BlockStructureTransformer) - The transformer
whose data is to be updated.
key (string) - A dictionary key to the transformer's data.
value (any picklable type) - The value to associate with the
given key for the given transformer's data.
"""
setattr(self.transformer_data.get_or_create(transformer), key, value)
def get_transformer_block_data(self, usage_key, transformer):
"""
Returns the TransformerData for the given
transformer for the block identified by the given usage_key.
Raises KeyError if not found.
Arguments:
usage_key (UsageKey) - Usage key of the block whose
transformer data is requested.
transformer (BlockStructureTransformer) - The transformer
whose dictionary data is requested.
"""
return self._block_data_map[usage_key].transformer_data[transformer]
def get_transformer_block_field(self, usage_key, transformer, key, default=None):
"""
Returns the value associated with the given key for the given
transformer for the block identified by the given usage_key;
returns default if not found.
Arguments:
usage_key (UsageKey) - Usage key of the block whose
transformer data is requested.
transformer (BlockStructureTransformer) - The transformer
whose dictionary data is requested.
key (string) - A dictionary key to the transformer's data
that is requested.
default (any type) - The value to return if a dictionary
entry is not found.
"""
try:
transformer_data = self.get_transformer_block_data(usage_key, transformer)
except KeyError:
return default
return getattr(transformer_data, key, default)
def set_transformer_block_field(self, usage_key, transformer, key, value):
"""
Updates the given transformer's data dictionary with the given
key and value for the block identified by the given usage_key.
Arguments:
usage_key (UsageKey) - Usage key of the block whose
transformer data is to be updated.
transformer (BlockStructureTransformer) - The transformer
whose data is to be updated.
key (string) - A dictionary key to the transformer's data.
value (any picklable type) - The value to associate with the
given key for the given transformer's data for the
requested block.
"""
setattr(
self._get_or_create_block(usage_key).transformer_data.get_or_create(transformer),
key,
value,
)
def remove_transformer_block_field(self, usage_key, transformer, key):
"""
Deletes the given transformer's entire data dict for the
block identified by the given usage_key.
Arguments:
usage_key (UsageKey) - Usage key of the block whose
transformer data is to be deleted.
transformer (BlockStructureTransformer) - The transformer
whose data entry is to be deleted.
"""
try:
transformer_block_data = self.get_transformer_block_data(usage_key, transformer)
delattr(transformer_block_data, key)
except (AttributeError, KeyError):
pass
    def remove_block(self, usage_key, keep_descendants):
        """
        Removes the block identified by the usage_key and all of its
        related data from the block structure. If descendants of the
        removed block are to be kept, the structure's relations are
        updated to reconnect the block's parents with its children.

        Note: While the immediate relations of the block are updated
        (removed), all descendants of the block will remain in the
        structure unless the _prune_unreachable method is called.

        Arguments:
            usage_key (UsageKey) - Usage key of the block that is to be
                removed.

            keep_descendants (bool) - If True, the block structure's
                relations (graph edges) are updated such that the
                removed block's children become children of the
                removed block's parents.
        """
        # Capture the relations before any mutation; they are needed
        # below both for edge removal and (optionally) reconnection.
        children = self._block_relations[usage_key].children
        parents = self._block_relations[usage_key].parents

        # Remove block from its children.
        for child in children:
            self._block_relations[child].parents.remove(usage_key)

        # Remove block from its parents.
        for parent in parents:
            self._block_relations[parent].children.remove(usage_key)

        # Remove block.  pop(..., None) keeps this idempotent even if
        # the maps are out of sync.
        self._block_relations.pop(usage_key, None)
        self._block_data_map.pop(usage_key, None)

        # Recreate the graph connections if descendants are to be kept:
        # every former child becomes a child of every former parent.
        if keep_descendants:
            for child in children:
                for parent in parents:
                    self._add_relation(parent, child)
def create_universal_filter(self):
"""
Returns a filter function that always returns True for all blocks.
"""
return lambda block_key: True
def create_removal_filter(self, removal_condition, keep_descendants=False):
"""
Returns a filter function that automatically removes blocks that satisfy
the removal_condition.
Arguments:
removal_condition ((usage_key)->bool) - A function that
takes a block's usage key as input and returns whether
or not to remove that block from the block structure.
keep_descendants (bool) - See the description in
remove_block.
"""
return partial(
self.retain_or_remove,
removal_condition=removal_condition,
keep_descendants=keep_descendants,
)
def retain_or_remove(self, block_key, removal_condition, keep_descendants=False):
"""
Removes the given block if it satisfies the removal_condition.
Returns True if the block was retained, and False if the block
was removed.
Arguments:
block_key (usage_key) - Usage key of the block.
removal_condition ((usage_key)->bool) - A function that
takes a block's usage key as input and returns whether
or not to remove that block from the block structure.
keep_descendants (bool) - See the description in
remove_block.
"""
if removal_condition(block_key):
self.remove_block(block_key, keep_descendants)
return False
return True
def remove_block_traversal(self, removal_condition, keep_descendants=False):
"""
A higher-order function that traverses the block structure
using topological sort and removes all blocks satisfying the given
removal_condition.
Arguments:
removal_condition ((usage_key)->bool) - A function that
takes a block's usage key as input and returns whether
or not to remove that block from the block structure.
keep_descendants (bool) - See the description in
remove_block.
"""
self.filter_topological_traversal(
filter_func=self.create_removal_filter(
removal_condition, keep_descendants
)
)
def filter_topological_traversal(self, filter_func, **kwargs):
"""
A higher-order function that traverses the block structure
using topological sort and applies the given filter.
Arguments:
filter_func ((usage_key)->bool) - Function that returns
whether or not to yield the given block key.
If None, the True function is assumed.
kwargs (dict) - Optional keyword arguments to be forwarded
to topological_traversal.
"""
# Note: For optimization, we remove blocks using the filter
# function, since the graph traversal method can skip over
# descendants that are unyielded. However, note that the
# optimization is not currently present because of DAGs,
# but it will be as soon as we remove support for DAGs.
for _ in self.topological_traversal(filter_func=filter_func, **kwargs):
pass
#--- Internal methods ---#
# To be used within the block_structure framework or by tests.
    def _get_transformer_data_version(self, transformer):
        """
        Returns the version number stored for the given transformer,
        defaulting to 0 when no version has been recorded.

        Arguments:
            transformer (BlockStructureTransformer) - The transformer
                whose stored version is requested.
        """
        return self.get_transformer_data(transformer, TRANSFORMER_VERSION_KEY, 0)
def _add_transformer(self, transformer):
"""
Adds the given transformer to the block structure by recording
its current version number.
"""
if transformer.READ_VERSION == 0 or transformer.WRITE_VERSION == 0:
raise TransformerException('Version attributes are not set on transformer {0}.', transformer.name())
self.set_transformer_data(transformer, TRANSFORMER_VERSION_KEY, transformer.WRITE_VERSION)
def _get_or_create_block(self, usage_key):
"""
Returns the BlockData associated with the given usage_key.
If not found, creates and returns a new BlockData and
maps it to the given key.
"""
try:
return self._block_data_map[usage_key]
except KeyError:
block_data = BlockData(usage_key)
self._block_data_map[usage_key] = block_data
return block_data
class BlockStructureModulestoreData(BlockStructureBlockData):
    """
    Subclass of BlockStructureBlockData that additionally tracks
    instantiated xBlocks and the xBlock fields requested for
    collection; intended for use only during the Collect phase.

    Note: Although this class interface uses xBlock terminology, it is
    designed and implemented generically so it can work with any
    interface and implementation of an xBlock.
    """
    def __init__(self, root_block_usage_key):
        super(BlockStructureModulestoreData, self).__init__(root_block_usage_key)

        # Instantiated xBlocks, keyed by usage key.
        # dict {UsageKey: XBlock}
        self._xblock_map = {}

        # Names of xBlock fields that have been requested for
        # collection.
        # set(string)
        self._requested_xblock_fields = set()

    def request_xblock_fields(self, *field_names):
        """
        Records a request to collect the values of the given xBlock
        fields.

        A Transformer should call this method when it needs to collect
        data for a common xBlock field that may also be used by other
        transformers; storing the value once here minimizes storage
        usage compared to each transformer keeping its own copy.

        Arguments:
            field_names (list(string)) - A list of names of common
                xBlock fields whose values should be collected.
        """
        self._requested_xblock_fields.update(field_names)

    def get_xblock(self, usage_key):
        """
        Returns the instantiated xBlock stored for the given usage key.

        Arguments:
            usage_key (UsageKey) - Usage key of the block whose
                xBlock object is to be returned.
        """
        return self._xblock_map[usage_key]

    #--- Internal methods ---#
    # To be used within the block_structure framework or by tests.

    def _add_xblock(self, usage_key, xblock):
        """
        Stores the given xBlock object under the given usage_key.

        The key is passed in separately (rather than read off the
        xBlock) so this interface stays agnostic to, and decoupled
        from, the xBlock interface.

        Arguments:
            usage_key (UsageKey) - Usage key of the given xBlock.

            xblock (XBlock) - An instantiated XBlock object that is
                to be stored for later access.
        """
        self._xblock_map[usage_key] = xblock

    def _collect_requested_xblock_fields(self):
        """
        Copies every requested field from every stored xBlock into the
        corresponding block's data.
        """
        for block_usage_key, block_xblock in self._xblock_map.iteritems():
            block_data = self._get_or_create_block(block_usage_key)
            for requested_field in self._requested_xblock_fields:
                self._set_xblock_field(block_data, block_xblock, requested_field)

    def _set_xblock_field(self, block_data, xblock, field_name):
        """
        Copies the named field from the xBlock onto block_data when the
        xBlock defines it; otherwise does nothing.

        Arguments:
            block_data (BlockData) - A BlockStructure BlockData object.

            xblock (XBlock) - An instantiated XBlock object whose field
                is being collected.

            field_name (string) - The name of the xBlock field being
                collected and stored.
        """
        if not hasattr(xblock, field_name):
            return
        setattr(block_data, field_name, getattr(xblock, field_name))
| agpl-3.0 |
import json
from django.db import models
class DBFormulaTemplates(models.Model):
    """
    Database model for storing formula templates.

    One row per template file belonging to a formula; rows are looked
    up by formula UUID (see DBFormulaQuerySet._extract).
    """

    # Formula template columns
    formula = models.ForeignKey('formula.DBFormulaDetails', to_field='uuid', db_column='formula')
    template_name = models.CharField(max_length=128)
    template_file = models.TextField()
    size = models.IntegerField()  # template size in bytes, presumably -- TODO confirm units
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    # Custom model metadata
    class Meta:
        db_table = 'formula_templates'
class DBFormulaQuerySet(models.query.QuerySet):
    """
    Custom queryset manager for the DBFormulaDetails model. This allows customization of the returned
    QuerySet when extracting formula details from the database.
    """
    def __init__(self, *args, **kwargs):
        super(DBFormulaQuerySet, self).__init__(*args, **kwargs)

        # Timestamp format
        # NOTE(review): not referenced within this class; retained for
        # backward compatibility with any external users -- confirm.
        self.tstamp = '%Y-%m-%d %H:%M:%S'

    def _extract(self, formula):
        """
        Normalize a single formula row in place: stringify every value,
        attach the formula's template names, and parse the JSON
        manifest.  Returns the mutated dictionary.
        """
        # Snapshot the items so the dict is never mutated while being
        # iterated (the original iterated iteritems() while assigning).
        for key, value in list(formula.items()):
            formula[key] = str(value)

        # Get formula templates
        formula['templates'] = [
            template['template_name']
            for template in DBFormulaTemplates.objects.filter(formula=formula['uuid']).values()
        ]

        # Extract the manifest
        formula['manifest'] = json.loads(formula['manifest'])

        # Return the updated formula
        return formula

    def values(self, *fields):
        """
        Wrapper for the default values() method that post-processes
        each returned formula dictionary via _extract.
        """
        # Store the initial results
        results = super(DBFormulaQuerySet, self).values(*fields)

        # _extract mutates each row in place; the original rebound the
        # loop variable to the return value, which had no effect.
        for formula in results:
            self._extract(formula)

        # Return the constructed formula results
        return results
class DBFormulaManager(models.Manager):
    """
    Custom objects manager for the DBFormulaDetails model. Acts as a link between the main DBFormulaDetails
    model and the custom DBFormulaQuerySet model.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but NOT forwarded to
        # Manager.__init__ -- confirm this is intentional.
        super(DBFormulaManager, self).__init__()

    def get_queryset(self, *args, **kwargs):
        """
        Wrapper method for the internal get_queryset() method.

        Returns a DBFormulaQuerySet bound to this manager's model;
        any extra arguments are ignored.
        """
        return DBFormulaQuerySet(model=self.model)
"""
CloudScape Deployment Formula
"""
class DBFormulaDetails(models.Model):
# Formulas table columns
uuid = models.CharField(max_length=36, unique=True)
name = models.CharField(max_length=128, unique=True)
label = models.CharField(max_length=128)
desc = models.TextField()
manifest = models.TextField()
type = models.CharField(max_length=24)
internal = models.NullBooleanField()
locked = models.NullBooleanField()
locked_by = models.CharField(max_length=64, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# Custom objects manager
objects = DBFormulaManager()
# Custom model metadata
class Meta:
db_table = 'formula_details'
"""
CloudScape Formula Events
"""
class DBFormulaEvents(models.Model):
# Formula template columns
event_id = models.CharField(max_length=128)
event_meta = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# Custom model metadata
class Meta:
db_table = 'formula_events'
"""
CloudScape Formula Runtime Registry
"""
class DBFormulaRegistry(models.Model):
# Formula registry columns
formula = models.CharField(max_length=128)
uuid = models.CharField(max_length=36, unique=True)
host = models.CharField(max_length=36)
checksum = models.CharField(max_length=64)
key = models.CharField(max_length=64)
verified = models.BooleanField()
decrypted = models.BooleanField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
# Custom model metadata
class Meta:
db_table = 'formula_registry' | gpl-3.0 |
import os
import glob
import mimetypes
from django.apps import AppConfig
from mako.lookup import TemplateLookup
from framework.routing import process_rules
from framework.flask import app
from website import settings
from website.util import rubeus
def _is_image(filename):
mtype, _ = mimetypes.guess_type(filename)
return mtype and mtype.startswith('image')
# Fallback Mako templates used when an addon does not provide its own
# node- or user-settings template (see BaseAddonAppConfig below).
NODE_SETTINGS_TEMPLATE_DEFAULT = os.path.join(
    settings.TEMPLATES_PATH,
    'project',
    'addon',
    'node_settings_default.mako',
)

USER_SETTINGS_TEMPLATE_DEFAULT = os.path.join(
    settings.TEMPLATES_PATH,
    'profile',
    'user_settings_default.mako',
)
def generic_root_folder(addon_short_name):
    """
    Builds and returns a root-folder serializer for the given addon
    short name.  The returned function produces the Rubeus/HGrid
    response for the addon's root folder only.
    """
    def _root_folder(node_settings, auth, **kwargs):
        """Return the Rubeus/HGrid-formatted response for the root folder only."""
        # Quit if node settings does not have authentication or a folder.
        if not (node_settings.has_auth and node_settings.folder_id):
            return None
        owner = node_settings.owner
        return [
            rubeus.build_addon_root(
                node_settings=node_settings,
                name=node_settings.fetch_folder_name(),
                permissions=auth,
                nodeUrl=owner.url,
                nodeApiUrl=owner.api_url,
                private_key=kwargs.get('view_only', None),
            )
        ]
    _root_folder.__name__ = '{0}_root_folder'.format(addon_short_name)
    return _root_folder
class BaseAddonAppConfig(AppConfig):
    """
    Base Django AppConfig for OSF addons.

    Builds the Mako template lookup, discovers the addon's icon, and
    provides JSON serialization shared by all addon app configs.
    Subclasses must define ``full_name`` and ``short_name``.
    """

    name = 'addons.base'
    label = 'addons_base'

    actions = tuple()
    user_settings = None
    node_settings = None

    node_settings_template = NODE_SETTINGS_TEMPLATE_DEFAULT
    user_settings_template = USER_SETTINGS_TEMPLATE_DEFAULT

    views = []
    added_default = []
    added_mandatory = []

    include_js = {}  # TODO: Deprecate these elsewhere and remove
    include_css = {}  # TODO: Deprecate these elsewhere and remove

    configs = []

    has_hgrid_files = False
    get_hgrid_data = None
    max_file_size = None
    accept_extensions = True

    # NOTE: Subclasses may make routes a property to avoid import errors
    routes = []

    owners = []
    categories = []

    def __init__(self, *args, **kwargs):
        # Bug fix: the original called ``.__init__()`` on the None
        # returned by super().__init__ and then returned that result
        # from __init__; both only worked by accident.
        super(BaseAddonAppConfig, self).__init__(*args, **kwargs)

        # Build template lookup from the global template path plus any
        # addon-specific settings template directories that exist.
        paths = [settings.TEMPLATES_PATH]
        if self.user_settings_template:
            paths.append(os.path.dirname(self.user_settings_template))
        if self.node_settings_template:
            paths.append(os.path.dirname(self.node_settings_template))

        template_dirs = list(
            set(
                [
                    path
                    for path in paths
                    if os.path.exists(path)
                ]
            )
        )

        if template_dirs:
            self.template_lookup = TemplateLookup(
                directories=template_dirs,
                default_filters=[
                    'unicode',  # default filter; must set explicitly when overriding
                    'temp_ampersand_fixer',
                    # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
                    'h',
                ],
                imports=[
                    'from website.util.sanitize import temp_ampersand_fixer',
                    # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
                ]
            )
        else:
            self.template_lookup = None

    @property
    def full_name(self):
        """Human-readable addon name; must be provided by subclasses."""
        raise NotImplementedError

    @property
    def short_name(self):
        """Machine-readable addon name; must be provided by subclasses."""
        raise NotImplementedError

    @property
    def icon(self):
        """Filename of the addon's single ``comicon.*`` image, or None.

        The result is cached on the instance after first computation.
        """
        try:
            return self._icon
        except AttributeError:
            # Narrowed from ``except Exception``: only the cache-miss on
            # self._icon should trigger (re)discovery; other errors
            # (e.g. NotImplementedError from short_name) now propagate
            # from the computation below as before.
            static_path = os.path.join('addons', self.short_name, 'static')
            static_files = glob.glob(os.path.join(static_path, 'comicon.*'))
            image_files = [
                os.path.split(filename)[1]
                for filename in static_files
                if _is_image(filename)
            ]
            # Only a single unambiguous icon is accepted.
            if len(image_files) == 1:
                self._icon = image_files[0]
            else:
                self._icon = None
        return self._icon

    @property
    def icon_url(self):
        """Static URL of the addon icon, or None when no icon exists."""
        return self._static_url(self.icon) if self.icon else None

    def _static_url(self, filename):
        """Build static URL for file; use the current addon if relative path,
        else the global static directory.

        :param str filename: Local path to file
        :return str: Static URL for file
        """
        if filename.startswith('/'):
            return filename
        # Bug fix: the format template lacked the {filename} field, so
        # the ``filename`` keyword argument was never rendered.
        return '/static/addons/{addon}/{filename}'.format(
            addon=self.short_name,
            filename=filename,
        )

    def to_json(self):
        """Serialize the addon config for API/front-end consumption."""
        return {
            'short_name': self.short_name,
            'full_name': self.full_name,
            'capabilities': self.short_name in settings.ADDON_CAPABILITIES,
            'addon_capabilities': settings.ADDON_CAPABILITIES.get(self.short_name),
            'icon': self.icon_url,
            'has_page': 'page' in self.views,
            'has_widget': 'widget' in self.views,
        }

    # Override Appconfig
    def ready(self):
        # Set up Flask routes
        for route_group in self.routes:
            process_rules(app, **route_group)
import os
import argparse
import logging
import blessings
import random
import contextlib
import shutil
import datetime
import sys
from .api import TheTVDBApi
from .action.sync import setup as sync_setup, action as sync_action
from .action.search import setup as search_setup
from .action.add import setup as add_setup
from .action.remove import setup as remove_setup
from .action.list import setup as list_setup
from .action.show import setup as show_setup
from .action.mark import setup as mark_setup
from .action.next import setup as next_setup
from .action.compact import setup as compact_setup
from .dao import SeriesDAO
from .utils import write_yaml
from .utils import read_yaml
from .database import open_database
__version__ = "0.10.1"

# current scheme version.  Bump when a new entry is added to
# AVAILABLE_MIGRATIONS below.
SCHEME_VERSION = 2

log = logging.getLogger(__name__)

LOGGING_FORMAT = "%(asctime)s %(levelname)7s %(message)s"

# Default config written to ~/.ontv/config.yaml on first run; the user
# must fill in api_key before syncing.
TEMPLATE_CONFIGURATION = {
    "api_key": None,
    "language": "en",
}
class ColorScheme(object):
    """
    Maps semantic color roles (e.g. ``warning``) to attributes of a
    blessings terminal.  Assigning ``scheme.role = 'color_name'``
    registers a mapping; reading ``scheme.role`` resolves it against
    the terminal.  Raises KeyError for unmapped roles or when the
    terminal lacks the mapped color attribute.
    """
    def __init__(self, term, colors=dict()):
        # Write through __dict__ directly so our own __setattr__ hook
        # is not invoked during construction.
        self.__dict__['_term'] = term
        self.__dict__['_colors'] = dict(colors)

    def __setattr__(self, key, name):
        self._colors[key] = name

    def __getattr__(self, key):
        color_name = self._colors.get(key)
        if not color_name:
            raise KeyError("Color not defined for '{0}'".format(key))
        resolved = getattr(self._term, color_name, None)
        if resolved is None:
            raise KeyError("Missing color '{0}'".format(color_name))
        return resolved
def setup_parser(parser):
    """
    Register the global command-line options and one subparser per
    subcommand on *parser*; each subcommand's own options are added by
    its imported ``*_setup`` function.
    """
    parser.add_argument(
        '--libdir', '-d',
        metavar="<directory>",
        help="Library directory, defaults to $HOME/.ontv.",
        default=None,
    )

    parser.add_argument(
        '--loglevel', '-l',
        metavar="<level>",
        help="Logging level to use.",
        default=logging.WARNING,
    )

    parser.add_argument(
        '--debug',
        dest='loglevel',
        help="Use a debugging log level.",
        action='store_const',
        const=logging.DEBUG,
    )

    parser.add_argument(
        '--apikey',
        dest='api_key',
        metavar="<key>",
        help="API key to use",
        default=None,
    )

    parser.add_argument(
        '--language',
        dest='language',
        metavar="<language>",
        help="Language to use",
        default=None,
    )

    # One subparser per subcommand; each *_setup registers the
    # subcommand's arguments and its ``action`` callback.
    subparsers = parser.add_subparsers()

    sync_parser = subparsers.add_parser(
        "sync",
        help="Synchronize local database.",
    )

    sync_setup(sync_parser)

    search_parser = subparsers.add_parser(
        "search",
        help="Search for tv series.",
    )

    search_setup(search_parser)

    add_parser = subparsers.add_parser(
        "add",
        help="Add tv series.",
    )

    add_setup(add_parser)

    remove_parser = subparsers.add_parser(
        "remove",
        help="Remove tv series.",
    )

    remove_setup(remove_parser)

    list_parser = subparsers.add_parser(
        "list",
        help="List tv series you are watching.",
    )

    list_setup(list_parser)

    show_parser = subparsers.add_parser(
        "show",
        help="Show episodes in a series.",
    )

    show_setup(show_parser)

    mark_parser = subparsers.add_parser(
        "mark",
        help="Mark or unmark an episode as watched.",
    )

    mark_setup(mark_parser)

    next_parser = subparsers.add_parser(
        "next",
        help="Show the next episode to watch.",
    )

    next_setup(next_parser)

    compact_parser = subparsers.add_parser(
        "compact",
        help="Make the local database smaller.",
    )

    compact_setup(compact_parser)
def setup_ns(ns):
    """
    Populate the parsed-arguments namespace with derived state:
    library paths, loaded configuration/mirror/language data, a
    TheTVDBApi client, and the color scheme.  Creates the library
    directory and a default configuration file on first run.

    Raises an Exception when $HOME is unset (and no --libdir given) or
    when the configured language is not among the synced languages.
    """
    home = os.environ.get("HOME")

    if ns.libdir is None:
        if not home:
            raise Exception("missing environment variable: HOME")

        ns.libdir = os.path.join(home, '.ontv')

    # Derived filesystem layout under the library directory.
    ns.backup_libdir = os.path.join(home, '.ontv-backups')
    ns.mirrors_path = os.path.join(ns.libdir, 'mirrors.yaml')
    ns.languages_path = os.path.join(ns.libdir, 'languages.yaml')
    ns.config_path = os.path.join(ns.libdir, 'config.yaml')
    ns.db_path = os.path.join(ns.libdir, 'db')
    ns.series_db_path = os.path.join(ns.libdir, 'series')
    ns.episodes_db_path = os.path.join(ns.libdir, 'episodes')
    ns.watched_db_path = os.path.join(ns.libdir, 'watched')

    directories = [
        ns.libdir,
    ]

    for directory in directories:
        if not os.path.isdir(directory):
            log.info("Creating directory {0}".format(directory))
            os.mkdir(directory)

    ns.t = blessings.Terminal()

    # Config values are merged directly onto the namespace, so file
    # settings can override/augment command-line defaults.
    if os.path.isfile(ns.config_path):
        log.debug("Loading configuration from {0}".format(ns.config_path))

        with open(ns.config_path) as fp:
            for key, value in read_yaml(fp).items():
                setattr(ns, key, value)
    else:
        log.info("Creating default configuration {0}".format(ns.config_path))

        with open(ns.config_path, 'w') as fp:
            write_yaml(fp, TEMPLATE_CONFIGURATION)

    if os.path.isfile(ns.mirrors_path):
        log.debug("Loading mirrors from {0}".format(ns.mirrors_path))

        with open(ns.mirrors_path) as fp:
            ns.mirrors = read_yaml(fp)
    else:
        ns.mirrors = []

    if os.path.isfile(ns.languages_path):
        log.debug("Loading mirrors from {0}".format(ns.languages_path))

        with open(ns.languages_path) as fp:
            ns.languages = read_yaml(fp)
    else:
        ns.languages = []

    if ns.languages:
        ns.abbrev_languages = [l['abbreviation'] for l in ns.languages]
    else:
        ns.abbrev_languages = []

    # A random mirror is picked per invocation for crude load spreading.
    if ns.mirrors:
        ns.base_url = random.choice(ns.mirrors)['mirrorpath']
        log.debug("Picked mirror: {0}".format(ns.base_url))
    else:
        ns.base_url = None

    ns.api = TheTVDBApi(ns.api_key, base_url=ns.base_url)

    if ns.abbrev_languages and ns.language:
        if ns.language not in ns.abbrev_languages:
            raise Exception(
                "Language not valid, must be one of {0}".format(
                    ", ".join(ns.abbrev_languages)))
    else:
        ns.language = None

    # Languages are only present after a successful 'sync'.
    ns.is_synced = bool(ns.abbrev_languages)

    ns.C = ColorScheme(ns.t)
    ns.C.range_before = 'green'
    ns.C.range_inside = 'yellow'
    ns.C.range_outside = 'red'
    ns.C.all_seen = 'magenta'
    ns.C.warning = 'red'
    ns.C.info = 'green'
    ns.C.title = 'bold_magenta'
    ns.C.series_title = 'bold'
def migrate_1(ns):
    """
    Migration from scheme version 1: drop the legacy "series"/"watched"
    entries from the main db and convert all series/episodes/watched
    keys from strings to integers.
    """
    db = ns.databases['db']
    series = ns.databases['series']
    episodes = ns.databases['episodes']
    watched = ns.databases['watched']

    del db["series"]
    del db["watched"]

    # convert all id keys in each store into numeric values.
    for mapping in (series, episodes, watched):
        _migrate_keys_to_int(mapping)


def _migrate_keys_to_int(mapping):
    """Convert every string key in *mapping* to its integer form, in place."""
    # Snapshot the keys first: mutating a mapping while iterating it is
    # unsafe (and an error for real dicts on modern Pythons).
    for key in list(mapping.keys()):
        numeric_key = int(key)
        # Bug fix: the original unconditionally re-inserted and then
        # deleted the key, which destroyed entries whose keys were
        # already ints (insert and delete hit the same slot).
        if numeric_key != key:
            mapping[numeric_key] = mapping.pop(key)
# Registry of schema migrations keyed by the scheme version they
# upgrade from; migration_check applies every entry whose key is >=
# the stored version.
AVAILABLE_MIGRATIONS = {
    1: migrate_1
}
def backup_libdir(ns):
    """
    Copy the library directory into a new, uniquely named directory
    under the backup root (named YYYY_MM_DD_<counter>) and return the
    backup path.
    """
    if not os.path.isdir(ns.backup_libdir):
        os.makedirs(ns.backup_libdir)

    date_prefix = datetime.datetime.now().strftime('%Y_%m_%d')

    # Find the first unused counter suffix for today's date.
    suffix = 0
    while True:
        target = os.path.join(
            ns.backup_libdir, "{0}_{1}".format(date_prefix, suffix))
        if not os.path.isdir(target):
            break
        suffix += 1

    shutil.copytree(ns.libdir, target)
    return target
def migration_check(ns):
    """
    Apply any pending schema migrations to the opened databases,
    backing up the library directory first when at least one migration
    is needed, and record the new scheme version.
    """
    db = ns.databases['db']

    # Databases written before versioning existed default to version 1.
    version = db.get('scheme-version', 1)

    migrations = list()

    for key, migration in AVAILABLE_MIGRATIONS.items():
        if version <= key:
            migrations.append((key, migration))

    if migrations:
        # Safety net: snapshot the whole library before mutating it.
        path = backup_libdir(ns)
        ns.out(ns.C.warning("Backed up library directory to: {}".format(path)))

    for key, migration in migrations:
        ns.out(ns.C.title("Migrating database version {0}".format(key)))
        migration(ns)

    db['scheme-version'] = SCHEME_VERSION
class Printer(object):
    """
    Callable that encodes unicode strings and writes them, newline-
    terminated, to a byte stream.

    NOTE(review): Python 2 only (`unicode` builtin); under Python 3
    the isinstance check would raise NameError.
    """
    def __init__(self, stream, encoding='utf-8'):
        self._stream = stream
        self._encoding = encoding

    def __call__(self, string):
        # Reject byte strings so the encoding step is always explicit.
        if not isinstance(string, unicode):
            raise Exception("Only unicode objects should be used")

        self._stream.write(string.encode(self._encoding) + '\n')
def main(args):
    """
    Entry point: parse arguments, build the runtime namespace, open the
    four databases, run migrations, and dispatch to the selected
    subcommand's action.  Returns the action's exit code.
    """
    parser = argparse.ArgumentParser(version="ontv " + __version__)

    setup_parser(parser)

    ns = parser.parse_args(args)
    ns.out = Printer(sys.stdout)

    logging.basicConfig(format=LOGGING_FORMAT, level=ns.loglevel)

    setup_ns(ns)

    # contextlib.nested is Python 2 only; opens all four stores as one
    # context so they are closed together.
    databases = contextlib.nested(
        open_database(ns.db_path),
        open_database(ns.series_db_path),
        open_database(ns.episodes_db_path),
        open_database(ns.watched_db_path),
    )

    with databases as (db, series, episodes, watched):
        ns.databases = {
            "db": db,
            "series": series,
            "episodes": episodes,
            "watched": watched,
        }

        migration_check(ns)

        ns.series = SeriesDAO(series, episodes, watched)

        # Every action except 'sync' needs synced mirror/language data.
        if not ns.is_synced and ns.action != sync_action:
            ns.out(ns.t.bold_red("Your first action should be 'sync'"))
            return 1

        return ns.action(ns)
| gpl-3.0 |
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# X.509 message syntax
#
# ASN.1 source from:
# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
# http://www.ietf.org/rfc/rfc2459.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import tag, namedtype, namedval, univ, constraint, char, useful
# Stand-in for an unbounded ASN.1 size limit ("MAX" in the ASN.1 source).
MAX = float('inf')

#
# PKIX1Explicit88
#

# Upper Bounds
# Upper bounds on attribute sizes/counts as declared in RFC 2459
# (PKIX1Explicit88 module); used as ValueSizeConstraint limits below.
ub_name = univ.Integer(32768)
ub_common_name = univ.Integer(64)
ub_locality_name = univ.Integer(128)
ub_state_name = univ.Integer(128)
ub_organization_name = univ.Integer(64)
ub_organizational_unit_name = univ.Integer(64)
ub_title = univ.Integer(64)
ub_match = univ.Integer(128)
ub_emailaddress_length = univ.Integer(128)
ub_common_name_length = univ.Integer(64)
ub_country_name_alpha_length = univ.Integer(2)
ub_country_name_numeric_length = univ.Integer(3)
ub_domain_defined_attributes = univ.Integer(4)
ub_domain_defined_attribute_type_length = univ.Integer(8)
ub_domain_defined_attribute_value_length = univ.Integer(128)
ub_domain_name_length = univ.Integer(16)
ub_extension_attributes = univ.Integer(256)
ub_e163_4_number_length = univ.Integer(15)
ub_e163_4_sub_address_length = univ.Integer(40)
ub_generation_qualifier_length = univ.Integer(3)
ub_given_name_length = univ.Integer(16)
ub_initials_length = univ.Integer(5)
ub_integer_options = univ.Integer(256)
ub_numeric_user_id_length = univ.Integer(32)
ub_organization_name_length = univ.Integer(64)
ub_organizational_unit_name_length = univ.Integer(32)
ub_organizational_units = univ.Integer(4)
ub_pds_name_length = univ.Integer(16)
ub_pds_parameter_length = univ.Integer(30)
ub_pds_physical_address_lines = univ.Integer(6)
ub_postal_code_length = univ.Integer(16)
ub_surname_length = univ.Integer(40)
ub_terminal_id_length = univ.Integer(24)
ub_unformatted_address_length = univ.Integer(180)
ub_x121_address_length = univ.Integer(16)
# Thin subclasses of the pyasn1 character types, exposed under local
# module names.
class UniversalString(char.UniversalString):
    pass


class BMPString(char.BMPString):
    pass


class UTF8String(char.UTF8String):
    pass
# PKIX arc object identifiers (RFC 2459): the pkix root, its private
# extension (pe), policy qualifier (qt), key purpose (kp), and access
# descriptor (ad) sub-arcs, plus the CPS/unotice qualifiers and the
# OCSP/caIssuers access methods.
id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
class AttributeValue(univ.Any):
    """Open (ANY) type holding a directory attribute value."""
    pass


class AttributeType(univ.ObjectIdentifier):
    """OID identifying a directory attribute type."""
    pass


class AttributeTypeAndValue(univ.Sequence):
    """SEQUENCE pairing an attribute type OID with its value."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeType()),
        namedtype.NamedType('value', AttributeValue())
    )


class Attribute(univ.Sequence):
    """SEQUENCE of an attribute type OID and a SET OF its values."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeType()),
        namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
    )
# X.520 attribute-type OIDs under the 2.5.4 (id-at) arc.
id_at = univ.ObjectIdentifier('2.5.4')
id_at_name = univ.ObjectIdentifier('2.5.4.41')
# preserve misspelled variable for compatibility
id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4')
id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
id_at_initials = univ.ObjectIdentifier('2.5.4.43')
id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')


class X520name(univ.Choice):
    """X.520 name: CHOICE of string encodings, 1..ub-name characters."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString',
                            char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
        namedtype.NamedType('printableString',
                            char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
        namedtype.NamedType('universalString',
                            char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
        namedtype.NamedType('utf8String',
                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
        namedtype.NamedType('bmpString',
                            char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
    )
id_at_commonName = univ.ObjectIdentifier('2.5.4.3')


class X520CommonName(univ.Choice):
    """X.520 common name: CHOICE of string encodings, 1..ub-common-name chars."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
        namedtype.NamedType('utf8String',
                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
        namedtype.NamedType('bmpString',
                            char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
    )
id_at_localityName = univ.ObjectIdentifier('2.5.4.7')


class X520LocalityName(univ.Choice):
    """X.520 locality name: CHOICE of string encodings, 1..ub-locality-name chars."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('utf8String',
                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('bmpString',
                            char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
    )
id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')


class X520StateOrProvinceName(univ.Choice):
    """X.520 state/province name: CHOICE of string encodings, 1..ub-state-name chars."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString',
                            char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
        namedtype.NamedType('utf8String',
                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
        namedtype.NamedType('bmpString',
                            char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
    )
id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')


class X520OrganizationName(univ.Choice):
    """X.520 organization name: CHOICE of string encodings, 1..ub-organization-name chars."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
    )
id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')


class X520OrganizationalUnitName(univ.Choice):
    """X.520 organizational unit name: CHOICE of string encodings, 1..ub-organizational-unit-name chars."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
    )
id_at_title = univ.ObjectIdentifier('2.5.4.12')
# X.520 title: CHOICE of strings bounded by ub_title.
class X520Title(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString',
                            char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
        namedtype.NamedType('printableString',
                            char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
        namedtype.NamedType('universalString',
                            char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
        namedtype.NamedType('utf8String',
                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
        namedtype.NamedType('bmpString',
                            char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
    )
id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
# X.520 dnQualifier: an unconstrained PrintableString.
class X520dnQualifier(char.PrintableString):
    pass
id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
# X.520 countryName: exactly two PrintableString characters.
class X520countryName(char.PrintableString):
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
# PKCS#9 emailAddress: IA5String of 1..ub_emailaddress_length characters.
class Pkcs9email(char.IA5String):
    subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
# ----
class DSAPrivateKey(univ.Sequence):
    """PKIX compliant DSA private key structure"""
    # SEQUENCE: version (v1 = 0), the DSA domain parameters p, q, g, and the
    # public/private key integers.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
        namedtype.NamedType('p', univ.Integer()),
        namedtype.NamedType('q', univ.Integer()),
        namedtype.NamedType('g', univ.Integer()),
        namedtype.NamedType('public', univ.Integer()),
        namedtype.NamedType('private', univ.Integer())
    )
# ----
class RelativeDistinguishedName(univ.SetOf):
    # SET OF AttributeTypeAndValue (defined earlier in this module).
    componentType = AttributeTypeAndValue()
class RDNSequence(univ.SequenceOf):
    componentType = RelativeDistinguishedName()
class Name(univ.Choice):
    # CHOICE with a single, deliberately unnamed alternative wrapping RDNSequence.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('', RDNSequence())
    )
class DirectoryString(univ.Choice):
    # General-purpose directory string; all alternatives are 1..MAX characters.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString',
                            char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('printableString',
                            char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('universalString',
                            char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('utf8String',
                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
        # hm, this should not be here!? XXX
    )
# certificate and CRL specific structures begin here
class AlgorithmIdentifier(univ.Sequence):
    # Algorithm OID plus optional algorithm-specific parameters carried as ANY.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
        namedtype.OptionalNamedType('parameters', univ.Any())
    )
class Extension(univ.Sequence):
    # A single certificate/CRL extension: OID, criticality (DEFAULT FALSE),
    # and the encoded extension value wrapped in ANY.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('extnID', univ.ObjectIdentifier()),
        namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
        namedtype.NamedType('extnValue', univ.Any())
    )
class Extensions(univ.SequenceOf):
    componentType = Extension()
    # SIZE (1..MAX): at least one extension when the field is present.
    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class SubjectPublicKeyInfo(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', AlgorithmIdentifier()),
        namedtype.NamedType('subjectPublicKey', univ.BitString())
    )
class UniqueIdentifier(univ.BitString):
    pass
class Time(univ.Choice):
    # Times may be expressed as UTCTime or GeneralizedTime.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('utcTime', useful.UTCTime()),
        namedtype.NamedType('generalTime', useful.GeneralizedTime())
    )
class Validity(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('notBefore', Time()),
        namedtype.NamedType('notAfter', Time())
    )
class CertificateSerialNumber(univ.Integer):
    pass
class Version(univ.Integer):
    # Certificate/CRL version numbers: v1=0, v2=1, v3=2.
    namedValues = namedval.NamedValues(
        ('v1', 0), ('v2', 1), ('v3', 2)
    )
class TBSCertificate(univ.Sequence):
    # To-be-signed portion of a certificate. version is [0] EXPLICIT with
    # DEFAULT v1; the unique IDs are [1]/[2] IMPLICIT; extensions are [3] EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.DefaultedNamedType('version', Version('v1').subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('serialNumber', CertificateSerialNumber()),
        namedtype.NamedType('signature', AlgorithmIdentifier()),
        namedtype.NamedType('issuer', Name()),
        namedtype.NamedType('validity', Validity()),
        namedtype.NamedType('subject', Name()),
        namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
        namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('extensions', Extensions().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
    )
class Certificate(univ.Sequence):
    # Outer certificate: the TBS part, the signature algorithm, and signature bits.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tbsCertificate', TBSCertificate()),
        namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
        namedtype.NamedType('signatureValue', univ.BitString())
    )
# CRL structures
class RevokedCertificate(univ.Sequence):
    # One CRL entry: serial number, revocation time, optional entry extensions.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('userCertificate', CertificateSerialNumber()),
        namedtype.NamedType('revocationDate', Time()),
        namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
    )
class TBSCertList(univ.Sequence):
    # To-be-signed certificate list; crlExtensions are [0] EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('version', Version()),
        namedtype.NamedType('signature', AlgorithmIdentifier()),
        namedtype.NamedType('issuer', Name()),
        namedtype.NamedType('thisUpdate', Time()),
        namedtype.OptionalNamedType('nextUpdate', Time()),
        namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
        namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
    )
class CertificateList(univ.Sequence):
    # Outer CRL: the TBS list, the signature algorithm, and signature bits.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tbsCertList', TBSCertList()),
        namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
        namedtype.NamedType('signature', univ.BitString())
    )
# Algorithm OIDs and parameter structures
pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
class Dss_Sig_Value(univ.Sequence):
    # DSA signature value: the (r, s) integer pair.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('r', univ.Integer()),
        namedtype.NamedType('s', univ.Integer())
    )
dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
class ValidationParms(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('seed', univ.BitString()),
        namedtype.NamedType('pgenCounter', univ.Integer())
    )
class DomainParameters(univ.Sequence):
    # Diffie-Hellman domain parameters with optional validation parameters.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('p', univ.Integer()),
        namedtype.NamedType('g', univ.Integer()),
        namedtype.NamedType('q', univ.Integer()),
        namedtype.NamedType('j', univ.Integer()),
        namedtype.OptionalNamedType('validationParms', ValidationParms())
    )
id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
class Dss_Parms(univ.Sequence):
    # DSA domain parameters (p, q, g).
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('p', univ.Integer()),
        namedtype.NamedType('q', univ.Integer()),
        namedtype.NamedType('g', univ.Integer())
    )
# x400 address syntax starts here
# The bare univ.Integer values in this section are X.400 extension-attribute
# type codes (paired with the class that carries the matching value syntax).
teletex_domain_defined_attributes = univ.Integer(6)
class TeletexDomainDefinedAttribute(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
        namedtype.NamedType('value', char.TeletexString())
    )
class TeletexDomainDefinedAttributes(univ.SequenceOf):
    componentType = TeletexDomainDefinedAttribute()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
terminal_type = univ.Integer(23)
class TerminalType(univ.Integer):
    # X.400 terminal-type extension attribute: a small INTEGER with named values.
    # Fix: an Integer's value is a number, not a sized object, so the bound must
    # be a ValueRangeConstraint; the previous ValueSizeConstraint would call
    # len() on the int and raise TypeError on value assignment.
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, ub_integer_options)
    namedValues = namedval.NamedValues(
        ('telex', 3),
        ('teletelex', 4),
        ('g3-facsimile', 5),
        ('g4-facsimile', 6),
        ('ia5-terminal', 7),
        ('videotex', 8)
    )
class PresentationAddress(univ.Sequence):
    # OSI presentation address: optional selectors [0]-[2] and a SET OF
    # network addresses tagged [3], all EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3),
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
    )
extended_network_address = univ.Integer(22)
class E163_4_address(univ.Sequence):
    # Telephone-number form: number [0] plus optional sub-address [1], EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('number', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
class ExtendedNetworkAddress(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('e163-4-address', E163_4_address()),
        namedtype.NamedType('psap-address', PresentationAddress().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class PDSParameter(univ.Set):
    # Common shape shared by the postal ("PDS") extension attributes below:
    # an optional printable-string and/or teletex-string rendering.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
        namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
    )
local_postal_attributes = univ.Integer(21)
class LocalPostalAttributes(PDSParameter):
    pass
class UniquePostalName(PDSParameter):
    pass
unique_postal_name = univ.Integer(20)
poste_restante_address = univ.Integer(19)
class PosteRestanteAddress(PDSParameter):
    pass
post_office_box_address = univ.Integer(18)
class PostOfficeBoxAddress(PDSParameter):
    pass
street_address = univ.Integer(17)
class StreetAddress(PDSParameter):
    pass
class UnformattedPostalAddress(univ.Set):
    # Free-form address: up to ub_pds_physical_address_lines printable lines,
    # and/or one teletex string of up to ub_unformatted_address_length chars.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
        namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
    )
# Physical-delivery extension attributes; the integers are their type codes.
physical_delivery_office_name = univ.Integer(10)
class PhysicalDeliveryOfficeName(PDSParameter):
    pass
physical_delivery_office_number = univ.Integer(11)
class PhysicalDeliveryOfficeNumber(PDSParameter):
    pass
extension_OR_address_components = univ.Integer(12)
class ExtensionORAddressComponents(PDSParameter):
    pass
physical_delivery_personal_name = univ.Integer(13)
class PhysicalDeliveryPersonalName(PDSParameter):
    pass
physical_delivery_organization_name = univ.Integer(14)
class PhysicalDeliveryOrganizationName(PDSParameter):
    pass
extension_physical_delivery_address_components = univ.Integer(15)
class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
    pass
unformatted_postal_address = univ.Integer(16)
postal_code = univ.Integer(9)
class PostalCode(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('numeric-code', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
        namedtype.NamedType('printable-code', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
    )
class PhysicalDeliveryCountryName(univ.Choice):
    # Fixed-length country code: numeric (X.121 DCC) or alpha-2 form.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
                                                       ub_country_name_numeric_length))),
        namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
    )
class PDSName(char.PrintableString):
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
physical_delivery_country_name = univ.Integer(8)
class TeletexOrganizationalUnitName(char.TeletexString):
    subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
pds_name = univ.Integer(7)
teletex_organizational_unit_names = univ.Integer(5)
class TeletexOrganizationalUnitNames(univ.SequenceOf):
    componentType = TeletexOrganizationalUnitName()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
teletex_personal_name = univ.Integer(4)
class TeletexPersonalName(univ.Set):
    # Personal name in teletex form; components tagged [0]-[3] EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('surname', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
    )
teletex_organization_name = univ.Integer(3)
class TeletexOrganizationName(char.TeletexString):
    subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
teletex_common_name = univ.Integer(2)
class TeletexCommonName(char.TeletexString):
    subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
class CommonName(char.PrintableString):
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
common_name = univ.Integer(1)
class ExtensionAttribute(univ.Sequence):
    # Generic X.400 extension attribute: an integer type code [0] and an
    # opaque value [1], both EXPLICIT.
    # Fix: the type-code bound is a range on the integer's *value*, so it must
    # be a ValueRangeConstraint; the previous ValueSizeConstraint would call
    # len() on the int and raise TypeError when a value is assigned.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
            subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('extension-attribute-value',
                            univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
class ExtensionAttributes(univ.SetOf):
    componentType = ExtensionAttribute()
    subtypeSpec = univ.SetOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
class BuiltInDomainDefinedAttribute(univ.Sequence):
    # Printable-string (type, value) pair for domain-defined attributes.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
        namedtype.NamedType('value', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
    )
class BuiltInDomainDefinedAttributes(univ.SequenceOf):
    componentType = BuiltInDomainDefinedAttribute()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
class OrganizationalUnitName(char.PrintableString):
    # X.400 OrganizationalUnitName: PrintableString of
    # 1..ub_organizational_unit_name_length characters.
    # Fix: derive the base constraint from this class's own base,
    # char.PrintableString, not univ.SequenceOf (a copy-paste slip; the sibling
    # string types above all use their own base's subtypeSpec). Both defaults
    # are empty constraint sets, so decoded values are unaffected.
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
class OrganizationalUnitNames(univ.SequenceOf):
    componentType = OrganizationalUnitName()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
class PersonalName(univ.Set):
    # Printable-string personal name; components tagged [0]-[3] EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('surname', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
    )
class NumericUserIdentifier(char.NumericString):
    subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
class OrganizationName(char.PrintableString):
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
class PrivateDomainName(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('numeric', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
        namedtype.NamedType('printable', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
    )
class TerminalIdentifier(char.PrintableString):
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
class X121Address(char.NumericString):
    subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
class NetworkAddress(X121Address):
    pass
class AdministrationDomainName(univ.Choice):
    # CHOICE wrapped in an EXPLICIT APPLICATION 2 tag; zero-length allowed.
    tagSet = univ.Choice.tagSet.tagExplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
    )
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('numeric', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
        namedtype.NamedType('printable', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
    )
class CountryName(univ.Choice):
    # CHOICE wrapped in an EXPLICIT APPLICATION 1 tag: fixed-length numeric
    # (X.121 DCC) or alpha-2 country code.
    tagSet = univ.Choice.tagSet.tagExplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
    )
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
                                                       ub_country_name_numeric_length))),
        namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
    )
class BuiltInStandardAttributes(univ.Sequence):
    # Core O/R address components; the tagged ones are [0]-[6] EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('country-name', CountryName()),
        namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
        namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
        namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
        namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
    )
class ORAddress(univ.Sequence):
    # Complete X.400 originator/recipient address.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
        namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
        namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
    )
#
# PKIX1Implicit88
#
id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
class InvalidityDate(useful.GeneralizedTime):
    pass
# NOTE(review): these hold-instruction arcs start with '2.2.840...'; that
# looks like it should be '1.2.840...' (iso(1) member-body(2) us(840)).
# Kept as-is for fidelity to the source ASN.1 module — confirm before changing.
id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
class HoldInstructionCode(univ.ObjectIdentifier):
    pass
id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
class CRLReason(univ.Enumerated):
    # Revocation reason codes; value 7 is deliberately unassigned.
    namedValues = namedval.NamedValues(
        ('unspecified', 0),
        ('keyCompromise', 1),
        ('cACompromise', 2),
        ('affiliationChanged', 3),
        ('superseded', 4),
        ('cessationOfOperation', 5),
        ('certificateHold', 6),
        ('removeFromCRL', 8)
    )
id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
class CRLNumber(univ.Integer):
    # CRLNumber extension value: INTEGER constrained to 0..MAX.
    # Fixes: base the constraint on univ.Integer (was univ.SequenceOf, a
    # copy-paste slip) and use ValueRangeConstraint — ValueSizeConstraint
    # applies len() to the value, which raises TypeError for ints.
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
class BaseCRLNumber(CRLNumber):
    pass
# Extended-key-usage purpose OIDs (id-kp arc) and related extension OIDs.
id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1')
id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
class KeyPurposeId(univ.ObjectIdentifier):
    pass
class ExtKeyUsageSyntax(univ.SequenceOf):
    componentType = KeyPurposeId()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class ReasonFlags(univ.BitString):
    # Named bits used by the CRL distribution point reason flags.
    namedValues = namedval.NamedValues(
        ('unused', 0),
        ('keyCompromise', 1),
        ('cACompromise', 2),
        ('affiliationChanged', 3),
        ('superseded', 4),
        ('cessationOfOperation', 5),
        ('certificateHold', 6)
    )
class SkipCerts(univ.Integer):
    # INTEGER (0..MAX) used by the policyConstraints fields below.
    # Fix: use ValueRangeConstraint — ValueSizeConstraint applies len() to the
    # value, which raises TypeError for ints. (BaseDistance, defined later in
    # this module, already uses the correct ValueRangeConstraint form.)
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
class PolicyConstraints(univ.Sequence):
    # Both fields are optional SkipCerts counts, tagged [0]/[1] IMPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
class BasicConstraints(univ.Sequence):
    # cA defaults to FALSE; pathLenConstraint is a non-negative INTEGER.
    componentType = namedtype.NamedTypes(
        namedtype.DefaultedNamedType('cA', univ.Boolean(False)),
        namedtype.OptionalNamedType('pathLenConstraint',
                                    univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
    )
id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
class SubjectDirectoryAttributes(univ.SequenceOf):
    # SEQUENCE OF Attribute (defined earlier in this module), SIZE (1..MAX).
    componentType = Attribute()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class EDIPartyName(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('partyName',
                            DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
class AnotherName(univ.Sequence):
    # Type OID plus a value wrapped in ANY, [0] EXPLICIT.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type-id', univ.ObjectIdentifier()),
        namedtype.NamedType('value',
                            univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class GeneralName(univ.Choice):
    # The nine GeneralName alternatives, IMPLICIT context tags [0]-[8].
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('otherName',
                            AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('rfc822Name',
                            char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('dNSName',
                            char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.NamedType('x400Address',
                            ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.NamedType('directoryName',
                            Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
        namedtype.NamedType('ediPartyName',
                            EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
        namedtype.NamedType('uniformResourceIdentifier',
                            char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
        namedtype.NamedType('iPAddress', univ.OctetString().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
        namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
    )
class GeneralNames(univ.SequenceOf):
    componentType = GeneralName()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class AccessDescription(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
        namedtype.NamedType('accessLocation', GeneralName())
    )
class AuthorityInfoAccessSyntax(univ.SequenceOf):
    componentType = AccessDescription()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
class DistributionPointName(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('fullName', GeneralNames().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
class DistributionPoint(univ.Sequence):
    # All three fields optional, IMPLICIT context tags [0]-[2].
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
    )
class BaseDistance(univ.Integer):
    # INTEGER (0..MAX), used for GeneralSubtree minimum/maximum below.
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
class CRLDistPointsSyntax(univ.SequenceOf):
    componentType = DistributionPoint()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
class IssuingDistributionPoint(univ.Sequence):
    # Boolean fields default to FALSE; tags [0]-[4] as written below.
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
    )
class GeneralSubtree(univ.Sequence):
    # Name-constraint subtree: base name, minimum (DEFAULT 0), optional maximum.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('base', GeneralName()),
        namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
class GeneralSubtrees(univ.SequenceOf):
    componentType = GeneralSubtree()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
class NameConstraints(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
    )
class DisplayText(univ.Choice):
    # Human-readable text, limited to 200 characters in any alternative.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('visibleString',
                            char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
    )
class NoticeReference(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('organization', DisplayText()),
        namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
    )
class UserNotice(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('noticeRef', NoticeReference()),
        namedtype.OptionalNamedType('explicitText', DisplayText())
    )
class CPSuri(char.IA5String):
    pass
class PolicyQualifierId(univ.ObjectIdentifier):
    # Restricted to the two qualifier OIDs declared earlier in this module.
    subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
class CertPolicyId(univ.ObjectIdentifier):
    pass
class PolicyQualifierInfo(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
        namedtype.NamedType('qualifier', univ.Any())
    )
# OID of the X.509 certificatePolicies extension (id-ce 32).
id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
class PolicyInformation(univ.Sequence):
    """One policy entry: identifier plus optional qualifiers (1..MAX)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('policyIdentifier', CertPolicyId()),
        namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
    )
class CertificatePolicies(univ.SequenceOf):
    """SEQUENCE SIZE (1..MAX) OF PolicyInformation — the extension value."""
    componentType = PolicyInformation()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# OID of the X.509 policyMappings extension (id-ce 33).
id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
class PolicyMapping(univ.Sequence):
    """Maps an issuer-domain policy OID to a subject-domain policy OID."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
        namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
    )
class PolicyMappings(univ.SequenceOf):
    """SEQUENCE SIZE (1..MAX) OF PolicyMapping — the extension value."""
    componentType = PolicyMapping()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# OID of the X.509 privateKeyUsagePeriod extension (id-ce 16).
id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
class PrivateKeyUsagePeriod(univ.Sequence):
    """Validity window of the private key; [0]/[1] implicit OPTIONAL times."""
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
# OID of the X.509 keyUsage extension (id-ce 15).
id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
class KeyUsage(univ.BitString):
    """BIT STRING naming each key-usage bit position (0..8)."""
    namedValues = namedval.NamedValues(
        ('digitalSignature', 0),
        ('nonRepudiation', 1),
        ('keyEncipherment', 2),
        ('dataEncipherment', 3),
        ('keyAgreement', 4),
        ('keyCertSign', 5),
        ('cRLSign', 6),
        ('encipherOnly', 7),
        ('decipherOnly', 8)
    )
# Arc of all id-ce certificate-extension OIDs.
id_ce = univ.ObjectIdentifier('2.5.29')
# OID of the authorityKeyIdentifier extension (id-ce 35).
id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
class KeyIdentifier(univ.OctetString):
    """Opaque key identifier octets."""
    pass
# OID of the subjectKeyIdentifier extension (id-ce 14).
id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
class SubjectKeyIdentifier(KeyIdentifier):
    """Subject key identifier — same shape as KeyIdentifier."""
    pass
class AuthorityKeyIdentifier(univ.Sequence):
    """Identifies the CA key that signed this certificate.

    All three fields are OPTIONAL with implicit context tags [0]-[2].
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
    )
# OID of the certificateIssuer CRL-entry extension (id-ce 29).
id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
class CertificateIssuer(GeneralNames):
    """Certificate issuer names — alias of GeneralNames."""
    pass
# OID of the subjectAltName extension (id-ce 17).
id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
class SubjectAltName(GeneralNames):
    """Subject alternative names — alias of GeneralNames."""
    pass
# OID of the issuerAltName extension (id-ce 18).
id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
class IssuerAltName(GeneralNames):
    """Issuer alternative names — alias of GeneralNames."""
    pass
# map of AttributeType -> AttributeValue
# Decode templates for distinguished-name attribute values, keyed by the
# attribute-type OID found in an AttributeTypeAndValue pair.
certificateAttributesMap = {
    id_at_name: X520name(),
    id_at_surname: X520name(),
    id_at_givenName: X520name(),
    id_at_initials: X520name(),
    id_at_generationQualifier: X520name(),
    id_at_commonName: X520CommonName(),
    id_at_localityName: X520LocalityName(),
    id_at_stateOrProvinceName: X520StateOrProvinceName(),
    id_at_organizationName: X520OrganizationName(),
    id_at_organizationalUnitName: X520OrganizationalUnitName(),
    id_at_title: X520Title(),
    id_at_dnQualifier: X520dnQualifier(),
    id_at_countryName: X520countryName(),
    emailAddress: Pkcs9email(),
}
# map of Certificate Extension OIDs to Extensions
# Decode templates for the DER-encoded extnValue of each X.509 v3
# extension, keyed by the extension's OID.
certificateExtensionsMap = {
    id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
    id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
    id_ce_keyUsage: KeyUsage(),
    id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
    # The certificatePolicies extension value is a SEQUENCE OF
    # PolicyInformation (RFC 3280/5280 sec. 4.2.1.5), so the SequenceOf
    # wrapper defined above is the correct decode template — a bare
    # PolicyInformation() would only decode a single-policy extension.
    id_ce_certificatePolicies: CertificatePolicies(),
    id_ce_policyMappings: PolicyMappings(),
    id_ce_subjectAltName: SubjectAltName(),
    id_ce_issuerAltName: IssuerAltName(),
    id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
    id_ce_basicConstraints: BasicConstraints(),
    id_ce_nameConstraints: NameConstraints(),
    id_ce_policyConstraints: PolicyConstraints(),
    id_ce_extKeyUsage: ExtKeyUsageSyntax(),
    id_ce_cRLDistributionPoints: CRLDistPointsSyntax(),
    id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
    id_ce_cRLNumber: univ.Integer(),
    id_ce_deltaCRLIndicator: BaseCRLNumber(),
    id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
    id_ce_cRLReasons: CRLReason(),
    id_ce_holdInstructionCode: univ.ObjectIdentifier(),
    id_ce_invalidityDate: useful.GeneralizedTime(),
    id_ce_certificateIssuer: GeneralNames(),
}
| mit |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/python/lib/python3.3/encodings/iso2022_jp_1.py | 816 | 1061 | #
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points delegated to the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Multibyte incremental encoder bound to this codec.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Multibyte incremental decoder bound to this codec.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream reader bound to this codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream writer bound to this codec.
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    entry = {
        'name': 'iso2022_jp_1',
        'encode': Codec().encode,
        'decode': Codec().decode,
        'incrementalencoder': IncrementalEncoder,
        'incrementaldecoder': IncrementalDecoder,
        'streamreader': StreamReader,
        'streamwriter': StreamWriter,
    }
    return codecs.CodecInfo(**entry)
| gpl-3.0 |
uclouvain/osis | webservices/tests/api/serializers/test_auth_token.py | 2 | 2694 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.contrib.auth import backends
from django.test import TestCase
from rest_framework import serializers
from base.tests.factories.user import UserFactory
from webservices.api.serializers.auth_token import AuthTokenSerializer
class AuthTokenSerializerTestCase(TestCase):
    """Validation behaviour of AuthTokenSerializer."""
    def test_serializer_case_username_required(self):
        # No username at all: the serializer must reject the payload.
        empty_serializer = AuthTokenSerializer(data={})
        self.assertFalse(empty_serializer.is_valid())
    def test_serializer_case_username_not_exist(self):
        # Unknown username without force_user_creation raises on validation.
        serializer = AuthTokenSerializer(data={'username': 'dummy-username'})
        with self.assertRaises(serializers.ValidationError):
            serializer.is_valid(raise_exception=True)
    def test_serializer_case_username_not_exist_with_force_create(self):
        payload = {'username': 'dummy-username', 'force_user_creation': True}
        serializer = AuthTokenSerializer(data=payload)
        self.assertTrue(serializer.is_valid())
        user_model = backends.get_user_model()
        created_user = serializer.validated_data['user']
        self.assertEqual(
            'dummy-username',
            getattr(created_user, user_model.USERNAME_FIELD)
        )
    def test_serializer_case_success_ensure_user(self):
        existing_user = UserFactory()
        serializer = AuthTokenSerializer(data={'username': existing_user.username})
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data['user'], existing_user)
| agpl-3.0 |
Limezero/libreoffice | wizards/com/sun/star/wizards/agenda/AgendaWizardDialogImpl.py | 6 | 14523 | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import traceback
import os.path
from .AgendaWizardDialog import AgendaWizardDialog, uno
from .AgendaWizardDialogConst import HID
from .AgendaDocument import AgendaDocument, TextElement
from .TemplateConsts import TemplateConsts
from .TopicsControl import TopicsControl
from .CGAgenda import CGAgenda
from ..ui.PathSelection import PathSelection
from ..ui.event.UnoDataAware import UnoDataAware
from ..ui.event.RadioDataAware import RadioDataAware
from ..ui.event.CommonListener import TerminateListenerProcAdapter
from ..common.NoValidPathException import NoValidPathException
from ..common.SystemDialog import SystemDialog
from ..common.Desktop import Desktop
from ..common.HelpIds import HelpIds
from ..common.Configuration import Configuration
from ..common.FileAccess import FileAccess
from ..document.OfficeDocument import OfficeDocument
from com.sun.star.util import CloseVetoException
from com.sun.star.view.DocumentZoomType import OPTIMAL
from com.sun.star.awt.VclWindowPeerAttribute import YES_NO, DEF_NO
class AgendaWizardDialogImpl(AgendaWizardDialog):
    """Controller driving the OpenOffice/LibreOffice Agenda wizard dialog."""
    def __init__(self, xmsf):
        # xmsf: UNO service factory used to create office services.
        super(AgendaWizardDialogImpl, self).__init__(xmsf)
        self.filenameChanged = False  # set True once the user picks a path
        self.pageDesign = -1  # index of the loaded template; -1 = none yet
    def enterStep(self, OldStep, NewStep):
        # WizardDialog hook; no per-step work needed for the agenda wizard.
        pass
    def leaveStep(self, OldStep, NewStep):
        # WizardDialog hook; no per-step work needed for the agenda wizard.
        pass
    def startWizard(self, xMSF):
        """Build the wizard UI, load the agenda template and run the dialog.

        On any failure the terminate listener is removed and the wizard is
        marked as not running; the exception is printed, not propagated.
        """
        self.running = True
        try:
            #Number of steps on WizardDialog
            self.nMaxStep = 6
            self.agenda = CGAgenda()
            # read configuration data before we initialize the topics
            root = Configuration.getConfigurationRoot(
                self.xMSF, "/org.openoffice.Office.Writer/Wizards/Agenda",
                False)
            self.agenda.readConfiguration(root, "cp_")
            self.templateConsts = TemplateConsts
            self.initializePaths()
            # initialize the agenda template
            self.terminateListener = TerminateListenerProcAdapter(self)
            self.myAgendaDoc = AgendaDocument(
                self.xMSF, self.agenda, self.resources,
                self.templateConsts, self.terminateListener)
            self.initializeTemplates()
            self.myAgendaDoc.load(
                self.agendaTemplates[1][self.agenda.cp_AgendaType])
            self.drawConstants()
            # build the dialog.
            self.drawNaviBar()
            self.buildStep1()
            self.buildStep2()
            self.buildStep3()
            self.buildStep4()
            self.buildStep5()
            self.buildStep6()
            self.topicsControl = TopicsControl(self, self.xMSF, self.agenda)
            #special Control for setting the save Path:
            self.insertPathSelectionControl()
            # synchronize GUI and CGAgenda object.
            self.initConfiguration()
            # NOTE(review): .lower() is redundant when comparing to "".
            if self.myPathSelection.xSaveTextBox.Text.lower() == "":
                self.myPathSelection.initializePath()
            # create the peer
            xContainerWindow = self.myAgendaDoc.xFrame.ContainerWindow
            self.createWindowPeer(xContainerWindow)
            # initialize roadmap
            self.insertRoadmap()
            self.executeDialogFromComponent(self.myAgendaDoc.xFrame)
            self.removeTerminateListener()
            self.closeDocument()
            self.running = False
        except Exception:
            self.removeTerminateListener()
            traceback.print_exc()
            self.running = False
            return
    def insertPathSelectionControl(self):
        """Create and configure the save-path picker shown on the last step."""
        self.myPathSelection = PathSelection(
            self.xMSF, self, PathSelection.TransferMode.SAVE,
            PathSelection.DialogTypes.FILE)
        # insert(step, x, y, width, height, label, enabled, help ids)
        self.myPathSelection.insert(6, 97, 70, 205, 45,
            self.resources.reslblTemplatePath_value, True,
            HelpIds.getHelpIdString(HID + 24),
            HelpIds.getHelpIdString(HID + 25))
        self.myPathSelection.sDefaultDirectory = self.sUserTemplatePath
        self.myPathSelection.sDefaultName = "myAgendaTemplate.ott"
        self.myPathSelection.sDefaultFilter = "writer8_template"
        self.myPathSelection.addSelectionListener(self)
    '''
    bind controls to the agenda member (DataAware model)
    '''
    def initConfiguration(self):
        """Wire every dialog control to its CGAgenda field and push the
        stored configuration values into the UI (updateUI)."""
        self.xDialogModel.listPageDesign.StringItemList = \
            tuple(self.agendaTemplates[0])
        UnoDataAware.attachListBox(
            self.agenda, "cp_AgendaType", self.listPageDesign, True).updateUI()
        # remember which template the restored configuration selects
        self.pageDesign = self.agenda.cp_AgendaType
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_IncludeMinutes", self.chkMinutes, True).updateUI()
        UnoDataAware.attachEditControl(
            self.agenda, "cp_Title", self.txtTitle, True).updateUI()
        UnoDataAware.attachDateControl(
            self.agenda, "cp_Date", self.txtDate, True).updateUI()
        UnoDataAware.attachTimeControl(
            self.agenda, "cp_Time", self.txtTime, True).updateUI()
        UnoDataAware.attachEditControl(
            self.agenda, "cp_Location", self.cbLocation, True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowMeetingType", self.chkMeetingTitle,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowRead", self.chkRead, True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowBring", self.chkBring, True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowNotes", self.chkNotes, True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowCalledBy", self.chkConvenedBy,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowFacilitator", self.chkPresiding,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowNotetaker", self.chkNoteTaker,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowTimekeeper", self.chkTimekeeper,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowAttendees", self.chkAttendees,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowObservers", self.chkObservers,
            True).updateUI()
        UnoDataAware.attachCheckBox(
            self.agenda, "cp_ShowResourcePersons",self.chkResourcePersons,
            True).updateUI()
        UnoDataAware.attachEditControl(
            self.agenda, "cp_TemplateName", self.txtTemplateName,
            True).updateUI()
        RadioDataAware.attachRadioButtons(
            self.agenda, "cp_ProceedMethod",
            (self.optCreateAgenda, self.optMakeChanges), True).updateUI()
    def insertRoadmap(self):
        """Populate the roadmap sidebar and select the first step."""
        self.addRoadmap()
        self.insertRoadMapItems(
            self.resources.RoadmapLabels, [True, True, True, True, True, True])
        self.setRoadmapInteractive(True)
        self.setRoadmapComplete(True)
        self.setCurrentRoadmapItemID(1)
    '''
    read the available agenda wizard templates.
    '''
    def initializeTemplates(self):
        """Fill self.agendaTemplates from the 'wizard/agenda' folder.

        Returns True on success, False when no valid path was found.
        """
        try:
            sAgendaPath = self.sTemplatePath + "/wizard/agenda"
            self.agendaTemplates = FileAccess.getFolderTitles(
                self.xMSF, "aw", sAgendaPath, self.resources.dictPageDesign)
            return True
        except NoValidPathException:
            traceback.print_exc()
            return False
'''
first page, page design listbox changed.
'''
def pageDesignChanged(self):
try:
SelectedItemPos = self.listPageDesign.SelectedItemPos
#avoid to load the same item again
if self.pageDesign is not SelectedItemPos:
self.pageDesign = SelectedItemPos
self.myAgendaDoc.load(
self.agendaTemplates[1][SelectedItemPos])
self.drawConstants()
except Exception:
traceback.print_exc()
    #textFields listeners
    # Each handler redraws the title area element tied to its control.
    def txtTitleTextChanged(self):
        self.myAgendaDoc.redrawTitle("txtTitle")
    def txtDateTextChanged(self):
        self.myAgendaDoc.redrawTitle("txtDate")
    def txtTimeTextChanged(self):
        self.myAgendaDoc.redrawTitle("txtTime")
    def txtLocationTextChanged(self):
        self.myAgendaDoc.redrawTitle("cbLocation")
    #checkbox listeners
    # Each handler redraws the template section governed by its checkbox.
    def chkUseMeetingTypeItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_MEETING_TYPE)
    def chkUseReadItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_READ)
    def chkUseBringItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_BRING)
    def chkUseNotesItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_NOTES)
    def chkUseCalledByItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_CALLED_BY)
    def chkUseFacilitatorItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_FACILITATOR)
    def chkUseNoteTakerItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_NOTETAKER)
    def chkUseTimeKeeperItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_TIMEKEEPER)
    def chkUseAttendeesItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_PARTICIPANTS)
    def chkUseObserversItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_OBSERVERS)
    def chkUseResourcePersonsItemChanged(self):
        self.myAgendaDoc.redraw(self.templateConsts.FILLIN_RESOURCE_PERSONS)
    # Topic-table row manipulation, delegated to the TopicsControl.
    def insertRow(self):
        self.topicsControl.insertRow()
    def removeRow(self):
        self.topicsControl.removeRow()
    def rowUp(self):
        self.topicsControl.rowUp()
    def rowDown(self):
        self.topicsControl.rowDown()
    def cancelWizard(self):
        # Close the dialog without saving anything.
        self.xUnoDialog.endExecute()
        self.running = False
    def finishWizard(self):
        """Save the agenda template and optionally open a new document.

        Returns False when the user declines to overwrite an existing file
        (the dialog stays open), True otherwise.
        """
        self.switchToStep(self.getCurrentStep(), self.nMaxStep)
        bSaveSuccess = False
        endWizard = True
        try:
            self.sPath = self.myPathSelection.getSelectedPath()
            # NOTE(review): sPath may be an office file URL in some flows;
            # confirm os.path.exists handles the value used here.
            if not self.sPath or not os.path.exists(self.sPath):
                self.myPathSelection.triggerPathPicker()
                self.sPath = self.myPathSelection.getSelectedPath()
            #first, if the filename was not changed, thus
            #it is coming from a saved session, check if the
            # file exists and warn the user.
            if not self.filenameChanged:
                answer = SystemDialog.showMessageBox(
                    self.xMSF, "MessBox", YES_NO + DEF_NO,
                    self.resources.resOverwriteWarning,
                    self.xUnoDialog.Peer)
                if answer == 3:
                    # user said: no, do not overwrite
                    endWizard = False
                    return False
            xDocProps = self.myAgendaDoc.xTextDocument.DocumentProperties
            xDocProps.Title = self.txtTemplateName.Text
            self.myAgendaDoc.setWizardTemplateDocInfo( \
                self.resources.resAgendaWizardDialog_title,
                self.resources.resTemplateDescription)
            bSaveSuccess = OfficeDocument.store(
                self.xMSF, self.myAgendaDoc.xTextDocument, self.sPath,
                "writer8_template")
            if bSaveSuccess:
                # persist topics and wizard configuration
                self.topicsControl.saveTopics(self.agenda)
                root = Configuration.getConfigurationRoot(
                    self.xMSF, "/org.openoffice.Office.Writer/Wizards/Agenda",
                    True)
                self.agenda.writeConfiguration(root, "cp_")
                root.commitChanges()
                self.myAgendaDoc.finish(self.topicsControl.scrollfields)
                # load the saved template: AsTemplate=True creates a new
                # document from it, False opens it for editing
                loadValues = list(range(2))
                loadValues[0] = uno.createUnoStruct( \
                    'com.sun.star.beans.PropertyValue')
                loadValues[0].Name = "AsTemplate"
                if self.agenda.cp_ProceedMethod == 1:
                    loadValues[0].Value = True
                else:
                    loadValues[0].Value = False
                loadValues[1] = uno.createUnoStruct( \
                    'com.sun.star.beans.PropertyValue')
                loadValues[1].Name = "InteractionHandler"
                xIH = self.xMSF.createInstance(
                    "com.sun.star.comp.uui.UUIInteractionHandler")
                loadValues[1].Value = xIH
                oDoc = OfficeDocument.load(
                    Desktop.getDesktop(self.xMSF),
                    self.sPath, "_default", loadValues)
                oDoc.CurrentController.ViewSettings.ZoomType = OPTIMAL
            else:
                pass
        except Exception:
            traceback.print_exc()
        finally:
            if endWizard:
                self.xUnoDialog.endExecute()
                self.running = False
        return True
    def closeDocument(self):
        """Close the agenda document's frame, tolerating a close veto."""
        try:
            self.myAgendaDoc.xFrame.close(False)
        except CloseVetoException:
            traceback.print_exc()
    def drawConstants(self):
        '''Localise the template'''
        # Replace every constant fill-in range with its localized text,
        # looked up by the lower-cased placeholder string.
        constRangeList = self.myAgendaDoc.searchFillInItems(1)
        for i in constRangeList:
            text = i.String.lower()
            aux = TextElement(i, self.resources.dictConstants[text])
            aux.write()
    def validatePath(self):
        """Record that the user picked a path, so finishWizard skips the
        overwrite warning; reset the picker's one-shot flag."""
        if self.myPathSelection.usedPathPicker:
            self.filenameChanged = True
            self.myPathSelection.usedPathPicker = False
| gpl-3.0 |
msmolens/VTK | ThirdParty/Twisted/twisted/test/test_strerror.py | 40 | 5187 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test strerror
"""
import socket
import os
from twisted.trial.unittest import TestCase
from twisted.internet.tcp import ECONNABORTED
from twisted.python.win32 import _ErrorFormatter, formatError
from twisted.python.runtime import platform
# Stand-in used by the tests below to simulate the (errno, message)
# exception that L{ctypes.WinError} would return on Windows.
class _MyWindowsException(OSError):
    """
    An exception type like L{ctypes.WinError}, but available on all platforms.
    """
class ErrorFormatingTestCase(TestCase):
    """
    Tests for C{_ErrorFormatter.formatError}.
    """
    # Error code used throughout these tests, and the message the mocked
    # formatting mechanisms are expected to yield for it.
    probeErrorCode = ECONNABORTED
    probeMessage = "correct message value"
    def test_strerrorFormatting(self):
        """
        L{_ErrorFormatter.formatError} should use L{os.strerror} to format
        error messages if it is constructed without any better mechanism.
        """
        # All three mechanisms absent -> os.strerror fallback.
        formatter = _ErrorFormatter(None, None, None)
        message = formatter.formatError(self.probeErrorCode)
        self.assertEqual(message, os.strerror(self.probeErrorCode))
    def test_emptyErrorTab(self):
        """
        L{_ErrorFormatter.formatError} should use L{os.strerror} to format
        error messages if it is constructed with only an error tab which does
        not contain the error code it is called with.
        """
        error = 1
        # Sanity check
        self.assertNotEqual(self.probeErrorCode, error)
        formatter = _ErrorFormatter(None, None, {error: 'wrong message'})
        message = formatter.formatError(self.probeErrorCode)
        self.assertEqual(message, os.strerror(self.probeErrorCode))
    def test_errorTab(self):
        """
        L{_ErrorFormatter.formatError} should use C{errorTab} if it is supplied
        and contains the requested error code.
        """
        formatter = _ErrorFormatter(
            None, None, {self.probeErrorCode: self.probeMessage})
        message = formatter.formatError(self.probeErrorCode)
        self.assertEqual(message, self.probeMessage)
    def test_formatMessage(self):
        """
        L{_ErrorFormatter.formatError} should return the return value of
        C{formatMessage} if it is supplied.
        """
        # Record the codes formatMessage is called with to verify delegation.
        formatCalls = []
        def formatMessage(errorCode):
            formatCalls.append(errorCode)
            return self.probeMessage
        # formatMessage takes priority over the (wrong) errorTab entry.
        formatter = _ErrorFormatter(
            None, formatMessage, {self.probeErrorCode: 'wrong message'})
        message = formatter.formatError(self.probeErrorCode)
        self.assertEqual(message, self.probeMessage)
        self.assertEqual(formatCalls, [self.probeErrorCode])
    def test_winError(self):
        """
        L{_ErrorFormatter.formatError} should return the message argument from
        the exception L{winError} returns, if L{winError} is supplied.
        """
        winCalls = []
        def winError(errorCode):
            winCalls.append(errorCode)
            return _MyWindowsException(errorCode, self.probeMessage)
        # winError takes priority over both other (deliberately wrong)
        # mechanisms.
        formatter = _ErrorFormatter(
            winError,
            lambda error: 'formatMessage: wrong message',
            {self.probeErrorCode: 'errorTab: wrong message'})
        message = formatter.formatError(self.probeErrorCode)
        self.assertEqual(message, self.probeMessage)
    def test_fromEnvironment(self):
        """
        L{_ErrorFormatter.fromEnvironment} should create an L{_ErrorFormatter}
        instance with attributes populated from available modules.
        """
        formatter = _ErrorFormatter.fromEnvironment()
        # Each branch checks one mechanism, then disables it so the next
        # branch exercises the following fallback.
        if formatter.winError is not None:
            from ctypes import WinError
            self.assertEqual(
                formatter.formatError(self.probeErrorCode),
                WinError(self.probeErrorCode).strerror)
            formatter.winError = None
        if formatter.formatMessage is not None:
            from win32api import FormatMessage
            self.assertEqual(
                formatter.formatError(self.probeErrorCode),
                FormatMessage(self.probeErrorCode))
            formatter.formatMessage = None
        if formatter.errorTab is not None:
            from socket import errorTab
            self.assertEqual(
                formatter.formatError(self.probeErrorCode),
                errorTab[self.probeErrorCode])
    if platform.getType() != "win32":
        test_fromEnvironment.skip = "Test will run only on Windows."
    def test_correctLookups(self):
        """
        Given a known-good errno, make sure that formatMessage gives results
        matching either C{socket.errorTab}, C{ctypes.WinError}, or
        C{win32api.FormatMessage}.
        """
        # Collect every message the available mechanisms would produce;
        # ctypes / win32api are optional, so ImportError is tolerated.
        acceptable = [socket.errorTab[ECONNABORTED]]
        try:
            from ctypes import WinError
            acceptable.append(WinError(ECONNABORTED).strerror)
        except ImportError:
            pass
        try:
            from win32api import FormatMessage
            acceptable.append(FormatMessage(ECONNABORTED))
        except ImportError:
            pass
        self.assertIn(formatError(ECONNABORTED), acceptable)
    if platform.getType() != "win32":
        test_correctLookups.skip = "Test will run only on Windows."
| bsd-3-clause |
partofthething/home-assistant | homeassistant/components/netgear_lte/binary_sensor.py | 14 | 1321 | """Support for Netgear LTE binary sensors."""
from homeassistant.components.binary_sensor import DOMAIN, BinarySensorEntity
from homeassistant.exceptions import PlatformNotReady
from . import CONF_MONITORED_CONDITIONS, DATA_KEY, LTEEntity
from .sensor_types import BINARY_SENSOR_CLASSES
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Netgear LTE binary sensor devices.

    Entities are only created via discovery; the body already treats a
    missing discovery_info as a no-op, so the parameter now defaults to
    None as is conventional for Home Assistant platform setup functions
    (backward-compatible with existing positional callers).
    """
    if discovery_info is None:
        return
    modem_data = hass.data[DATA_KEY].get_modem_data(discovery_info)
    if not modem_data or not modem_data.data:
        # Modem data not available yet; ask Home Assistant to retry setup.
        raise PlatformNotReady
    binary_sensor_conf = discovery_info[DOMAIN]
    monitored_conditions = binary_sensor_conf[CONF_MONITORED_CONDITIONS]
    # Build all entities in one pass instead of an append-in-a-loop.
    async_add_entities(
        [LTEBinarySensor(modem_data, sensor_type)
         for sensor_type in monitored_conditions]
    )
class LTEBinarySensor(LTEEntity, BinarySensorEntity):
    """Netgear LTE binary sensor entity."""
    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        # The modem data object exposes each sensor_type as an attribute.
        return getattr(self.modem_data.data, self.sensor_type)
    @property
    def device_class(self):
        """Return the class of binary sensor."""
        # Static mapping from sensor type to its device class.
        return BINARY_SENSOR_CLASSES[self.sensor_type]
| mit |
MisterRios/python_koans | python3/koans/about_method_bindings.py | 3 | 2842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def function():
    # Module-level fixture used by the binding koans below.
    return "pineapple"
def function2():
    # Second fixture; the koans attach attributes to this function object.
    return "tractor"
class Class:
    def method(self):
        # Bound-method fixture for the method-binding koans.
        return "parrot"
class AboutMethodBindings(Koan):
    """Koans exploring how Python binds methods to objects and the
    descriptor protocol (__get__/__set__). The literal answers below are
    the expected koan solutions."""
    def test_methods_are_bound_to_an_object(self):
        obj = Class()
        self.assertEqual(True, obj.method.__self__ == obj)
    def test_methods_are_also_bound_to_a_function(self):
        obj = Class()
        self.assertEqual('parrot', obj.method())
        self.assertEqual('parrot', obj.method.__func__(obj))
    def test_functions_have_attributes(self):
        obj = Class()
        # NOTE(review): dir() lengths are CPython-version-specific koan
        # answers — confirm against the local interpreter.
        self.assertEqual(34, len(dir(function)))
        self.assertEqual(True, dir(function) == dir(obj.method.__func__))
    def test_methods_have_different_attributes(self):
        obj = Class()
        # NOTE(review): version-specific answer, as above.
        self.assertEqual(26, len(dir(obj.method)))
    def test_setting_attributes_on_an_unbound_function(self):
        function.cherries = 3
        self.assertEqual(3, function.cherries)
    def test_setting_attributes_on_a_bound_method_directly(self):
        obj = Class()
        with self.assertRaises(AttributeError): obj.method.cherries = 3
    def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
        obj = Class()
        obj.method.__func__.cherries = 3
        self.assertEqual(3, obj.method.cherries)
    def test_functions_can_have_inner_functions(self):
        function2.get_fruit = function
        self.assertEqual('pineapple', function2.get_fruit())
    def test_inner_functions_are_unbound(self):
        function2.get_fruit = function
        with self.assertRaises(AttributeError): cls = function2.get_fruit.__self__
    # ------------------------------------------------------------------
    class BoundClass:
        def __get__(self, obj, cls):
            return (self, obj, cls)
    binding = BoundClass()
    def test_get_descriptor_resolves_attribute_binding(self):
        bound_obj, binding_owner, owner_type = self.binding
        # Look at BoundClass.__get__():
        # bound_obj = self
        # binding_owner = obj
        # owner_type = cls
        self.assertEqual('BoundClass', bound_obj.__class__.__name__)
        self.assertEqual('AboutMethodBindings', binding_owner.__class__.__name__)
        self.assertEqual(AboutMethodBindings, owner_type)
    # ------------------------------------------------------------------
    class SuperColor:
        def __init__(self):
            self.choice = None
        def __set__(self, obj, val):
            self.choice = val
    color = SuperColor()
    def test_set_descriptor_changes_behavior_of_attribute_assignment_changes(self):
        self.assertEqual(None, self.color.choice)
        # Assigning through the data descriptor routes to __set__.
        self.color = 'purple'
        self.assertEqual('purple', self.color.choice)
| mit |
aktech/sympy | sympy/geometry/tests/test_plane.py | 22 | 6944 | from __future__ import division
from sympy import Dummy, S, Symbol, pi, sqrt, asin
from sympy.geometry import Line, Point, Ray, Segment, Point3D, Line3D, Ray3D, Segment3D, Plane
from sympy.geometry.util import are_coplanar
from sympy.utilities.pytest import raises, slow
@slow
def test_plane():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
p1 = Point3D(0, 0, 0)
p2 = Point3D(1, 1, 1)
p3 = Point3D(1, 2, 3)
p4 = Point3D(x, x, x)
p5 = Point3D(y, y, y)
pl3 = Plane(p1, p2, p3)
pl4 = Plane(p1, normal_vector=(1, 1, 1))
pl4b = Plane(p1, p2)
pl5 = Plane(p3, normal_vector=(1, 2, 3))
pl6 = Plane(Point3D(2, 3, 7), normal_vector=(2, 2, 2))
pl7 = Plane(Point3D(1, -5, -6), normal_vector=(1, -2, 1))
l1 = Line3D(Point3D(5, 0, 0), Point3D(1, -1, 1))
l2 = Line3D(Point3D(0, -2, 0), Point3D(3, 1, 1))
l3 = Line3D(Point3D(0, -1, 0), Point3D(5, -1, 9))
assert Plane(p1, p2, p3) != Plane(p1, p3, p2)
assert Plane(p1, p2, p3).is_coplanar(Plane(p1, p3, p2))
assert pl3 == Plane(Point3D(0, 0, 0), normal_vector=(1, -2, 1))
assert pl3 != pl4
assert pl4 == pl4b
assert pl5 == Plane(Point3D(1, 2, 3), normal_vector=(1, 2, 3))
assert pl5.equation(x, y, z) == x + 2*y + 3*z - 14
assert pl3.equation(x, y, z) == x - 2*y + z
assert pl3.p1 == p1
assert pl4.p1 == p1
assert pl5.p1 == p3
assert pl4.normal_vector == (1, 1, 1)
assert pl5.normal_vector == (1, 2, 3)
assert p1 in pl3
assert p1 in pl4
assert p3 in pl5
assert pl3.projection(Point(0, 0)) == p1
p = pl3.projection(Point3D(1, 1, 0))
assert p == Point3D(7/6, 2/3, 1/6)
assert p in pl3
l = pl3.projection_line(Line(Point(0, 0), Point(1, 1)))
assert l == Line3D(Point3D(0, 0, 0), Point3D(7/6, 2/3, 1/6))
assert l in pl3
# get a segment that does not intersect the plane which is also
# parallel to pl3's normal veector
t = Dummy()
r = pl3.random_point()
a = pl3.perpendicular_line(r).arbitrary_point(t)
s = Segment3D(a.subs(t, 1), a.subs(t, 2))
assert s.p1 not in pl3 and s.p2 not in pl3
assert pl3.projection_line(s).equals(r)
assert pl3.projection_line(Segment(Point(1, 0), Point(1, 1))) == \
Segment3D(Point3D(5/6, 1/3, -1/6), Point3D(7/6, 2/3, 1/6))
assert pl6.projection_line(Ray(Point(1, 0), Point(1, 1))) == \
Ray3D(Point3D(14/3, 11/3, 11/3), Point3D(13/3, 13/3, 10/3))
assert pl3.perpendicular_line(r.args) == pl3.perpendicular_line(r)
assert pl3.is_parallel(pl6) is False
assert pl4.is_parallel(pl6)
assert pl6.is_parallel(l1) is False
assert pl3.is_perpendicular(pl6)
assert pl4.is_perpendicular(pl7)
assert pl6.is_perpendicular(pl7)
assert pl6.is_perpendicular(l1) is False
assert pl7.distance(Point3D(1, 3, 5)) == 5*sqrt(6)/6
assert pl6.distance(Point3D(0, 0, 0)) == 4*sqrt(3)
assert pl6.distance(pl6.p1) == 0
assert pl7.distance(pl6) == 0
assert pl7.distance(l1) == 0
assert pl6.distance(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == 0
pl6.distance(Plane(Point3D(5, 5, 5), normal_vector=(8, 8, 8))) == sqrt(3)
assert pl6.angle_between(pl3) == pi/2
assert pl6.angle_between(pl6) == 0
assert pl6.angle_between(pl4) == 0
assert pl7.angle_between(Line3D(Point3D(2, 3, 5), Point3D(2, 4, 6))) == \
-asin(sqrt(3)/6)
assert pl6.angle_between(Ray3D(Point3D(2, 4, 1), Point3D(6, 5, 3))) == \
asin(sqrt(7)/3)
assert pl7.angle_between(Segment3D(Point3D(5, 6, 1), Point3D(1, 2, 4))) == \
-asin(7*sqrt(246)/246)
assert are_coplanar(l1, l2, l3) is False
assert are_coplanar(l1) is False
assert are_coplanar(Point3D(2, 7, 2), Point3D(0, 0, 2),
Point3D(1, 1, 2), Point3D(1, 2, 2))
assert are_coplanar(Plane(p1, p2, p3), Plane(p1, p3, p2))
assert Plane.are_concurrent(pl3, pl4, pl5) is False
assert Plane.are_concurrent(pl6) is False
raises(ValueError, lambda: Plane.are_concurrent(Point3D(0, 0, 0)))
assert pl3.parallel_plane(Point3D(1, 2, 5)) == Plane(Point3D(1, 2, 5), \
normal_vector=(1, -2, 1))
# perpendicular_plane
p = Plane((0, 0, 0), (1, 0, 0))
# default
assert p.perpendicular_plane() == Plane(Point3D(0, 0, 0), (0, 1, 0))
# 1 pt
assert p.perpendicular_plane(Point3D(1, 0, 1)) == \
Plane(Point3D(1, 0, 1), (0, 1, 0))
# pts as tuples
assert p.perpendicular_plane((1, 0, 1), (1, 1, 1)) == \
Plane(Point3D(1, 0, 1), (0, 0, -1))
a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
Z = (0, 0, 1)
p = Plane(a, normal_vector=Z)
# case 4
assert p.perpendicular_plane(a, b) == Plane(a, (1, 0, 0))
n = Point3D(*Z)
# case 1
assert p.perpendicular_plane(a, n) == Plane(a, (-1, 0, 0))
# case 2
assert Plane(a, normal_vector=b.args).perpendicular_plane(a, a + b) == \
Plane(Point3D(0, 0, 0), (1, 0, 0))
# case 1&3
assert Plane(b, normal_vector=Z).perpendicular_plane(b, b + n) == \
Plane(Point3D(0, 1, 0), (-1, 0, 0))
# case 2&3
assert Plane(b, normal_vector=b.args).perpendicular_plane(n, n + b) == \
Plane(Point3D(0, 0, 1), (1, 0, 0))
assert pl6.intersection(pl6) == [pl6]
assert pl4.intersection(pl4.p1) == [pl4.p1]
assert pl3.intersection(pl6) == [
Line3D(Point3D(8, 4, 0), Point3D(2, 4, 6))]
assert pl3.intersection(Line3D(Point3D(1,2,4), Point3D(4,4,2))) == [
Point3D(2, 8/3, 10/3)]
assert pl3.intersection(Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
) == [Line3D(Point3D(-24, -12, 0), Point3D(-25, -13, -1))]
assert pl6.intersection(Ray3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
Point3D(-1, 3, 10)]
assert pl6.intersection(Segment3D(Point3D(2, 3, 1), Point3D(1, 3, 4))) == [
Point3D(-1, 3, 10)]
assert pl7.intersection(Line(Point(2, 3), Point(4, 2))) == [
Point3D(13/2, 3/4, 0)]
r = Ray(Point(2, 3), Point(4, 2))
assert Plane((1,2,0), normal_vector=(0,0,1)).intersection(r) == [
Ray3D(Point(2, 3), Point(4, 2))]
assert pl3.random_point() in pl3
# issue 8570
l2 = Line3D(Point3D(S(50000004459633)/5000000000000,
-S(891926590718643)/1000000000000000,
S(231800966893633)/100000000000000),
Point3D(S(50000004459633)/50000000000000,
-S(222981647679771)/250000000000000,
S(231800966893633)/100000000000000))
p2 = Plane(Point3D(S(402775636372767)/100000000000000,
-S(97224357654973)/100000000000000,
S(216793600814789)/100000000000000),
(-S('9.00000087501922'), -S('4.81170658872543e-13'),
S('0.0')))
assert str([i.n(2) for i in p2.intersection(l2)]) == \
'[Point3D(4.0, -0.89, 2.3)]'
| bsd-3-clause |
chirilo/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/executive_mock.py | 117 | 7106 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import StringIO
from webkitpy.common.system.executive import ScriptError
_log = logging.getLogger(__name__)
class MockProcess(object):
    """Minimal stand-in for a subprocess.Popen handle.

    Exposes only the attribute surface the executive mocks need: a fake
    pid, in-memory stdout/stderr/stdin streams, and a zero return code.
    """

    def __init__(self, stdout='MOCK STDOUT\n', stderr=''):
        # In-memory streams so callers can read/write without a real child.
        self.stdout = StringIO.StringIO(stdout)
        self.stderr = StringIO.StringIO(stderr)
        self.stdin = StringIO.StringIO()
        # Fixed fake pid and a success return code.
        self.pid = 42
        self.returncode = 0

    def wait(self):
        # The fake child has already "exited"; there is nothing to wait on.
        return
# FIXME: This should be unified with MockExecutive2
class MockExecutive(object):
    """Test double for webkitpy's Executive.

    Instead of spawning real child processes it records every command in
    self.calls (so tests can assert on what would have been run), returns
    canned "MOCK ..." output, and can optionally log or raise on demand.
    """

    # Sentinels mirroring subprocess.PIPE / subprocess.STDOUT for callers
    # that pass stream constants through to popen().
    PIPE = "MOCK PIPE"
    STDOUT = "MOCK STDOUT"

    @staticmethod
    def ignore_error(error):
        """No-op error handler callers can pass to suppress ScriptErrors."""
        pass

    def __init__(self, should_log=False, should_throw=False, should_throw_when_run=None):
        # should_log: emit "MOCK ..." lines via the module logger so tests
        #   can assert on logged output.
        # should_throw: make run_command always raise ScriptError.
        # should_throw_when_run: iterable of argument strings; any command
        #   whose args intersect it raises ScriptError.
        self._should_log = should_log
        self._should_throw = should_throw
        self._should_throw_when_run = should_throw_when_run or set()
        # FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
        # Maps fake process names to pids; pre-seeded with this test process.
        self._running_pids = {'test-webkitpy': os.getpid()}
        # Single MockProcess shared by every popen() call (created lazily).
        self._proc = None
        # Flat record of every args list passed to run_command()/popen().
        self.calls = []
        self.pid_to_system_pid = {}

    def check_running_pid(self, pid):
        """Return True if pid is one of the fake "running" pids."""
        return pid in self._running_pids.values()

    def running_pids(self, process_name_filter):
        """Return pids whose fake process name passes process_name_filter."""
        running_pids = []
        for process_name, process_pid in self._running_pids.iteritems():
            if process_name_filter(process_name):
                running_pids.append(process_pid)
        # Always logged (not gated on _should_log) so tests can observe it.
        _log.info("MOCK running_pids: %s" % running_pids)
        return running_pids

    def run_and_throw_if_fail(self, args, quiet=False, cwd=None, env=None):
        """Pretend to run args; raise ScriptError if args matches the
        configured should_throw_when_run set, else return canned output.

        Note: does NOT append to self.calls, unlike run_command()/popen().
        """
        if self._should_log:
            env_string = ""
            if env:
                env_string = ", env=%s" % env
            _log.info("MOCK run_and_throw_if_fail: %s, cwd=%s%s" % (args, cwd, env_string))
        if self._should_throw_when_run.intersection(args):
            raise ScriptError("Exception for %s" % args, output="MOCK command output")
        return "MOCK output of child process"

    def command_for_printing(self, args):
        """Return args joined into a single human-readable command string."""
        string_args = map(unicode, args)
        return " ".join(string_args)

    def run_command(self,
                    args,
                    cwd=None,
                    input=None,
                    error_handler=None,
                    return_exit_code=False,
                    return_stderr=True,
                    decode_output=False,
                    env=None):
        """Record args in self.calls and return canned output.

        Raises ScriptError when configured via should_throw or when args
        intersects should_throw_when_run. The error_handler, exit-code and
        stderr options are accepted for interface compatibility but ignored.
        """
        self.calls.append(args)
        # Executive requires a sequence of args, never a shell string.
        assert(isinstance(args, list) or isinstance(args, tuple))
        if self._should_log:
            env_string = ""
            if env:
                env_string = ", env=%s" % env
            input_string = ""
            if input:
                input_string = ", input=%s" % input
            _log.info("MOCK run_command: %s, cwd=%s%s%s" % (args, cwd, env_string, input_string))
        output = "MOCK output of child process"
        if self._should_throw_when_run.intersection(args):
            raise ScriptError("Exception for %s" % args, output="MOCK command output")
        if self._should_throw:
            raise ScriptError("MOCK ScriptError", output=output)
        return output

    def cpu_count(self):
        """Pretend the machine has two CPUs."""
        return 2

    def kill_all(self, process_name):
        # Intentionally a no-op in the mock.
        pass

    def kill_process(self, pid):
        # Intentionally a no-op in the mock.
        pass

    def popen(self, args, cwd=None, env=None, **kwargs):
        """Record args and return the shared MockProcess handle.

        The same MockProcess instance is returned for every call, created
        lazily on first use.
        """
        self.calls.append(args)
        if self._should_log:
            cwd_string = ""
            if cwd:
                cwd_string = ", cwd=%s" % cwd
            env_string = ""
            if env:
                env_string = ", env=%s" % env
            _log.info("MOCK popen: %s%s%s" % (args, cwd_string, env_string))
        if not self._proc:
            self._proc = MockProcess()
        return self._proc

    def run_in_parallel(self, commands):
        """Run (cmd_line, cwd) pairs serially via run_command().

        Returns a list of [exit_code, stdout, stderr] triples. The commands
        are recorded in self.calls as a single nested list (one group entry
        for the whole batch) rather than as individual flat entries.
        """
        num_previous_calls = len(self.calls)
        command_outputs = []
        for cmd_line, cwd in commands:
            command_outputs.append([0, self.run_command(cmd_line, cwd=cwd), ''])
        # Collapse the calls run_command() just appended into one group.
        new_calls = self.calls[num_previous_calls:]
        self.calls = self.calls[:num_previous_calls]
        self.calls.append(new_calls)
        return command_outputs
class MockExecutive2(MockExecutive):
    """A quieter MockExecutive: returns canned results and never logs."""

    def __init__(self, output='', exit_code=0, exception=None, run_command_fn=None, stderr=''):
        # Canned results handed back by run_command(); no process is spawned.
        self.calls = []
        self._exception = exception
        self._run_command_fn = run_command_fn
        self._exit_code = exit_code
        self._output = output
        self._stderr = stderr

    def run_command(self,
                    args,
                    cwd=None,
                    input=None,
                    error_handler=None,
                    return_exit_code=False,
                    return_stderr=True,
                    decode_output=False,
                    env=None):
        """Record args and return the configured canned output.

        Precedence: raise the configured exception, else delegate to
        run_command_fn, else return the exit code if requested, else invoke
        error_handler for a nonzero exit code and return the output
        (with stderr appended unless return_stderr is False).
        """
        # Record the command so tests can inspect what was "run".
        self.calls.append(args)
        assert isinstance(args, (list, tuple))
        if self._exception:
            raise self._exception  # pylint: disable=E0702
        if self._run_command_fn:
            return self._run_command_fn(args)
        if return_exit_code:
            return self._exit_code
        if self._exit_code and error_handler:
            error_handler(ScriptError(script_args=args,
                                      exit_code=self._exit_code,
                                      output=self._output))
        return self._output + self._stderr if return_stderr else self._output
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.