| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (class label, 990 values) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (class label, 15 values) |
|---|---|---|---|---|---|
robbiet480/home-assistant | homeassistant/components/pjlink/media_player.py | 7 | 5600 | """Support for controlling projector via the PJLink protocol."""
import logging
from pypjlink import MUTE_AUDIO, Projector
from pypjlink.projector import ProjectorError
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

CONF_ENCODING = "encoding"

# 4352 is the TCP port defined by the PJLink specification.
DEFAULT_PORT = 4352
DEFAULT_ENCODING = "utf-8"
# Socket timeout passed to Projector.from_address.
DEFAULT_TIMEOUT = 10

# YAML configuration schema for this media_player platform.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
    }
)

# Media-player features this integration supports.
SUPPORT_PJLINK = (
    SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the PJLink platform."""
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    encoding = config.get(CONF_ENCODING)
    password = config.get(CONF_PASSWORD)

    # Keep a single registry of created devices in hass.data so the same
    # projector (host:port) is never added twice.
    registry = hass.data.setdefault("pjlink", {})
    device_label = f"{host}:{port}"
    if device_label in registry:
        return

    projector_entity = PjLinkDevice(host, port, name, encoding, password)
    registry[device_label] = projector_entity
    add_entities([projector_entity], True)
def format_input_source(input_source_name, input_source_number):
    """Format input source for display in UI."""
    return "%s %s" % (input_source_name, input_source_number)
class PjLinkDevice(MediaPlayerEntity):
    """Representation of a PJLink projector as a media player entity."""

    def __init__(self, host, port, name, encoding, password):
        """Initialize the PJLink device.

        Connects to the projector once during setup to read its name
        (when none was configured) and its list of selectable inputs.
        """
        self._host = host
        self._port = port
        self._name = name
        self._password = password
        self._encoding = encoding
        self._muted = False
        self._pwstate = STATE_OFF
        self._current_source = None
        with self.projector() as projector:
            if not self._name:
                self._name = projector.get_name()
            inputs = projector.get_inputs()
        # Map "Name Number" display labels back to raw (name, number) tuples.
        self._source_name_mapping = {format_input_source(*x): x for x in inputs}
        self._source_list = sorted(self._source_name_mapping.keys())

    def projector(self):
        """Create PJLink Projector instance.

        A new authenticated connection is opened on every call; callers
        use it as a context manager so it is closed afterwards.
        """
        projector = Projector.from_address(
            self._host, self._port, self._encoding, DEFAULT_TIMEOUT
        )
        projector.authenticate(self._password)
        return projector

    def update(self):
        """Get the latest state from the device."""
        with self.projector() as projector:
            try:
                pwstate = projector.get_power()
                if pwstate in ("on", "warm-up"):
                    self._pwstate = STATE_ON
                    # Second element of get_mute()'s result is used as the
                    # audio mute flag here.
                    self._muted = projector.get_mute()[1]
                    self._current_source = format_input_source(*projector.get_input())
                else:
                    self._pwstate = STATE_OFF
                    self._muted = False
                    self._current_source = None
            except KeyError as err:
                # A KeyError of 'OK' is treated as "projector is off"
                # rather than a real failure.
                if str(err) == "'OK'":
                    self._pwstate = STATE_OFF
                    self._muted = False
                    self._current_source = None
                else:
                    raise
            except ProjectorError as err:
                # "unavailable time": projector refuses queries (e.g. while
                # cooling down); report it as off.
                if str(err) == "unavailable time":
                    self._pwstate = STATE_OFF
                    self._muted = False
                    self._current_source = None
                else:
                    raise

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._pwstate

    @property
    def is_volume_muted(self):
        """Return boolean indicating mute status."""
        return self._muted

    @property
    def source(self):
        """Return current input source."""
        return self._current_source

    @property
    def source_list(self):
        """Return all available input sources."""
        return self._source_list

    @property
    def supported_features(self):
        """Return projector supported features."""
        return SUPPORT_PJLINK

    def turn_off(self):
        """Turn projector off."""
        if self._pwstate == STATE_ON:
            with self.projector() as projector:
                projector.set_power("off")

    def turn_on(self):
        """Turn projector on."""
        if self._pwstate == STATE_OFF:
            with self.projector() as projector:
                projector.set_power("on")

    def mute_volume(self, mute):
        """Mute (true) or unmute (false) media player audio."""
        with self.projector() as projector:
            projector.set_mute(MUTE_AUDIO, mute)

    def select_source(self, source):
        """Set the input source."""
        source = self._source_name_mapping[source]
        with self.projector() as projector:
            projector.set_input(*source)
| apache-2.0 |
cenobites/flask-jsonrpc | examples/modular/modular.py | 1 | 2643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021, Cenobit Technologies, Inc. http://cenobit.es/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Cenobit Technologies nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# isort:skip_file
import os
import sys
from flask import Flask
PROJECT_DIR, PROJECT_MODULE_NAME = os.path.split(os.path.dirname(os.path.realpath(__file__)))
FLASK_JSONRPC_PROJECT_DIR = os.path.join(PROJECT_DIR, os.pardir)
if os.path.exists(FLASK_JSONRPC_PROJECT_DIR) and FLASK_JSONRPC_PROJECT_DIR not in sys.path:
sys.path.append(FLASK_JSONRPC_PROJECT_DIR)
from flask_jsonrpc import JSONRPC # noqa: E402 pylint: disable=C0413
from api.user import user # noqa: E402 pylint: disable=C0413,E0611
from api.article import article # noqa: E402 pylint: disable=C0413,E0611
# Application wiring: one Flask app exposing a JSON-RPC entry point at
# '/api', plus two blueprint-backed namespaces mounted under it.
app = Flask('modular')
jsonrpc = JSONRPC(app, '/api', enable_web_browsable_api=True)
jsonrpc.register_blueprint(app, user, url_prefix='/user', enable_web_browsable_api=True)
jsonrpc.register_blueprint(app, article, url_prefix='/article', enable_web_browsable_api=True)


@jsonrpc.method('App.index')
def index() -> str:
    """Return a static greeting (JSON-RPC method ``App.index``)."""
    return 'Welcome to Flask JSON-RPC'


if __name__ == '__main__':
    # Development server: listen on all interfaces with the debug reloader.
    app.run(host='0.0.0.0', debug=True)
| bsd-3-clause |
HazyResearch/snorkel | tutorials/workshop/lib/lfs.py | 1 | 2490 | #
# PLACE YOUR LFs HERE
#
import re
from snorkel.lf_helpers import (
get_left_tokens, get_right_tokens, get_between_tokens,
get_text_between, get_tagged_text,
)
from lib.dbpedia import known_spouses
# Helper function to get last name
def last_name(s):
    """Return the final space-separated token of *s*, or None when the
    string has no space (single-word names carry no last name)."""
    parts = s.split(' ')
    if len(parts) > 1:
        return parts[-1]
    return None
# Last name pairs for known spouses
last_names = set([(last_name(x), last_name(y)) for x, y in known_spouses if last_name(x) and last_name(y)])
# Keyword sets used by the labeling functions below.
spouses = {'spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'}
family = {'father', 'mother', 'sister', 'brother', 'son', 'daughter',
          'grandfather', 'grandmother', 'uncle', 'aunt', 'cousin'}
family = family | {f + '-in-law' for f in family}
# BUG FIX: the original literal read {'boyfriend', 'girlfriend' 'boss', ...};
# the missing comma concatenated the two strings into 'girlfriendboss',
# silently dropping both keywords from the set.
other = {'boyfriend', 'girlfriend', 'boss', 'employee', 'secretary', 'co-worker'}
#
# Pattern LFs
#
def LF_husband_wife(c):
    # Positive (1) when any spouse keyword occurs between the two mentions.
    return 1 if len(spouses.intersection(get_between_tokens(c))) > 0 else 0
def LF_husband_wife_left_window(c):
    """Positive (1) when a spouse keyword appears within two tokens to the
    left of either person mention; abstain (0) otherwise."""
    for mention in (c[0], c[1]):
        if spouses.intersection(get_left_tokens(mention, window=2)):
            return 1
    return 0
def LF_same_last_name(c):
    """Positive (1) when the two person mentions share a last name but are
    not the identical span; abstain (0) otherwise."""
    span1 = c.person1.get_span()
    span2 = c.person2.get_span()
    surname1 = last_name(span1)
    surname2 = last_name(span2)
    shared = surname1 and surname2 and surname1 == surname2
    if shared and span1 != span2:
        return 1
    return 0
def LF_and_married(c):
    # Positive (1) when the mentions are joined by 'and' and 'married'
    # appears to the right of the candidate pair.
    return 1 if 'and' in get_between_tokens(c) and 'married' in get_right_tokens(c) else 0
def LF_familial_relationship(c):
    # Negative (-1) when a family-relation keyword occurs between the mentions.
    return -1 if len(family.intersection(get_between_tokens(c))) > 0 else 0
def LF_family_left_window(c):
    """Negative (-1) when a family-relation keyword appears within two
    tokens to the left of either person mention; abstain (0) otherwise."""
    for mention in (c[0], c[1]):
        if family.intersection(get_left_tokens(mention, window=2)):
            return -1
    return 0
def LF_other_relationship(c):
    # Negative (-1) when a non-spousal relationship keyword occurs between
    # the mentions.
    return -1 if len(other.intersection(get_between_tokens(c))) > 0 else 0
#
# Distant Supervision
#
def LF_distant_supervision(c):
    # Positive (1) when the exact mention pair (in either order) appears in
    # the DBpedia known-spouses list.
    p1, p2 = c.person1.get_span(), c.person2.get_span()
    return 1 if (p1, p2) in known_spouses or (p2, p1) in known_spouses else 0
def LF_distant_supervision_last_names(c):
    # Weaker distant-supervision signal: the mentions' last names (in either
    # order) match a known spouse pair, and the mentions themselves differ.
    p1, p2 = c.person1.get_span(), c.person2.get_span()
    p1n, p2n = last_name(p1), last_name(p2)
    return 1 if (p1 != p2) and ((p1n, p2n) in last_names or (p2n, p1n) in last_names) else 0
| apache-2.0 |
analogbyte/ansible-modules-extras | source_control/gitlab_user.py | 14 | 12435 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes Gitlab Users
description:
- When the user does not exists in Gitlab, it will be created.
- When the user does exists and state=absent, the user will be deleted.
- When changes are made to user, the user will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
name:
description:
- Name of the user you want to create
required: true
username:
description:
- The username of the user.
required: true
password:
description:
- The password of the user.
required: true
email:
description:
- The email that belongs to the user.
required: true
sshkey_name:
description:
- The name of the sshkey
required: false
default: null
sshkey_file:
description:
- The ssh key itself.
required: false
default: null
group:
description:
- Add user as an member to this group.
required: false
default: null
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master
- owner
required: false
default: null
state:
description:
- create or delete group.
- Possible values are present and absent.
required: false
default: present
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: "Delete Gitlab User"
local_action: gitlab_user
server_url="http://gitlab.dj-wasabi.local"
validate_certs=false
login_token="WnUzDsxjy8230-Dy_k"
username=myusername
state=absent
- name: "Create Gitlab User"
local_action: gitlab_user
server_url="https://gitlab.dj-wasabi.local"
validate_certs=true
login_user=dj-wasabi
login_password="MySecretPassword"
name=My Name
username=myusername
password=mysecretpassword
email=me@home.com
sshkey_name=MySSH
sshkey_file=ssh-rsa AAAAB3NzaC1yc...
state=present
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
class GitLabUser(object):
    """Wrapper around a pyapi-gitlab client implementing this module's
    create/update/delete user operations.

    All methods terminate the play directly through AnsibleModule's
    exit_json()/fail_json() rather than returning results to the caller.
    """

    def __init__(self, module, git):
        # module: the AnsibleModule (exit/fail helpers, check-mode flag)
        # git: an authenticated gitlab.Gitlab connection
        self._module = module
        self._gitlab = git

    def addToGroup(self, group_id, user_id, access_level):
        """Add user_id to group_id, mapping the symbolic access level to
        GitLab's numeric scheme (guest=10 ... owner=50)."""
        if access_level == "guest":
            level = 10
        elif access_level == "reporter":
            level = 20
        elif access_level == "developer":
            level = 30
        elif access_level == "master":
            level = 40
        elif access_level == "owner":
            level = 50
        # NOTE(review): 'level' is unbound for any other value; this relies
        # on the argument_spec choices in main() to keep access_level valid.
        return self._gitlab.addgroupmember(group_id, user_id, level)

    def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
        """Create the user if missing, otherwise reconcile its settings."""
        group_id = ''
        arguments = {"name": user_name,
                     "username": user_username,
                     "email": user_email}
        if group_name is not None:
            if self.existsGroup(group_name):
                group_id = self.getGroupId(group_name)
        if self.existsUser(user_username):
            self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
        else:
            # Creation is never attempted in check mode.
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)

    def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Create a new user, optionally attaching an SSH key and group
        membership, then exit the module with the result."""
        user_changed = False
        # Create the user
        user_username = arguments['username']
        user_name = arguments['name']
        user_email = arguments['email']
        if self._gitlab.createuser(password=user_password, **arguments):
            user_id = self.getUserId(user_username)
            if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
                user_changed = True
            # Add the user to the group if group_id is not empty
            if group_id != '':
                if self.addToGroup(group_id, user_id, access_level):
                    user_changed = True
            user_changed = True
        # Exit with change to true or false
        if user_changed:
            self._module.exit_json(changed=True, result="Created the user")
        else:
            self._module.exit_json(changed=False)

    def deleteUser(self, user_username):
        """Delete the named user and exit the module with the outcome."""
        user_id = self.getUserId(user_username)
        if self._gitlab.deleteuser(user_id):
            self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
        else:
            self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)

    def existsGroup(self, group_name):
        """Return True if a group with this exact name exists."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return True
        return False

    def existsUser(self, username):
        """Return True if searching for the username yields any user with
        a non-empty id."""
        found_user = self._gitlab.getusers(search=username)
        for user in found_user:
            if user['id'] != '':
                return True
        return False

    def getGroupId(self, group_name):
        """Return the numeric id of the named group (implicitly None when
        no group matches)."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return group['id']

    def getUserId(self, username):
        """Return the id of the first user matching the search (implicitly
        None when no user matches)."""
        found_user = self._gitlab.getusers(search=username)
        for user in found_user:
            if user['id'] != '':
                return user['id']

    def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Bring an existing user in line with the requested settings and
        exit reporting whether anything changed."""
        user_changed = False
        user_username = arguments['username']
        user_id = self.getUserId(user_username)
        user_data = self._gitlab.getuser(user_id=user_id)
        # Lets check if we need to update the user
        for arg_key, arg_value in arguments.items():
            if user_data[arg_key] != arg_value:
                user_changed = True
        if user_changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._gitlab.edituser(user_id=user_id, **arguments)
            user_changed = True
        # SSH key / group membership calls are skipped in check mode but
        # still reported as a change.
        if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
            user_changed = True
        if group_id != '':
            if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
                user_changed = True
        if user_changed:
            self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
        else:
            self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
def main():
    """Module entry point: parse parameters, connect to GitLab and
    dispatch to create/update/delete.

    NOTE(review): this is legacy Python 2 code ('except Exception, e'
    below); it will not parse under Python 3.
    """
    # NOTE(review): 'user_id' is declared global but never assigned here.
    global user_id
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            validate_certs=dict(required=False, default=True, type=bool, aliases=['verify_ssl']),
            login_user=dict(required=False, no_log=True),
            login_password=dict(required=False, no_log=True),
            login_token=dict(required=False, no_log=True),
            name=dict(required=True),
            username=dict(required=True),
            password=dict(required=True),
            email=dict(required=True),
            sshkey_name=dict(required=False),
            sshkey_file=dict(required=False),
            group=dict(required=False),
            access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
            state=dict(default="present", choices=["present", "absent"]),
        ),
        supports_check_mode=True
    )
    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
    server_url = module.params['server_url']
    verify_ssl = module.params['validate_certs']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_token = module.params['login_token']
    user_name = module.params['name']
    user_username = module.params['username']
    user_password = module.params['password']
    user_email = module.params['email']
    user_sshkey_name = module.params['sshkey_name']
    user_sshkey_file = module.params['sshkey_file']
    group_name = module.params['group']
    access_level = module.params['access_level']
    state = module.params['state']
    # We need both login_user and login_password or login_token, otherwise we fail.
    if login_user is not None and login_password is not None:
        use_credentials = True
    elif login_token is not None:
        use_credentials = False
    else:
        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
    # Check if vars are none
    # NOTE(review): 'use_sshkey' and 'add_to_group' are computed but never
    # read again below.
    if user_sshkey_file is not None and user_sshkey_name is not None:
        use_sshkey = True
    else:
        use_sshkey = False
    if group_name is not None and access_level is not None:
        add_to_group = True
        group_name = group_name.lower()
    else:
        add_to_group = False
    user_username = user_username.lower()
    # Lets make an connection to the Gitlab server_url, with either login_user and login_password
    # or with login_token
    try:
        if use_credentials:
            # NOTE(review): the credentials path ignores verify_ssl.
            git = gitlab.Gitlab(host=server_url)
            git.login(user=login_user, password=login_password)
        else:
            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
    except Exception, e:
        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
    # Validate if group exists and take action based on "state"
    user = GitLabUser(module, git)
    # Check if user exists, if not exists and state = absent, we exit nicely.
    if not user.existsUser(user_username) and state == "absent":
        module.exit_json(changed=False, result="User already deleted or does not exists")
    else:
        # User exists,
        if state == "absent":
            user.deleteUser(user_username)
        else:
            user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
RYWU/slick | slick/blueprints/vm/views.py | 4 | 8583 | from __future__ import division
import json
from flask import redirect, url_for, flash, request, render_template
from slick.utils.core import get_client
from slick.utils.nested_dict import lookup
from slick.utils.session import login_required
from . import forms, manager
@login_required
def change_nic_speed(object_id, nic, speed):
    """AJAX endpoint that alters the port speed of one NIC on a VM.

    :param int object_id: The ID of the instance to change
    :param string nic: The identifier of the network interface to change
    :param int speed: The speed to change the interface to
    """
    success, message = manager.change_port_speed(object_id, nic, speed)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def cancel(vm_id):
    """AJAX endpoint that cancels the specified virtual machine.

    :param int vm_id: The ID of the instance to cancel
    """
    success, message = manager.cancel_instance(vm_id)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def create():
    """ Provides an interface for creating a new virtual machine. """
    # Setup the form choices here since we need access to the client object
    # in order to do so.
    form = forms.CreateVMForm()
    all_options = manager.all_instance_options('')
    dc_options = [('', 'First Available')]
    dc_options += all_options['datacenter']
    form.datacenter.choices = dc_options
    # Group OS choices by vendor prefix (e.g. 'centos_...') for the template
    # and strip noise words from the display name.
    os_groups = {}
    for os in all_options['os']:
        group = os[0].split('_')[0].lower()
        if group not in os_groups:
            os_groups[group] = []
        # Name cleanup to help with display
        name = os[1]
        bad_strings = [' GNU/Linux', ' Install', '/Stable', ' Linux']
        for string in bad_strings:
            name = name.replace(string, '')
        os_groups[group].append((os[0], name))
    # NOTE(review): the grouped/cleaned names feed the template only; the
    # actual form validation choices use the raw option list below.
    form.os.choices = all_options['os']
    # os_options = [('', '-- Select --')] + all_options['os']
    # form.os.choices = os_options
    cpu_options = [('', '-- Select --')] + all_options['cpus']
    form.cpus.choices = cpu_options
    ram_options = [('', '-- Select --')] + all_options['memory']
    form.memory.choices = ram_options
    if form.validate_on_submit():
        # Collect every submitted field except the CSRF token and hand the
        # lot to the manager for provisioning.
        fields = {}
        for field in form:
            if 'csrf_token' == field.name:
                continue
            fields[field.name] = field.data
        (success, message) = manager.launch_instance(**fields)
        if success:
            flash(message, 'success')
            # TODO - This is not implemented yet
            # NOTE(review): '_save_template' is not defined in this module;
            # this branch would raise NameError if reached — confirm.
            if request.form.get('save_template'):
                template_name = request.form.get('template_title')
                fields['title'] = template_name
                _save_template(fields)
                flash('Configuration saved for future use.', 'success')
            return redirect(url_for(".index"))
        else:
            flash(message, 'error')
    if form.errors:
        flash('There are validation errors with your submission.', 'error')
    # Friendly display names for OS group headings in the template.
    os_names = {
        'centos': 'CentOS',
        'vyattace': 'Vyatta CE',
        'win': 'Windows',
    }
    payload = {
        'title': 'Create Instance',
        'form': form,
        'os_groups': os_groups,
        'os_names': os_names,
    }
    return render_template('vm_add.html', **payload)
@login_required
def edit(vm_id):
    """ Provides an interface for a user to update some information about an
    existing virtual machine.

    :param int vm_id: The ID of the VM to edit
    """
    instance = manager.get_instance(vm_id)
    if not instance:
        flash('Invalid virtual machine specified.', 'error')
        return redirect(url_for('.index'))
    # The edit form expects the identifier under the key 'vm_id'.
    instance['vm_id'] = instance['id']
    form = forms.EditVMForm(**instance)
    if form.validate_on_submit():
        # Forward every submitted field except the CSRF token.
        fields = {}
        for field in form:
            if 'csrf_token' == field.name:
                continue
            fields[field.name] = field.data
        (success, message) = manager.edit_instance(**fields)
        if success:
            flash(message, 'success')
            return redirect(url_for(".index"))
        else:
            flash(message, 'error')
    if form.errors:
        flash('There are validation errors with your submission.', 'error')
    payload = {
        'title': 'Edit Instance',
        'form': form,
        'instance': instance,
    }
    return render_template('vm_edit.html', **payload)
@login_required
def get_password(object_id, username):
    """AJAX endpoint returning the root/admin password for the specified
    machine and account.

    :param int object_id: The VM ID to retrieve the password for.
    :param string username: The specific admin account that owns the password.
    :return: the password string, or a short error/placeholder message.
    """
    instance = manager.get_instance(object_id, True)
    if not instance:
        return 'Invalid account'
    password = 'Password not found'
    # BUG FIX: lookup() can return None when the nested keys are missing;
    # guard with 'or []' so we report "not found" instead of raising a
    # TypeError while iterating None.
    accounts = lookup(instance, 'operatingSystem', 'passwords') or []
    for account in accounts:
        if username == account['username']:
            password = account['password']
    return password
@login_required
def hard_reboot_vm(vm_id):
    """AJAX endpoint that performs a hard (power-cycle) reboot of a VM.

    :param int vm_id: The ID of the VM to reboot
    """
    success, message = manager.reboot_instance(vm_id, False)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def index():
    """ This function creates a tabular list of all VMs on the user's account.
    """
    instances = manager.all_instances()
    payload = {
        'title': 'List Instances',
        'instances': instances,
        'submenu': [(url_for('.create'), 'Create Instance')],
    }
    # Optional datacenter filter ('?dc=...') passed through to the template
    # as 'search'.
    search = ''
    if request.args.get('dc'):
        search = request.args.get('dc')
    payload['search'] = search
    return render_template("vm_index.html", **payload)
@login_required
def price_check():
    """ AJAX call to perform a price check on a new VM order. It takes in the
    entire VM creation form, runs it through the validation API call, and then
    returns the results for display.
    """
    form = forms.CreateVMForm()
    # Collect only the fields actually submitted, skipping the CSRF token.
    fields = {}
    for field in form:
        if 'csrf_token' == field.name:
            continue
        if request.form.get(field.name):
            fields[field.name] = request.form[field.name]
    results = manager.validate_instance(**fields)
    return render_template('vm_price_quote.html', order_template=results)
@login_required
def reload_vm(vm_id):
    """AJAX endpoint that reloads the operating system of a VM.

    :param int vm_id: The ID of the VM to reload
    """
    success, message = manager.reload_instance(vm_id)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def soft_reboot_vm(vm_id):
    """AJAX endpoint that performs a soft (OS-level) reboot of a VM.

    :param int vm_id: The ID of the VM to reboot
    """
    success, message = manager.reboot_instance(vm_id)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def start_vm(vm_id):
    """AJAX endpoint that starts a halted VM.

    :param int vm_id: The ID of the VM to start
    """
    success, message = manager.start_vm(vm_id)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def stop_vm(vm_id):
    """AJAX endpoint that stops a running VM.

    :param int vm_id: The ID of the VM to stop
    """
    success, message = manager.stop_vm(vm_id)
    result = {'success': success, 'message': message}
    return json.dumps(result)
@login_required
def status(vm_id):
    """ AJAX call to run a status check against a single VM. This is used with
    a Javascript timer to update the index for VMs that have active
    transactions.

    :param int vm_id: The ID of the VM you want the status for
    """
    # NOTE(review): returning None from a Flask view raises an error; confirm
    # routing guarantees vm_id is always truthy before relying on this guard.
    if not vm_id:
        return None
    instance = manager.get_instance(vm_id)
    if not instance:
        return ''
    # Re-render the instance's table row so the client can swap it in place.
    html = render_template('vm_instance_row.html', instance=instance)
    return json.dumps({
        'active': instance['active'],
        'row_html': html,
    })
@login_required
def view(vm_id):
    """ Provides a complete view page for a single VM.

    :param int vm_id: The ID of the VM to view
    """
    instance = manager.get_instance(vm_id)
    # NOTE(review): unlike edit(), there is no guard for a missing instance
    # here; instance['fqdn'] will raise if the lookup fails — confirm.
    payload = {
        'title': 'View VM',
        'subheader': instance['fqdn'],
        'object': instance,
        'module': 'vm_module',
        'submenu': [(url_for('.edit', vm_id=vm_id), 'Edit')],
    }
    return render_template('shared/object_view.html', **payload)
| mit |
romankagan/DDBWorkbench | plugins/hg4idea/testData/bin/mercurial/sshrepo.py | 88 | 8196 | # sshrepo.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex
from i18n import _
import repo, util, error, encoding
import re, urllib
class remotelock(object):
    """Handle for a lock held on the remote repository.

    release() unlocks the remote side and drops the reference; garbage
    collection releases the lock if the caller forgot to.
    """
    def __init__(self, repo):
        self.repo = repo
    def release(self):
        # Unlock first; only clear the reference once unlock succeeded so
        # __del__ can retry on failure.
        target = self.repo
        target.unlock()
        self.repo = None
    def __del__(self):
        if self.repo:
            self.release()
class sshrepository(repo.repository):
def __init__(self, ui, path, create=0):
self._url = path
self.ui = ui
m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
if not m:
self.abort(error.RepoError(_("couldn't parse location %s") % path))
self.user = m.group(2)
self.host = m.group(3)
self.port = m.group(5)
self.path = m.group(7) or "."
sshcmd = self.ui.config("ui", "ssh", "ssh")
remotecmd = self.ui.config("ui", "remotecmd", "hg")
args = util.sshargs(sshcmd, self.host, self.user, self.port)
if create:
cmd = '%s %s "%s init %s"'
cmd = cmd % (sshcmd, args, remotecmd, self.path)
ui.note(_('running %s\n') % cmd)
res = util.system(cmd)
if res != 0:
self.abort(error.RepoError(_("could not create remote repo")))
self.validate_repo(ui, sshcmd, args, remotecmd)
def url(self):
return self._url
def validate_repo(self, ui, sshcmd, args, remotecmd):
# cleanup up previous run
self.cleanup()
cmd = '%s %s "%s -R %s serve --stdio"'
cmd = cmd % (sshcmd, args, remotecmd, self.path)
cmd = util.quotecommand(cmd)
ui.note(_('running %s\n') % cmd)
self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
# skip any noise generated by remote shell
self.do_cmd("hello")
r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
lines = ["", "dummy"]
max_noise = 500
while lines[-1] and max_noise:
l = r.readline()
self.readerr()
if lines[-1] == "1\n" and l == "\n":
break
if l:
ui.debug("remote: ", l)
lines.append(l)
max_noise -= 1
else:
self.abort(error.RepoError(_("no suitable response from remote hg")))
self.capabilities = set()
for l in reversed(lines):
if l.startswith("capabilities:"):
self.capabilities.update(l[:-1].split(":")[1].split())
break
def readerr(self):
while 1:
size = util.fstat(self.pipee).st_size
if size == 0:
break
l = self.pipee.readline()
if not l:
break
self.ui.status(_("remote: "), l)
def abort(self, exception):
self.cleanup()
raise exception
def cleanup(self):
try:
self.pipeo.close()
self.pipei.close()
# read the error descriptor until EOF
for l in self.pipee:
self.ui.status(_("remote: "), l)
self.pipee.close()
except:
pass
__del__ = cleanup
def do_cmd(self, cmd, **args):
self.ui.debug("sending %s command\n" % cmd)
self.pipeo.write("%s\n" % cmd)
for k, v in args.iteritems():
self.pipeo.write("%s %d\n" % (k, len(v)))
self.pipeo.write(v)
self.pipeo.flush()
return self.pipei
def call(self, cmd, **args):
self.do_cmd(cmd, **args)
return self._recv()
def _recv(self):
l = self.pipei.readline()
self.readerr()
try:
l = int(l)
except:
self.abort(error.ResponseError(_("unexpected response:"), l))
return self.pipei.read(l)
def _send(self, data, flush=False):
self.pipeo.write("%d\n" % len(data))
if data:
self.pipeo.write(data)
if flush:
self.pipeo.flush()
self.readerr()
def lock(self):
self.call("lock")
return remotelock(self)
def unlock(self):
self.call("unlock")
def lookup(self, key):
self.requirecap('lookup', _('look up remote revision'))
d = self.call("lookup", key=key)
success, data = d[:-1].split(" ", 1)
if int(success):
return bin(data)
else:
self.abort(error.RepoError(data))
def heads(self):
d = self.call("heads")
try:
return map(bin, d[:-1].split(" "))
except:
self.abort(error.ResponseError(_("unexpected response:"), d))
def branchmap(self):
d = self.call("branchmap")
try:
branchmap = {}
for branchpart in d.splitlines():
branchheads = branchpart.split(' ')
branchname = urllib.unquote(branchheads[0])
# Earlier servers (1.3.x) send branch names in (their) local
# charset. The best we can do is assume it's identical to our
# own local charset, in case it's not utf-8.
try:
branchname.decode('utf-8')
except UnicodeDecodeError:
branchname = encoding.fromlocal(branchname)
branchheads = [bin(x) for x in branchheads[1:]]
branchmap[branchname] = branchheads
return branchmap
except:
raise error.ResponseError(_("unexpected response:"), d)
def branches(self, nodes):
n = " ".join(map(hex, nodes))
d = self.call("branches", nodes=n)
try:
br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
return br
except:
self.abort(error.ResponseError(_("unexpected response:"), d))
    def between(self, pairs):
        """For each (top, bottom) pair, return the intervening nodes.

        Pairs are encoded as 'hex-hex' joined by spaces; an empty response
        line maps to an empty list for that pair.
        """
        n = " ".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()]
            return p
        except:
            # Broad catch is deliberate: malformed output is a protocol error.
            self.abort(error.ResponseError(_("unexpected response:"), d))
    def changegroup(self, nodes, kind):
        """Stream a changegroup rooted at *nodes*; *kind* is unused here."""
        n = " ".join(map(hex, nodes))
        return self.do_cmd("changegroup", roots=n)
    def changegroupsubset(self, bases, heads, kind):
        """Stream a changegroup between *bases* and *heads*; *kind* is unused."""
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = " ".join(map(hex, bases))
        heads = " ".join(map(hex, heads))
        return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
def unbundle(self, cg, heads, source):
d = self.call("unbundle", heads=' '.join(map(hex, heads)))
if d:
# remote may send "unsynced changes"
self.abort(error.RepoError(_("push refused: %s") % d))
while 1:
d = cg.read(4096)
if not d:
break
self._send(d)
self._send("", flush=True)
r = self._recv()
if r:
# remote may send "unsynced changes"
self.abort(error.RepoError(_("push failed: %s") % r))
r = self._recv()
try:
return int(r)
except:
self.abort(error.ResponseError(_("unexpected response:"), r))
def addchangegroup(self, cg, source, url):
d = self.call("addchangegroup")
if d:
self.abort(error.RepoError(_("push refused: %s") % d))
while 1:
d = cg.read(4096)
if not d:
break
self.pipeo.write(d)
self.readerr()
self.pipeo.flush()
self.readerr()
r = self._recv()
if not r:
return 1
try:
return int(r)
except:
self.abort(error.ResponseError(_("unexpected response:"), r))
    def stream_out(self):
        """Start a raw streaming clone and return the response pipe."""
        return self.do_cmd('stream_out')
instance = sshrepository
| apache-2.0 |
roadmapper/ansible | lib/ansible/modules/storage/netapp/na_ontap_node.py | 21 | 4294 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_node
short_description: NetApp ONTAP Rename a node.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Rename an ONTAP node.
options:
name:
description:
- The new name for the node
required: true
from_name:
description:
- The name of the node to be renamed. If I(name) already exists, no action will be performed.
required: true
'''
EXAMPLES = """
- name: rename node
na_ontap_node:
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
from_name: laurentn-vsim1
name: laurentncluster-2
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapNode(object):
    """Rename an ONTAP cluster node via the ZAPI 'system-node-rename' call."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            from_name=dict(required=True, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.cluster = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def rename_node(self):
        """
        Rename the node given by 'from_name' to 'name'.
        :return: None
        """
        node_obj = netapp_utils.zapi.NaElement('system-node-rename')
        node_obj.add_new_child('node', self.parameters['from_name'])
        node_obj.add_new_child('new-name', self.parameters['name'])
        try:
            self.cluster.invoke_successfully(node_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            # Fixed message: this call renames (not creates) a node.
            self.module.fail_json(msg='Error renaming node: %s' %
                                  (to_native(error)),
                                  exception=traceback.format_exc())

    def get_node(self, name):
        """
        Return True if node *name* exists, None if it does not.
        Any other ZAPI failure aborts the module.
        """
        node_obj = netapp_utils.zapi.NaElement('system-node-get')
        node_obj.add_new_child('node', name)
        try:
            self.cluster.invoke_successfully(node_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            if to_native(error.code) == "13115":
                # 13115 (EINVALIDINPUTERROR) if the node does not exist
                return None
            self.module.fail_json(msg=to_native(
                error), exception=traceback.format_exc())
        return True

    def apply(self):
        """Apply the rename idempotently and exit the module."""
        # logging ems event
        results = netapp_utils.get_cserver(self.cluster)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_node", cserver)
        changed = False
        # If the target name already exists, the rename has presumably
        # already happened: report no change (idempotency).
        if not self.get_node(self.parameters['name']):
            if self.get_node(self.parameters['from_name']):
                # Honor check mode: only report that a rename would occur.
                if not self.module.check_mode:
                    self.rename_node()
                changed = True
            else:
                self.module.fail_json(msg='Error renaming node, from_name %s does not exist' % self.parameters['from_name'])
        self.module.exit_json(changed=changed)
def main():
    """
    Entry point: instantiate the module object and run the node rename.
    """
    obj = NetAppOntapNode()
    obj.apply()
if __name__ == '__main__':
    main()
| gpl-3.0 |
uprare/p2pool | SOAPpy/SOAPBuilder.py | 289 | 22852 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py 1498 2010-03-12 02:13:19Z pooryorick $'
from version import __version__
import cgi
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
    BooleanType
    pythonHasBooleanType = 1
except NameError:
    pythonHasBooleanType = 0  # very old interpreters: bools marshal as ints
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = ( '%(ENV_T)s:Envelope\n' + \
' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
def __init__(self, args = (), kw = {}, method = None, namespace = None,
header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
use_refs = 0, config = Config, noroot = 0):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
self.args = args
self.kw = kw
self.envelope = envelope
self.encoding = encoding
self.method = method
self.namespace = namespace
self.header = header
self.methodattrs= methodattrs
self.use_refs = use_refs
self.config = config
self.out = []
self.tcounter = 0
self.ncounter = 1
self.icounter = 1
self.envns = {}
self.ids = {}
self.depth = 0
self.multirefs = []
self.multis = 0
self.body = not isinstance(args, bodyType)
self.noroot = noroot
def build(self):
if Config.debug: print "In build."
ns_map = {}
# Cache whether typing is on or not
typed = self.config.typed
if self.header:
# Create a header.
self.dump(self.header, "Header", typed = typed)
#self.header = None # Wipe it out so no one is using it.
if self.body:
# Call genns to record that we've used SOAP-ENV.
self.depth += 1
body_ns = self.genns(ns_map, NS.ENV)[0]
self.out.append("<%sBody>\n" % body_ns)
if self.method:
# Save the NS map so that it can be restored when we
# fall out of the scope of the method definition
save_ns_map = ns_map.copy()
self.depth += 1
a = ''
if self.methodattrs:
for (k, v) in self.methodattrs.items():
a += ' %s="%s"' % (k, v)
if self.namespace: # Use the namespace info handed to us
methodns, n = self.genns(ns_map, self.namespace)
else:
methodns, n = '', ''
self.out.append('<%s%s%s%s%s>\n' % (
methodns, self.method, n, a, self.genroot(ns_map)))
try:
if type(self.args) != TupleType:
args = (self.args,)
else:
args = self.args
for i in args:
self.dump(i, typed = typed, ns_map = ns_map)
if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
for k in self.config.argsOrdering.get(self.method):
self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
else:
for (k, v) in self.kw.items():
self.dump(v, k, typed = typed, ns_map = ns_map)
except RecursionError:
if self.use_refs == 0:
# restart
b = SOAPBuilder(args = self.args, kw = self.kw,
method = self.method, namespace = self.namespace,
header = self.header, methodattrs = self.methodattrs,
envelope = self.envelope, encoding = self.encoding,
use_refs = 1, config = self.config)
return b.build()
raise
if self.method:
self.out.append("</%s%s>\n" % (methodns, self.method))
# End of the method definition; drop any local namespaces
ns_map = save_ns_map
self.depth -= 1
if self.body:
# dump may add to self.multirefs, but the for loop will keep
# going until it has used all of self.multirefs, even those
# entries added while in the loop.
self.multis = 1
for obj, tag in self.multirefs:
self.dump(obj, tag, typed = typed, ns_map = ns_map)
self.out.append("</%sBody>\n" % body_ns)
self.depth -= 1
if self.envelope:
e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
self.envns.items())
self.out = ['<', self._env_top] + e + ['>\n'] + \
self.out + \
[self._env_bot]
if self.encoding != None:
self.out.insert(0, self._xml_enc_top % self.encoding)
return ''.join(self.out).encode(self.encoding)
self.out.insert(0, self._xml_top)
return ''.join(self.out)
def gentag(self):
if Config.debug: print "In gentag."
self.tcounter += 1
return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
if nsURI == None:
return ('', '')
if type(nsURI) == TupleType: # already a tuple
if len(nsURI) == 2:
ns, nsURI = nsURI
else:
ns, nsURI = None, nsURI[0]
else:
ns = None
if ns_map.has_key(nsURI):
return (ns_map[nsURI] + ':', '')
if self._env_ns.has_key(nsURI):
ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
return (ns + ':', '')
if not ns:
ns = "ns%d" % self.ncounter
self.ncounter += 1
ns_map[nsURI] = ns
if self.config.buildWithNamespacePrefix:
return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
else:
return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
if self.noroot:
return ''
if self.depth != 2:
return ''
ns, n = self.genns(ns_map, NS.ENC)
return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
def checkref(self, obj, tag, ns_map):
if self.depth < 2:
return ''
if not self.ids.has_key(id(obj)):
n = self.ids[id(obj)] = self.icounter
self.icounter = n + 1
if self.use_refs == 0:
return ''
if self.depth == 2:
return ' id="i%d"' % n
self.multirefs.append((obj, tag))
else:
if self.use_refs == 0:
raise RecursionError, "Cannot serialize recursive object"
n = self.ids[id(obj)]
if self.multis and self.depth == 2:
return ' id="i%d"' % n
self.out.append('<%s href="#i%d"%s/>\n' %
(tag, n, self.genroot(ns_map)))
return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if Config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
self.dump_dispatch(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
rootattr = '', id = '',
xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
if Config.debug: print "In dumper."
if nsURI == None:
nsURI = self.config.typesNamespaceURI
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
a = n = t = ''
if typed and obj_type:
ns, n = self.genns(ns_map, nsURI)
ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
try: a = obj._marshalAttrs(ns_map, self)
except: pass
try: data = obj._marshalData()
except:
if (obj_type != "string"): # strings are already encoded
data = cgi.escape(str(obj))
else:
data = obj
return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
"id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_float."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if Config.strict_range:
doubleType(obj)
if fpconst.isPosInf(obj):
obj = "INF"
elif fpconst.isNegInf(obj):
obj = "-INF"
elif fpconst.isNaN(obj):
obj = "NaN"
else:
obj = repr(obj)
# Note: python 'float' is actually a SOAP 'double'.
self.out.append(self.dumper(
None, "double", obj, tag, typed, ns_map, self.genroot(ns_map)))
def dump_int(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_int."
self.out.append(self.dumper(None, 'integer', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_bool(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_bool."
self.out.append(self.dumper(None, 'boolean', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_string."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: data = obj._marshalData()
except: data = obj
self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_list.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try:
sample = data[0]
empty = 0
except:
# preserve type if present
if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
if getattr(obj, "_complexType", None):
sample = typedArrayType(typed=obj._type,
complexType = obj._complexType)
sample._typename = obj._type
if not getattr(obj,"_ns",None): obj._ns = NS.URN
else:
sample = typedArrayType(typed=obj._type)
else:
sample = structType()
empty = 1
# First scan list to see if all are the same type
same_type = 1
if not empty:
for i in data[1:]:
if type(sample) != type(i) or \
(type(sample) == InstanceType and \
sample.__class__ != i.__class__):
same_type = 0
break
ndecl = ''
if same_type:
if (isinstance(sample, structType)) or \
type(sample) == DictType or \
(isinstance(sample, anyType) and \
(getattr(sample, "_complexType", None) and \
sample._complexType)): # force to urn struct
try:
tns = obj._ns or NS.URN
except:
tns = NS.URN
ns, ndecl = self.genns(ns_map, tns)
try:
typename = sample._typename
except:
typename = "SOAPStruct"
t = ns + typename
elif isinstance(sample, anyType):
ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
t = ns + str(sample._type)
else:
t = 'ur-type'
else:
typename = type(sample).__name__
# For Python 2.2+
if type(sample) == StringType: typename = 'string'
# HACK: unicode is a SOAP string
if type(sample) == UnicodeType: typename = 'string'
# HACK: python 'float' is actually a SOAP 'double'.
if typename=="float": typename="double"
t = self.genns(
ns_map, self.config.typesNamespaceURI)[0] + typename
else:
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
"ur-type"
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ens, edecl = self.genns(ns_map, NS.ENC)
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
(tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
if typed:
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
if isinstance(data, (list, tuple, arrayType)):
should_drill = True
else:
should_drill = not same_type
for i in data:
self.dump(i, elemsname, should_drill, ns_map)
if typed: self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_exception(self, obj, tag, typed = 0, ns_map = {}):
if isinstance(obj, faultType): # Fault
cns, cdecl = self.genns(ns_map, NS.ENC)
vns, vdecl = self.genns(ns_map, NS.ENV)
self.out.append('<%sFault %sroot="1"%s%s>' % (vns, cns, vdecl, cdecl))
self.dump(obj.faultcode, "faultcode", typed, ns_map)
self.dump(obj.faultstring, "faultstring", typed, ns_map)
if hasattr(obj, "detail"):
self.dump(obj.detail, "detail", typed, ns_map)
self.out.append("</%sFault>\n" % vns)
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_dictionary."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
self.out.append('<%s%s%s%s>\n' %
(tag, id, a, self.genroot(ns_map)))
for (k, v) in obj.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
def dump_dispatch(self, obj, tag, typed = 1, ns_map = {}):
if not tag:
# If it has a name use it.
if isinstance(obj, anyType) and obj._name:
tag = obj._name
else:
tag = self.gentag()
# watch out for order!
dumpmap = (
(Exception, self.dump_exception),
(arrayType, self.dump_list),
(basestring, self.dump_string),
(NoneType, self.dump_None),
(bool, self.dump_bool),
(int, self.dump_int),
(long, self.dump_int),
(list, self.dump_list),
(tuple, self.dump_list),
(dict, self.dump_dictionary),
(float, self.dump_float),
)
for dtype, func in dumpmap:
if isinstance(obj, dtype):
func(obj, tag, typed, ns_map)
return
r = self.genroot(ns_map)
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
if isinstance(obj, voidType): # void
self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
else:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
if isinstance(obj, structType):
# Check for namespace
ndecl = ''
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
tag = ns + tag
self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
keylist = obj.__dict__.keys()
# first write out items with order information
if hasattr(obj, '_keyord'):
for i in range(len(obj._keyord)):
self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
keylist.remove(obj._keyord[i])
# now write out the rest
for k in keylist:
if (k[0] != "_"):
self.dump(getattr(obj,k), k, 1, ns_map)
if isinstance(obj, bodyType):
self.multis = 1
for v, k in self.multirefs:
self.dump(v, k, typed = typed, ns_map = ns_map)
self.out.append('</%s>\n' % tag)
elif isinstance(obj, anyType):
t = ''
if typed:
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ons, ondecl = self.genns(ns_map, ns)
ins, indecl = self.genns(ns_map,
self.config.schemaNamespaceURI)
t = ' %stype="%s%s"%s%s' % \
(ins, ons, obj._type, ondecl, indecl)
self.out.append('<%s%s%s%s%s>%s</%s>\n' %
(tag, t, id, a, r, obj._marshalData(), tag))
else: # Some Class
self.out.append('<%s%s%s>\n' % (tag, id, r))
d1 = getattr(obj, '__dict__', None)
if d1 is not None:
for (k, v) in d1:
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
              header=None, methodattrs=None, envelope=1, encoding='UTF-8',
              config=Config, noroot = 0):
    """Convenience wrapper: construct a SOAPBuilder for the given call
    description and return the built SOAP message."""
    builder = SOAPBuilder(args=args, kw=kw, method=method,
                          namespace=namespace, header=header,
                          methodattrs=methodattrs, envelope=envelope,
                          encoding=encoding, config=config, noroot=noroot)
    return builder.build()
| gpl-3.0 |
Evervolv/android_kernel_htc_qsd8k | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# The readelf binary may be overridden through the environment.
readelf = os.getenv("READELF", "readelf")
# Matches '<func>: [0xstart-0xend]' headers in 'readelf -u' output.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches the per-region 'rlen=N' length annotations.
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    """Report an error when a function's slot count differs from the sum
    of its unwind-region lengths (reads globals start/end for anonymous
    functions, bumps global num_errors)."""
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Scan 'readelf -u' output; each function header flushes the previous
# function's accumulated totals through check_func().
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        check_func(func, slots, rlen_sum)
        func  = m.group(1)
        start = long(m.group(2), 16)
        end   = long(m.group(3), 16)
        # ia64: 3 instruction slots per 16-byte bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
googleapis/python-analytics-admin | samples/accounts_user_links_update_test.py | 1 | 1097 | # Copyright 2021 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import accounts_user_links_update
# Placeholder IDs: the API call is expected to be rejected with a
# permission error before these values are ever validated server-side.
FAKE_ACCOUNT_ID = "1"
FAKE_ACCOUNT_USER_LINK_ID = "1"
def test_accounts_user_links_update():
    """Smoke test: the update call must reach the server and be rejected
    there with a permission error, rather than failing client-side."""
    expected_error = "403 The caller does not have permission"
    with pytest.raises(Exception, match=expected_error):
        accounts_user_links_update.update_account_user_link(
            FAKE_ACCOUNT_ID, FAKE_ACCOUNT_USER_LINK_ID
        )
| apache-2.0 |
theju/atifier | server/core/migrations/0001_initial.py | 1 | 1971 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-15 12:20
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the WebPage scrape
    configuration model and the PageScrapeResult output model."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Stores one scrape output per run (FK to WebPage added below).
        migrations.CreateModel(
            name='PageScrapeResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('output', models.TextField(null=True)),
                ('hash', models.TextField(null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-updated_on', '-created_on'],
            },
        ),
        # Scrape configuration: URL, CSS selector, polling interval.
        migrations.CreateModel(
            name='WebPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('feed_name', models.CharField(max_length=50, unique=True, validators=[django.core.validators.RegexValidator(regex='[\\w\\-]+')])),
                ('url', models.URLField()),
                ('selector', models.TextField()),
                ('interval', models.PositiveIntegerField(default=5)),
                ('max_results', models.PositiveIntegerField(default=100)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-updated_on', '-created_on'],
            },
        ),
        # FK added after both models exist; deleting a WebPage cascades
        # to its results.
        migrations.AddField(
            model_name='pagescraperesult',
            name='page',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.WebPage'),
        ),
    ]
| mit |
Dobatymo/livestreamer | src/livestreamer/packages/pbs.py | 37 | 14321 | #===============================================================================
# Copyright (C) 2011-2012 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#===============================================================================
import subprocess as subp
import sys
import traceback
import os
import re
from glob import glob as original_glob
from types import ModuleType
from functools import partial
import warnings
import platform
__version__ = "0.110"
__project_url__ = "https://github.com/amoffat/pbs"
# Python 2/3 compatibility shims: give py3 the py2 names this module uses.
IS_PY3 = sys.version_info[0] == 3
if IS_PY3:
    raw_input = input
    unicode = str
else:
    pass
# Encoding used for all subprocess input/output round-trips.
DEFAULT_ENCODING = "utf-8"
class ErrorReturnCode(Exception):
    """Raised when a command exits with a non-zero status.

    Carries the full command line and the captured stdout/stderr; the
    exception message embeds truncated previews of both streams.
    """
    # Maximum number of captured bytes echoed into the exception message.
    truncate_cap = 200
    def __init__(self, full_cmd, stdout, stderr):
        self.full_cmd = full_cmd
        self.stdout = stdout
        self.stderr = stderr
        # A redirected stream was not captured, so there is nothing to show.
        if self.stdout is None: tstdout = "<redirected>"
        else:
            tstdout = self.stdout[:self.truncate_cap]
            out_delta = len(self.stdout) - len(tstdout)
            if out_delta:
                tstdout += ("... (%d more, please see e.stdout)" % out_delta).encode()
        if self.stderr is None: tstderr = "<redirected>"
        else:
            tstderr = self.stderr[:self.truncate_cap]
            err_delta = len(self.stderr) - len(tstderr)
            if err_delta:
                tstderr += ("... (%d more, please see e.stderr)" % err_delta).encode()
        msg = "\n\nRan: %r\n\nSTDOUT:\n\n %s\n\nSTDERR:\n\n %s" %\
            (full_cmd, tstdout.decode(DEFAULT_ENCODING, "replace"),
             tstderr.decode(DEFAULT_ENCODING, "replace"))
        super(ErrorReturnCode, self).__init__(msg)
class CommandNotFound(Exception): pass
# Maps exit codes to dynamically created ErrorReturnCode subclasses so
# callers can catch e.g. ErrorReturnCode_1 specifically; the regex lets
# attribute lookups recognize those generated names.
rc_exc_regex = re.compile("ErrorReturnCode_(\d+)")
rc_exc_cache = {}
def get_rc_exc(rc):
    """Return (and memoize) the ErrorReturnCode subclass for exit status *rc*."""
    rc = int(rc)
    cached = rc_exc_cache.get(rc)
    if cached is not None:
        return cached
    exc = type("ErrorReturnCode_%d" % rc, (ErrorReturnCode,), {})
    rc_exc_cache[rc] = exc
    return exc
def which(program):
    """Return the path to an executable *program*, or None.

    A name containing a path separator is checked directly; a bare name
    is searched for along the PATH environment variable.
    """
    def is_exe(candidate):
        return os.path.exists(candidate) and os.access(candidate, os.X_OK)

    directory, _basename = os.path.split(program)
    if directory:
        return program if is_exe(program) else None
    for path_dir in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path_dir, program)
        if is_exe(candidate):
            return candidate
    return None
def resolve_program(program):
    """Resolve *program* to an executable path, or None.

    Python identifiers cannot contain dashes, so an underscore name may
    really refer to a dashed command (e.g. apt_get -> apt-get); that
    variant is tried as a fallback.
    """
    path = which(program)
    if path:
        return path
    if "_" in program:
        path = which(program.replace("_", "-"))
    return path or None
def glob(arg):
    """Like glob.glob, but return the literal pattern when nothing matches."""
    matches = original_glob(arg)
    return matches if matches else arg
def create_command(cmd):
    """Factory wrapper around Command._create for external callers."""
    return Command._create(cmd)
class RunningCommand(object):
    """Wraps a started subprocess and its (lazily evaluated) output.

    In the default mode it blocks immediately in __init__; with _bg=True
    it stays running until an attribute/string access forces wait(), and
    with _with=True it acts purely as a context-manager marker.
    """
    def __init__(self, command_ran, process, call_args, stdin=None):
        self.command_ran = command_ran
        self.process = process
        self._stdout = None
        self._stderr = None
        self.call_args = call_args
        # we're running in the background, return self and let us lazily
        # evaluate
        if self.call_args["bg"]:
            return
        # we're running this command as a with context, don't do anything
        # because nothing was started to run from Command.__call__
        if self.call_args["with"]: return
        # run and block
        if stdin: stdin = stdin.encode(DEFAULT_ENCODING)
        self._stdout, self._stderr = self.process.communicate(stdin)
        self._handle_exit_code(self.process.wait())
    def __enter__(self):
        # we don't actually do anything here because anything that should
        # have been done would have been done in the Command.__call__ call.
        # essentially all that has to happen is the comand be pushed on
        # the prepend stack.
        pass
    def __exit__(self, typ, value, traceback):
        # Leaving a 'with command:' block pops the prepend stack again.
        if self.call_args["with"] and Command._prepend_stack:
            Command._prepend_stack.pop()
    def __str__(self):
        if IS_PY3: return self.__unicode__()
        else: return unicode(self).encode(DEFAULT_ENCODING)
    def __unicode__(self):
        # Forces a background command to finish before its output is read.
        if self.process:
            if self.call_args["bg"]: self.wait()
            if self._stdout: return self.stdout()
            else: return ""
    def __eq__(self, other):
        return unicode(self) == unicode(other)
    __hash__ = None # Avoid DeprecationWarning in Python < 3
    def __contains__(self, item):
        return item in str(self)
    def __getattr__(self, p):
        # let these three attributes pass through to the Popen object
        if p in ("send_signal", "terminate", "kill"):
            if self.process: return getattr(self.process, p)
            else: raise AttributeError
        # Everything else is delegated to the output string.
        return getattr(unicode(self), p)
    def __repr__(self):
        return "<RunningCommand %r, pid:%d, special_args:%r" % (
            self.command_ran, self.process.pid, self.call_args)
    def __long__(self):
        return long(str(self).strip())
    def __float__(self):
        return float(str(self).strip())
    def __int__(self):
        return int(str(self).strip())
    def stdout(self):
        """Return the decoded stdout, waiting for a background command first."""
        if self.call_args["bg"]: self.wait()
        return self._stdout.decode(DEFAULT_ENCODING, "replace")
    def stderr(self):
        """Return the decoded stderr, waiting for a background command first."""
        if self.call_args["bg"]: self.wait()
        return self._stderr.decode(DEFAULT_ENCODING, "replace")
    def wait(self):
        """Block until the process finishes, capture output, check exit code."""
        self._stdout, self._stderr = self.process.communicate()
        self._handle_exit_code(self.process.wait())
        return str(self)
    def _handle_exit_code(self, rc):
        # Raise the code-specific ErrorReturnCode_N unless rc is whitelisted.
        if rc not in self.call_args["ok_code"]:
            raise get_rc_exc(rc)(self.command_ran, self._stdout, self._stderr)
    def __len__(self):
        return len(str(self))
class Command(object):
    """Callable wrapper around a resolved program path.

    Attribute access dynamically "bakes" subcommands (e.g. git.branch becomes
    "git branch"), and calling the instance launches the program through
    subprocess, returning a RunningCommand.
    """
    # Stack of command prefixes pushed/popped by "with" contexts.
    _prepend_stack = []
    # Defaults for the special underscore-prefixed keyword options.
    call_args = {
        "fg": False, # run command in foreground
        "bg": False, # run command in background
        "with": False, # prepend the command to every command after it
        "out": None, # redirect STDOUT
        "err": None, # redirect STDERR
        "err_to_out": None, # redirect STDERR to STDOUT
        "in": None,
        "env": os.environ,
        "cwd": None,
        # this is for commands that may have a different exit status than the
        # normal 0. this can either be an integer or a list/tuple of ints
        "ok_code": 0,
    }
    @classmethod
    def _create(cls, program):
        """Resolve *program* on the PATH or raise CommandNotFound."""
        path = resolve_program(program)
        if not path: raise CommandNotFound(program)
        return cls(path)
    def __init__(self, path):
        self._path = path
        self._partial = False
        self._partial_baked_args = []
        self._partial_call_args = {}
    def __getattribute__(self, name):
        """Turn attribute access into subcommand baking.

        Underscore names resolve normally; anything else returns
        self.bake(name), i.e. a new Command with the attribute appended.
        """
        # convenience
        getattr = partial(object.__getattribute__, self)
        if name.startswith("_"): return getattr(name)
        if name == "bake": return getattr("bake")
        return getattr("bake")(name)
    @staticmethod
    def _extract_call_args(kwargs):
        """Split _-prefixed special options out of kwargs.

        Returns (call_args, remaining_kwargs); kwargs is copied, not mutated.
        """
        kwargs = kwargs.copy()
        call_args = Command.call_args.copy()
        for parg, default in call_args.items():
            key = "_" + parg
            if key in kwargs:
                call_args[parg] = kwargs[key]
                del kwargs[key]
        return call_args, kwargs
    def _format_arg(self, arg):
        """Coerce *arg* to a (py2: encoded) string; quote/escape when baked."""
        if IS_PY3:
            arg = str(arg)
        else:
            try:
                arg = unicode(arg, DEFAULT_ENCODING).encode(DEFAULT_ENCODING)
            except TypeError:
                arg = unicode(arg).encode(DEFAULT_ENCODING)
        if self._partial:
            # Escape characters significant inside double quotes.
            escaped = arg.replace('"', '\\"')
            escaped = escaped.replace("$", "\$")
            escaped = escaped.replace("`", "\`")
            arg = '"{0}"'.format(escaped)
        return arg
    def _compile_args(self, args, kwargs):
        """Flatten positional and keyword args into a command-line list."""
        processed_args = []
        # aggregate positional args
        for arg in args:
            if isinstance(arg, (list, tuple)):
                if not arg:
                    # NOTE(review): self.path (no underscore) goes through
                    # __getattribute__ and bakes a subcommand — confirm the
                    # intended attribute here was self._path.
                    warnings.warn("Empty list passed as an argument to %r. \
If you're using glob.glob(), please use pbs.glob() instead." % self.path, stacklevel=3)
                for sub_arg in arg: processed_args.append(self._format_arg(sub_arg))
            else: processed_args.append(self._format_arg(arg))
        # aggregate the keyword arguments
        for k,v in kwargs.items():
            # we're passing a short arg as a kwarg, example:
            # cut(d="\t")
            if len(k) == 1:
                processed_args.append("-"+k)
                if v is not True: processed_args.append(self._format_arg(v))
            # we're doing a long arg
            else:
                k = k.replace("_", "-")
                if v is True: processed_args.append("--"+k)
                else: processed_args.append("--%s=%s" % (k, self._format_arg(v)))
        return processed_args
    def bake(self, *args, **kwargs):
        """Return a new Command with *args*/*kwargs* pre-applied (curried)."""
        fn = Command(self._path)
        fn._partial = True
        call_args, kwargs = self._extract_call_args(kwargs)
        # Keep only options that differ from the defaults.
        # NOTE(review): pruned_call_args aliases call_args (no copy), so the
        # loop mutates both — harmless here since call_args is unused after.
        pruned_call_args = call_args
        for k,v in Command.call_args.items():
            try:
                if pruned_call_args[k] == v:
                    del pruned_call_args[k]
            except KeyError: continue
        fn._partial_call_args.update(self._partial_call_args)
        fn._partial_call_args.update(pruned_call_args)
        fn._partial_baked_args.extend(self._partial_baked_args)
        fn._partial_baked_args.extend(fn._compile_args(args, kwargs))
        return fn
    def __str__(self):
        """The full baked command line as a native string."""
        if IS_PY3: return self.__unicode__()
        else: return unicode(self).encode(DEFAULT_ENCODING)
    def __repr__(self):
        return str(self)
    def __unicode__(self):
        baked_args = " ".join(self._partial_baked_args)
        if baked_args: baked_args = " " + baked_args
        return self._path + baked_args
    def __eq__(self, other):
        # NOTE(review): bare except silently maps any comparison error to False.
        try: return str(self) == str(other)
        except: return False
    __hash__ = None # Avoid DeprecationWarning in Python < 3
    def __enter__(self):
        # Entering "with cmd": prefix subsequent commands with this program.
        Command._prepend_stack.append([self._path])
    def __exit__(self, typ, value, traceback):
        Command._prepend_stack.pop()
    def __call__(self, *args, **kwargs):
        """Execute the command and return a RunningCommand.

        Handles "with"-context prefixes, piping from a preceding
        RunningCommand, the special _-options, and stdout/stderr redirection.
        """
        kwargs = kwargs.copy()
        args = list(args)
        cmd = []
        # aggregate any with contexts
        for prepend in self._prepend_stack: cmd.extend(prepend)
        cmd.append(self._path)
        call_args, kwargs = self._extract_call_args(kwargs)
        call_args.update(self._partial_call_args)
        # here we normalize the ok_code to be something we can do
        # "if return_code in call_args["ok_code"]" on
        if not isinstance(call_args["ok_code"], (tuple, list)):
            call_args["ok_code"] = [call_args["ok_code"]]
        # set pipe to None if we're outputting straight to CLI
        pipe = None if call_args["fg"] else subp.PIPE
        # check if we're piping via composition
        input_stream = pipe
        input_data = None
        if args:
            first_arg = args.pop(0)
            if isinstance(first_arg, RunningCommand):
                # it makes sense that if the input pipe of a command is running
                # in the background, then this command should run in the
                # background as well
                if first_arg.call_args["bg"]:
                    call_args["bg"] = True
                    input_stream = first_arg.process.stdout
                else:
                    input_data = first_arg.stdout()
            else: args.insert(0, first_arg)
        processed_args = self._compile_args(args, kwargs)
        # makes sure our arguments are broken up correctly
        split_args = self._partial_baked_args + processed_args
        final_args = split_args
        cmd.extend(final_args)
        command_ran = " ".join(cmd)
        # with contexts shouldn't run at all yet, they prepend
        # to every command in the context
        if call_args["with"]:
            Command._prepend_stack.append(cmd)
            return RunningCommand(command_ran, None, call_args)
        # stdin from string
        input = call_args["in"]
        if input:
            input_data = input
        # stdout redirection
        # NOTE(review): files opened here for out/err redirection are never
        # explicitly closed; they are released only on GC.
        stdout = pipe
        out = call_args["out"]
        if out:
            if hasattr(out, "write"): stdout = out
            else: stdout = open(str(out), "w")
        # stderr redirection
        stderr = pipe
        err = call_args["err"]
        if err:
            if hasattr(err, "write"): stderr = err
            else: stderr = open(str(err), "w")
        if call_args["err_to_out"]: stderr = subp.STDOUT
        # leave shell=False
        process = subp.Popen(cmd, shell=False, env=call_args["env"],
            cwd=call_args["cwd"],
            stdin=input_stream, stdout=stdout, stderr=stderr)
        return RunningCommand(command_ran, process, call_args, input_data)
| bsd-2-clause |
Lujeni/ansible | test/units/modules/network/fortios/test_fortios_switch_controller_qos_dot1p_map.py | 21 | 11217 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_switch_controller_qos_dot1p_map
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture stubbing out the module's Connection class."""
    return mocker.patch(
        'ansible.modules.network.fortios.fortios_switch_controller_qos_dot1p_map.Connection')
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_qos_dot1p_map_creation(mocker):
    """state=present with a succeeding API call must invoke set() and report a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    # Module-side keys use underscores (priority_0 .. priority_7).
    dot1p_map = {'description': 'test_value_3', 'name': 'default_name_4'}
    dot1p_map.update(('priority_%d' % i, 'queue-0') for i in range(8))
    input_data = {'username': 'admin',
                  'state': 'present',
                  'switch_controller_qos_dot1p_map': dot1p_map,
                  'vdom': 'root'}

    is_error, changed, response = fortios_switch_controller_qos_dot1p_map.fortios_switch_controller_qos(input_data, fos_instance)

    # API-side keys are dash-separated (priority-0 .. priority-7).
    expected_data = {'description': 'test_value_3', 'name': 'default_name_4'}
    expected_data.update(('priority-%d' % i, 'queue-0') for i in range(8))
    set_method_mock.assert_called_with('switch-controller.qos', 'dot1p-map', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_switch_controller_qos_dot1p_map_creation_fails(mocker):
    """A failing set() call must surface is_error and report no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    # Module-side keys use underscores (priority_0 .. priority_7).
    dot1p_map = {'description': 'test_value_3', 'name': 'default_name_4'}
    dot1p_map.update(('priority_%d' % i, 'queue-0') for i in range(8))
    input_data = {'username': 'admin',
                  'state': 'present',
                  'switch_controller_qos_dot1p_map': dot1p_map,
                  'vdom': 'root'}

    is_error, changed, response = fortios_switch_controller_qos_dot1p_map.fortios_switch_controller_qos(input_data, fos_instance)

    # API-side keys are dash-separated (priority-0 .. priority-7).
    expected_data = {'description': 'test_value_3', 'name': 'default_name_4'}
    expected_data.update(('priority-%d' % i, 'queue-0') for i in range(8))
    set_method_mock.assert_called_with('switch-controller.qos', 'dot1p-map', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_switch_controller_qos_dot1p_map_removal(mocker):
    """state=absent must call delete() and report a change on success."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    dot1p_map = {'description': 'test_value_3', 'name': 'default_name_4'}
    dot1p_map.update(('priority_%d' % i, 'queue-0') for i in range(8))
    input_data = {'username': 'admin',
                  'state': 'absent',
                  'switch_controller_qos_dot1p_map': dot1p_map,
                  'vdom': 'root'}

    is_error, changed, response = fortios_switch_controller_qos_dot1p_map.fortios_switch_controller_qos(input_data, fos_instance)

    delete_method_mock.assert_called_with('switch-controller.qos', 'dot1p-map', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_switch_controller_qos_dot1p_map_deletion_fails(mocker):
    """A failing delete() call must surface is_error and report no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    dot1p_map = {'description': 'test_value_3', 'name': 'default_name_4'}
    dot1p_map.update(('priority_%d' % i, 'queue-0') for i in range(8))
    input_data = {'username': 'admin',
                  'state': 'absent',
                  'switch_controller_qos_dot1p_map': dot1p_map,
                  'vdom': 'root'}

    is_error, changed, response = fortios_switch_controller_qos_dot1p_map.fortios_switch_controller_qos(input_data, fos_instance)

    delete_method_mock.assert_called_with('switch-controller.qos', 'dot1p-map', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_switch_controller_qos_dot1p_map_idempotent(mocker):
    """A 404 on set() means nothing to change: no error, no change reported."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    dot1p_map = {'description': 'test_value_3', 'name': 'default_name_4'}
    dot1p_map.update(('priority_%d' % i, 'queue-0') for i in range(8))
    input_data = {'username': 'admin',
                  'state': 'present',
                  'switch_controller_qos_dot1p_map': dot1p_map,
                  'vdom': 'root'}

    is_error, changed, response = fortios_switch_controller_qos_dot1p_map.fortios_switch_controller_qos(input_data, fos_instance)

    expected_data = {'description': 'test_value_3', 'name': 'default_name_4'}
    expected_data.update(('priority-%d' % i, 'queue-0') for i in range(8))
    set_method_mock.assert_called_with('switch-controller.qos', 'dot1p-map', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_switch_controller_qos_dot1p_map_filter_foreign_attributes(mocker):
    """Attributes unknown to the schema must be stripped before calling set()."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    # Include one attribute that is not part of the module schema.
    dot1p_map = {'random_attribute_not_valid': 'tag',
                 'description': 'test_value_3',
                 'name': 'default_name_4'}
    dot1p_map.update(('priority_%d' % i, 'queue-0') for i in range(8))
    input_data = {'username': 'admin',
                  'state': 'present',
                  'switch_controller_qos_dot1p_map': dot1p_map,
                  'vdom': 'root'}

    is_error, changed, response = fortios_switch_controller_qos_dot1p_map.fortios_switch_controller_qos(input_data, fos_instance)

    # The foreign attribute must not appear in the API payload.
    expected_data = {'description': 'test_value_3', 'name': 'default_name_4'}
    expected_data.update(('priority-%d' % i, 'queue-0') for i in range(8))
    set_method_mock.assert_called_with('switch-controller.qos', 'dot1p-map', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
a-y-u-s-h/QuarkWebsite2017 | quark/lib/python2.7/site-packages/wheel/util.py | 345 | 4890 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
"""urlsafe_b64encode without padding"""
return base64.urlsafe_b64encode(data).rstrip(binary('='))
def urlsafe_b64decode(data):
    """urlsafe_b64decode that tolerates missing '=' padding.

    :param data: URL-safe base64 bytes, possibly unpadded
    :returns: decoded bytes
    """
    missing = len(data) & 3
    return base64.urlsafe_b64decode(data + b'=' * (4 - missing))
def to_json(o):
    """Serialize *o* to a JSON string with deterministically sorted keys."""
    return json.dumps(o, sort_keys=True)
def from_json(j):
    """Parse the JSON string *j* back into Python objects."""
    return json.loads(j)
def open_for_csv(name, mode):
    """Open *name* for use with the csv module across Python versions.

    Python 3 requires newline='' (csv does its own newline handling);
    Python 2 requires binary mode instead.
    """
    if sys.version_info[0] >= 3:
        return open(name, mode, newline='')
    return open(name, mode + 'b')
try:
    # Python 2: 'unicode' exists, so encode unicode text to UTF-8 bytes.
    unicode
    def utf8(data):
        '''Utf-8 encode data.'''
        if isinstance(data, unicode):
            return data.encode('utf-8')
        return data
except NameError:
    # Python 3: encode str to UTF-8 bytes; bytes pass through unchanged.
    def utf8(data):
        '''Utf-8 encode data.'''
        if isinstance(data, str):
            return data.encode('utf-8')
        return data
try:
    # For encoding ascii back and forth between bytestrings, as is repeatedly
    # necessary in JSON-based crypto under Python 3.
    # Python 2 branch: 'unicode' exists.
    unicode
    def native(s):
        '''Return *s* as the native string type (py2: bytes).'''
        return s
    def binary(s):
        '''Return *s* as ascii bytes.'''
        if isinstance(s, unicode):
            return s.encode('ascii')
        return s
except NameError:
    # Python 3 branch.
    def native(s):
        '''Return *s* as the native string type (py3: str).'''
        if isinstance(s, bytes):
            return s.decode('ascii')
        return s
    def binary(s):
        '''Return *s* as ascii bytes.'''
        if isinstance(s, str):
            return s.encode('ascii')
        # BUG FIX: previously fell through and returned None for bytes input;
        # mirror the Python 2 branch and pass bytes through unchanged.
        return s
class HashingFile(object):
    """Write-through wrapper that hashes and counts everything written."""

    def __init__(self, fd, hashtype='sha256'):
        self.fd = fd
        self.hashtype = hashtype
        self.hash = hashlib.new(hashtype)
        self.length = 0

    def write(self, data):
        """Hash *data*, add its length to the tally, and pass it to the fd."""
        self.hash.update(data)
        self.length += len(data)
        self.fd.write(data)

    def close(self):
        self.fd.close()

    def digest(self):
        """Hash of all written data.

        md5 digests are historically reported as plain hex; everything else
        uses the RECORD-style "<alg>=<urlsafe-b64>" form.
        """
        if self.hashtype == 'md5':
            return self.hash.hexdigest()
        return self.hashtype + '=' + native(urlsafe_b64encode(self.hash.digest()))
class OrderedDefaultDict(OrderedDict):
    """OrderedDict with defaultdict-style on-demand value creation."""

    def __init__(self, *args, **kwargs):
        factory = None
        if args:
            factory = args[0]
            if factory is not None and not callable(factory):
                raise TypeError('first argument must be callable or None')
            args = args[1:]
        self.default_factory = factory
        super(OrderedDefaultDict, self).__init__(*args, **kwargs)

    def __missing__(self, key):
        # Without a factory we behave like a plain dict on missing keys.
        if self.default_factory is None:
            raise KeyError(key)
        value = self.default_factory()
        self[key] = value
        return value
if sys.platform == 'win32':
    import ctypes.wintypes
    # CSIDL_APPDATA for reference - not used here for compatibility with
    # dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
    csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
                 CSIDL_COMMON_APPDATA=35)
    def get_path(name):
        """Resolve a CSIDL folder name to its current filesystem path."""
        SHGFP_TYPE_CURRENT = 0
        buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
        ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
        return buf.value
    def save_config_path(*resource):
        """Return (creating it if needed) the per-user config dir for *resource*."""
        appdata = get_path("CSIDL_LOCAL_APPDATA")
        path = os.path.join(appdata, *resource)
        if not os.path.isdir(path):
            os.makedirs(path)
        return path
    def load_config_paths(*resource):
        """Yield existing config dirs for *resource*, per-user dir first."""
        ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
        for id in ids:
            base = get_path(id)
            path = os.path.join(base, *resource)
            if os.path.exists(path):
                yield path
else:
    # Non-Windows: delegate to the XDG Base Directory implementation.
    def save_config_path(*resource):
        import xdg.BaseDirectory
        return xdg.BaseDirectory.save_config_path(*resource)
    def load_config_paths(*resource):
        import xdg.BaseDirectory
        return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
    """List of wheels matching a requirement.

    :param req: The requirement to satisfy
    :param wheels: List of wheels to search.
    """
    try:
        from pkg_resources import Distribution, Requirement
    except ImportError:
        raise RuntimeError("Cannot use requirements without pkg_resources")
    requirement = Requirement.parse(req)
    matching = []
    for wheel in wheels:
        parsed = wheel.parsed_filename
        dist = Distribution(project_name=parsed.group("name"),
                            version=parsed.group("ver"))
        if dist in requirement:
            matching.append(wheel)
    return matching
| apache-2.0 |
diefenbach/django-lfc | lfc/tests/general_tests.py | 1 | 3131 | # django imports
from django.test import TestCase
from django.utils import translation
# permissions imports
import permissions.utils
# lfc imports
from lfc.models import Application
from lfc.models import Portal
from lfc.tests.utils import create_request
from lfc.utils import import_module
from lfc.utils import delete_cache
from lfc.utils import get_cache
from lfc.utils import set_cache
# lfc_page imports
from lfc_page.models import Page
class GeneralTestCase(TestCase):
    """Smoke tests for LFC content types and the hierarchical cache helpers.
    """
    def setUp(self):
        """Install the lfc_page app and create a Portal plus one Page fixture.
        """
        from lfc.utils.initialize import initialize
        initialize()
        import_module("lfc_page").install()
        # NOTE(review): the except clause guards a create() call with
        # DoesNotExist, which create() does not raise — confirm intent.
        try:
            Application.objects.create(name="lfc_page")
        except Application.DoesNotExist:
            pass
        self.p = Portal.objects.create()
        self.p.notification_emails = "john@doe.com, jane@doe.com"
        self.p1 = Page.objects.create(title="Page 1", slug="page-1")
    def test_content_type(self):
        """Portal and Page expose their content type slug and display name.
        """
        self.assertEqual(self.p.content_type, u"portal")
        self.assertEqual(self.p.get_content_type(), u"Portal")
        self.assertEqual(self.p1.content_type, u"page")
        self.assertEqual(self.p1.get_content_type(), u"Page")
    def test_cache_1(self):
        """Hierarchical cache keys: validation, nested set/get, grouped reads.
        """
        # Keys must be lists with at least two elements; values must be given.
        self.assertRaises(AssertionError, set_cache, ["1"], "hurz_0")
        self.assertRaises(AssertionError, get_cache, ["1"])
        self.assertRaises(AssertionError, set_cache, "12", "hurz_0")
        # set_cache(["1", "2"], u"hurz_1")
        # temp = get_cache(["1", "2"])
        # self.assertEqual(temp, u"hurz_1")
        # delete_cache("1")
        # temp = get_cache(["1", "2"])
        # self.assertEqual(temp, None)
        set_cache(["A", "B", "C"], u"hurz_2")
        temp = get_cache(["A", "B", "C"])
        self.assertEqual(temp, u"hurz_2")
        set_cache(["1", "2", "3", "4"], u"hurz_3")
        set_cache(["1", "2", "3", "5"], u"hurz_4")
        temp = get_cache(["1", "2", "3", "4"])
        self.assertEqual(temp, u"hurz_3")
        temp = get_cache(["1", "2", "3", "5"])
        self.assertEqual(temp, u"hurz_4")
        # Reading a prefix returns all children keyed by their last segment.
        temp = get_cache(["1", "2", "3"])
        self.assertEqual(temp, {'5': u'hurz_4', '4': u'hurz_3'})
    def test_cache_2(self):
        """Deleting a root key invalidates every entry under that prefix.
        """
        set_cache([1, "portlets", "left-slot"], u"portlets left")
        set_cache([1, "portlets", "right-slot"], u"portlets right")
        set_cache([1, "children"], ["c1", "c2"])
        self.assertEqual(get_cache(["1", "portlets", "left-slot"]), u"portlets left")
        self.assertEqual(get_cache(["1", "portlets", "right-slot"]), u"portlets right")
        self.assertEqual(get_cache(["1", "children"]), ["c1", "c2"])
        delete_cache("1")
        self.assertEqual(get_cache(["1", "portlets"]), None)
        self.assertEqual(get_cache(["1", "children"]), None)
    def test_cache_3(self):
        """Deleting an intermediate prefix leaves an empty child mapping.
        """
        set_cache(["1", "2", "3", "4"], u"hurz_3")
        delete_cache(["1", "2", "3"])
        temp = get_cache(["1", "2"])
        self.assertEqual(temp, {})
| bsd-3-clause |
ninapavlich/sitetest | sitetest/core/models.py | 1 | 53415 | # -*- coding: utf-8 -*-
import cookielib
import datetime
import httplib
import os
import re
import requests
import ssl
import sys
import traceback
import urllib2
import urllib
import urlparse
import zlib
import logging
from functools import wraps
from slugify import slugify
from bs4 import BeautifulSoup
try:
import cPickle as pickle
except ImportError:
import pickle
logger = logging.getLogger('sitetest')
# Link classification buckets used throughout the crawler.
TYPE_OTHER = 'other'
TYPE_MAILTO = 'mailto'
TYPE_INTERNAL = 'internal'
TYPE_EXTERNAL = 'external'
# File-extension groups used to decide whether a URL is media (and therefore
# loaded but not parsed for child links).
IMAGE_SUFFIXES = [
    '.png', '.jpg', '.jpeg', '.gif', '.ico', '.svg'
]
FONT_SUFFIXES = [
    '.otf', '.ttf', '.eot', '.cff', '.afm', '.lwfn', '.ffil', '.fon', '.pfm', '.woff', '.std', '.pro', '.xsf'
]
MEDIA_SUFFIXES = IMAGE_SUFFIXES + FONT_SUFFIXES + [
    '.doc', '.pdf', '.ppt', '.zip', '.gzip', '.mp3', '.rar', '.exe',
    '.avi', '.mpg', '.tif', '.wav', '.mov', '.psd', '.ai', '.wma',
    '.eps', '.mp4', '.bmp', '.indd', '.swf', '.jar', '.dmg', '.iso', '.flv',
    '.gz', '.fla', '.ogg', '.sql'
]
# HTTP status codes treated as redirects when following responses.
REDIRECT_CODES = [301, 302, 303]
# Browser-like request headers so sites serve the crawler normal content.
USER_AGENT_STRING = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 Sitetest'
HEADERS = {'User-Agent': USER_AGENT_STRING,
           # 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',  # TODO: image/webp
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'gzip, deflate',  # TODO: gzip, deflate, sdch
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
# Feature flag: use the requests library instead of urllib2 for fetching.
USE_REQUESTS = False
def sslwrap(func):
    """Decorator forcing ssl_version=PROTOCOL_TLSv1 on every call to *func*.

    NOTE(review): pinning connections to TLS 1.0 is obsolete and insecure by
    modern standards, and ssl.wrap_socket was removed in Python 3.12 —
    confirm this shim is still required for the targeted runtime.
    """
    @wraps(func)
    def bar(*args, **kw):
        kw['ssl_version'] = ssl.PROTOCOL_TLSv1
        return func(*args, **kw)
    return bar
# Globally monkey-patch ssl.wrap_socket so connections made by urllib2 /
# requests below negotiate TLSv1.
ssl.wrap_socket = sslwrap(ssl.wrap_socket)
class MessageCategory(object):
    """Named bucket grouping related test messages (e.g. loading errors)."""

    key = None
    message = None
    info = None
    level = None
    messages = []

    def __init__(self, key, message, level, label, info=''):
        # Per-instance list shadows the shared class-level default above.
        self.key, self.message, self.level = key, message, level
        self.label, self.info = label, info
        self.messages = []

    def add_message(self, link, message):
        """Record that *message* occurred for *link* in this category."""
        self.messages.append(dict(link=link, message=message))
class MessageSet(object):
    """Success/error/warning/info message lists for one tested object."""

    success = None
    error = None
    warning = None
    info = None
    verbose = False

    def __init__(self, verbose=False):
        self.verbose = verbose
        self.success, self.error, self.warning, self.info = [], [], [], []

    def get_score(self):
        """Summary string "errors-warnings-infos" of weighted message counts."""
        totals = [sum(item.count for item in bucket)
                  for bucket in (self.error, self.warning, self.info)]
        return "%s-%s-%s" % tuple(totals)
class Message(object):
    """A single report message with an occurrence count and optional category.

    BUG FIX: the original listed only ['message', 'count'] in __slots__ while
    also defining class variables of the same names — that combination raises
    ValueError at class-creation time — and __init__ assigned self.category,
    which was missing from __slots__ entirely. All three instance attributes
    are now declared in __slots__ and the conflicting class defaults removed.
    """
    __slots__ = ['message', 'category', 'count']

    def __init__(self, message, category=None, count=1):
        self.message = message
        self.category = category
        self.count = count
# Marker subclasses used to tag which bucket a message belongs to.
class SuccessMessage(Message):
    pass
class ErrorMessage(Message):
    pass
class WarningMessage(Message):
    pass
class InfoMessage(Message):
    pass
class BaseMessageable(object):
    """Mixin holding a MessageSet plus helpers to record categorized messages."""

    messages = None
    verbose = False

    def __init__(self):
        pass

    def get_score(self):
        """Delegate to the MessageSet score, or None when no messages exist."""
        return self.messages.get_score() if self.messages else None

    def _record(self, bucket, message_cls, message, category, count):
        # Append to the given MessageSet bucket and mirror into the category.
        bucket.append(message_cls(message, category, count))
        if category:
            category.add_message(self, message)

    def add_error_message(self, message, category=None, count=1):
        if self.verbose:
            logger.error("ERROR: %s" % (message))
        self._record(self.messages.error, ErrorMessage, message, category, count)

    def add_warning_message(self, message, category=None, count=1):
        if self.verbose:
            logger.warn("WARNING: %s" % (message))
        self._record(self.messages.warning, WarningMessage, message, category, count)

    def add_info_message(self, message, category=None, count=1):
        self._record(self.messages.info, InfoMessage, message, category, count)

    def add_success_message(self, message, category=None, count=1):
        self._record(self.messages.success, SuccessMessage, message, category, count)
class LinkSet(BaseMessageable):
    """Registry of every link discovered while crawling the tested site.

    Tracks load/parse progress, alias and skip rules, and the aggregated
    message categories across all links.
    """
    # NOTE(review): the mutable containers below are class-level defaults
    # that the methods mutate in place (never rebound in __init__), so they
    # are shared across LinkSet instances — confirm single-instance use.
    include_media = False
    canonical_domain = None
    domain_aliases = None
    legacy_domains = None
    ignore_query_string_keys = []
    alias_query_strings = []
    current_links = {}
    loaded_links = {}
    parsed_links = {}
    parsable_links = {}
    loadable_links = {}
    test_results = None
    message_categories = []
    message_category_hash = {}
    def __init__(self, options, canonical_domain, domain_aliases, legacy_domains, test_category_id, test_batch_id, verbose=False):
        """Configure the crawl from an *options* dict plus domain settings.

        Unspecified options fall back to permissive defaults (test media and
        external links, no skip patterns, no parse limit, no basic auth).
        """
        if verbose:
            logger.debug('Loading link set...')
        self.test_category_id = test_category_id
        self.test_batch_id = test_batch_id
        self.verbose = verbose
        self.messages = MessageSet(verbose)
        # Boolean options go through truthy() so string values like "false"
        # from config files are interpreted correctly.
        self.include_media = True if 'test_media' not in options else truthy(options['test_media'])
        self.include_external_links = True if 'test_external_links' not in options else truthy(options['test_external_links'])
        self.canonical_domain = canonical_domain
        self.domain_aliases = domain_aliases
        self.legacy_domains = legacy_domains
        self.ignore_query_string_keys = [] if 'ignore_query_string_keys' not in options else options['ignore_query_string_keys']
        self.alias_query_strings = [] if 'alias_query_strings' not in options else options['alias_query_strings']
        self.skip_test_urls = [] if 'skip_test_urls' not in options else options['skip_test_urls']
        self.skip_urls = [] if 'skip_urls' not in options else options['skip_urls']
        self.max_parse_count = None if 'max_parse_count' not in options else options['max_parse_count']
        if self.max_parse_count:
            self.max_parse_count = int(self.max_parse_count)
        self.use_basic_auth = False if 'use_basic_auth' not in options else truthy(options['use_basic_auth'])
        self.basic_auth_username = '' if not self.use_basic_auth else options['basic_auth_username']
        self.basic_auth_password = '' if not self.use_basic_auth else options['basic_auth_password']
        # Pre-register the standard message categories used while crawling.
        self.loading_error = self.get_or_create_message_category('loading-error', "Loading error", 'danger')
        self.parsing_error = self.get_or_create_message_category('parsing-error', "Parsing error", 'danger')
        self.legacy_domain_error = self.get_or_create_message_category('legacy-domain-error', "Legacy domain warning", 'warning')
        super(LinkSet, self).__init__()
def get_or_create_message_category(self, key, message, level, info=''):
labels = {
'danger': "Error",
'warning': "Warning",
'info': "Info"
}
if key not in self.message_category_hash:
category = MessageCategory(key, message, level, labels[level], info)
self.message_categories.append(category)
self.message_category_hash[key] = category
self.message_categories.sort(key=lambda x: x.key)
else:
category = self.message_category_hash[key]
return category
@property
def robots_url(self):
return "%srobots.txt" % (self.canonical_domain) if self.canonical_domain.endswith("/") else "%s/robots.txt" % (self.canonical_domain)
@property
def robots_link(self):
return self.current_links[self.robots_url]
@property
def default_sitemap_url(self):
return "%ssitemap.xml" % (self.canonical_domain) if self.canonical_domain.endswith("/") else "%s/sitemap.xml" % (self.canonical_domain)
@property
def sitemap_links(self):
return [self.current_links[url] for url in self.current_links if self.current_links[url].is_sitemap]
    def load_link(self, page_link, recursive, expected_code=200, force=False):
        """Load *page_link*, optionally recursing into links it contains.

        Skips once max_parse_count is reached (unless *force*), only loads
        link types enabled by the options, and records load/parse progress
        in the instance bookkeeping dicts.
        """
        should_skip = self.max_parse_count and (len(self.parsed_links) >= self.max_parse_count) and not force
        if should_skip:
            # print "Max parse count %s hit, turn recursive off"%(self.max_parse_count)
            return
        is_loadable = page_link.is_loadable_type(self.include_media, self.include_external_links)
        not_already_loaded = (page_link.url not in self.loaded_links)
        if is_loadable and not_already_loaded:
            if self.verbose:
                # trace_memory_usage()
                referer_list = [link for link in page_link.referers]
                logger.debug(">>> Load Link %s (parsed: %s/%s, loaded: %s/%s, total: %s)" % (page_link.__unicode__(), len(self.parsed_links), len(self.parsable_links), len(self.loaded_links), len(self.loadable_links), len(self.current_links)))
                if '#' in page_link.__unicode__():
                    logger.debug("----- From: %s" % (referer_list))
            load_successful, response = page_link.load(self, expected_code)
            # record that we have parsed it
            if page_link.url not in self.loaded_links:
                self.loaded_links[page_link.url] = page_link
            # parse child links of internal pages and css only
            if page_link.likely_parseable_type:
                # if self.verbose:
                #     # trace_memory_usage()
                #     print ">>> Parse Link %s (%s/%s, %s)"%(page_link.__unicode__(), len(self.parsed_links), len(self.parsable_links), len(self.current_links))
                page_link.parse_response(response, self)
                # record that we have parsed it
                if page_link.url not in self.parsed_links:
                    self.parsed_links[page_link.url] = page_link
                # Let's do it again!
                if recursive and page_link.is_parseable_type:
                    for child_link_url in page_link.links:
                        if child_link_url not in self.parsed_links:
                            self.load_link(page_link.links[child_link_url], recursive, 200)
def get_or_create_link_object(self, url, referer=None):
    """Return the LinkItem for *url*, creating and registering one if needed.

    The URL is normalized first; trailing-slash variants resolve to the
    same link. Referer, alias-query-string, and skip-pattern bookkeeping
    happen here as well. Returns None when normalization yields no URL.
    """
    incoming_url = url
    referer_url = None if not referer else referer.ending_url
    # Check against the *raw* url before normalization rewrites the domain.
    has_legacy_domain = False
    for legacy_domain in self.legacy_domains:
        if legacy_domain.lower() in url.lower():
            has_legacy_domain = True
    url = self.get_normalized_href(url, referer_url)
    slashed_url = "%s/" % (url)
    deslashed_url = url.rstrip("/")
    if not url or url == '':
        return None
    # Re-use an existing link whether it was stored with or without a trailing slash.
    if url in self.current_links:
        link = self.current_links[url]
    elif slashed_url in self.current_links:
        link = self.current_links[slashed_url]
    elif deslashed_url in self.current_links:
        link = self.current_links[deslashed_url]
    else:
        link = LinkItem(url, self, self.verbose)
        self.current_links[url] = link
        if link.likely_parseable_type:
            self.parsable_links[link.url] = link
        if link.is_loadable_type(self.include_media, self.include_external_links):
            self.loadable_links[link.url] = link
    if has_legacy_domain:
        legacy_error_message = "Legacy url <mark>%s</mark> found on url <mark>%s</mark>" % (incoming_url, referer_url)
        if referer:
            referer.add_error_message(legacy_error_message, link.set.legacy_domain_error)
    # print ">>> Create Link %s (<<< %s)"%(link.__unicode__(), referer_url)
    # Don't record self-references as referers.
    if referer and referer.url != url and referer.ending_url != url:
        link.add_referer(referer)
    if self.alias_query_strings:
        alias_url = clean_query_string(url, self.alias_query_strings)
        if url != alias_url:
            link.alias_to = alias_url
    if self.skip_test_urls:
        for skip_url_pattern in self.skip_test_urls:
            regexp = re.compile(skip_url_pattern)
            if regexp.search(url):
                link.skip_test = True
    if self.skip_urls:
        for skip_url_pattern in self.skip_urls:
            regexp = re.compile(skip_url_pattern)
            if regexp.search(url):
                link.skip = True
    return link
def get_link_type(self, url):
    """Classify *url* as internal, external, mailto, or other-scheme."""
    lowered = url.lower()
    # Internal, External, Mailto, Other
    if 'mailto:' in lowered:
        return TYPE_MAILTO
    if (':' in lowered) and ('http' not in lowered):
        # Some non-http scheme, e.g. tel: or ftp:
        return TYPE_OTHER
    if '//' not in lowered:
        # Relative URLs are always internal.
        return TYPE_INTERNAL
    # a link is internal if it is relative (doesnt start with http or https)
    # or one of the domain aliases is contained in the url
    if self.canonical_domain.lower() in lowered:
        return TYPE_INTERNAL
    for known_domain in self.domain_aliases:
        if known_domain.lower() in lowered:
            return TYPE_INTERNAL
    for known_domain in self.legacy_domains:
        if known_domain.lower() in lowered:
            return TYPE_INTERNAL
    return TYPE_EXTERNAL
def get_normalized_href(self, url, normalized_parent_url=None):
    """Normalize *url* into a canonical absolute URL.

    Handles protocol-relative URLs, alias/legacy domain replacement,
    root-relative and parent-relative paths, fragment and unwanted
    query-string removal, and '..' path condensing.
    """
    debug = False
    if debug:
        logger.debug('---> get_normalized_href for %s from %s' % (url, normalized_parent_url))
    normalized = url
    # Protocol-relative URL: inherit the canonical domain's scheme.
    if normalized.startswith('//'):
        if self.canonical_domain.startswith('https'):
            normalized = "https:%s" % (normalized)
        else:
            normalized = "http:%s" % (normalized)
    # Remove invalid bits
    normalized = url_fix(normalized)
    if debug:
        logger.debug("---> fixed: %s" % (normalized))
    # If this is the homepage
    if normalized.strip('/') == self.canonical_domain.strip('/'):
        normalized = self.canonical_domain
    dequeried_parent_url = clear_query_string(normalized_parent_url)
    # remove anything after the hashtag:
    normalized = normalized.split('#')[0]
    if debug:
        logger.debug("---> dehashed: %s" % (normalized))
    # for internal urls, make main domain present
    link_type = self.get_link_type(normalized)
    if debug:
        logger.debug("---> link type is %s" % (link_type))
    if link_type == TYPE_INTERNAL:
        if self.canonical_domain not in normalized:
            # First see if it has an alias domain
            for alias in self.domain_aliases:
                if alias.lower() in normalized.lower():
                    normalized = normalized.lower().replace(alias.lower(), self.canonical_domain)
                    if debug:
                        logger.debug("---> Replace alias domain in %s with canonical: %s" % (url, normalized))
            for legacy_domain in self.legacy_domains:
                if legacy_domain.lower() in url.lower():
                    normalized = normalized.lower().replace(legacy_domain.lower(), self.canonical_domain)
            # Next, does it use an absolute path?
            if normalized.startswith('/'):
                if self.canonical_domain.endswith('/'):
                    normalized = "%s%s" % (self.canonical_domain, normalized[1:])
                else:
                    normalized = "%s%s" % (self.canonical_domain, normalized)
                if debug:
                    logger.debug("---> relative from root, replacd %s with %s" % (url, normalized))
            elif normalized.startswith('http'):
                if debug:
                    logger.debug("---> absolute url %s" % (normalized))
            # if not, it must be relative to the parent
            elif normalized_parent_url:
                if dequeried_parent_url.endswith('/'):
                    normalized = "%s%s" % (dequeried_parent_url, normalized)
                else:
                    # Parent URLs ending in a file name resolve relative to their directory.
                    parent_file_name = dequeried_parent_url.split('/')[-1]
                    contains_file_name = '.' in parent_file_name
                    if contains_file_name:
                        parent_parent_url = "/".join(dequeried_parent_url.split('/')[:-1])
                        normalized = "%s/%s" % (parent_parent_url, normalized)
                    else:
                        normalized = "%s/%s" % (dequeried_parent_url, normalized)
                if debug:
                    logger.debug("---> relative from parent, replaced %s with %s" % (url, normalized))
    # Next remove unwanted query strings:
    normalized = clean_query_string(normalized, self.ignore_query_string_keys)
    if debug:
        logger.debug("---> query string cleared: %s" % (normalized))
    if debug:
        logger.debug('---> normalized ====> %s' % (normalized))
    # Collapse '.' and '..' path segments.
    if '..' in normalized:
        pre_condensed = normalized
        # Condense the url
        url_pieces = normalized.split('/')
        domain = url_pieces[0]
        parents = []
        for url_dir in url_pieces[1:]:
            if url_dir == '.':
                continue
                # Do nothing
            elif url_dir == '..':
                parents = parents[:-1]
            else:
                parents.append(url_dir)
        consensed_path = "/".join(parents)
        normalized = "%s/%s" % (domain, consensed_path)
        if debug:
            logger.debug('%s ---> condensed ====> %s' % (pre_condensed, normalized))
    if '#' in normalized:
        logger.warn("NOT SURE HOW # has stayed in URL %s (original: %s)" % (normalized, url))
    return normalized
class LinkItem(BaseMessageable):
    """A single URL discovered during a crawl.

    Tracks where the link was found (referers), every asset it references
    (images, css, scripts, fonts, iframes, ...), and the outcome of
    loading and parsing it (response code, redirect chain, content).
    """
    # __slots__ = ['_set', 'referers', 'image_links', 'hyper_links', 'css_links',
    # 'script_links', 'url', 'ending_url', 'starting_type', 'ending_type', 'path',
    # 'response_code', 'has_response','response_content_type','redirect_path',
    # 'html','content','response_load_time','description','is_media','alias_to','skip_test','has_sitemap_entry']
    # Class-level defaults; overwritten per instance once the link is loaded.
    has_response = False
    response_code = None
    response_content_type = None
    alias_to = None
    skip_test = False
    skip = False
    has_sitemap_entry = False
    accessible_to_robots = False
    is_sitemap = False
    is_robots = False

    def __init__(self, url, set, verbose=False):
        """Create a link belonging to crawl *set* (the owning site object)."""
        self.messages = MessageSet(verbose)
        self.verbose = verbose
        self.referers = {}
        self.image_links = {}
        self.audio_links = {}
        self.video_links = {}
        self.hyper_links = {}
        # self.object_links = {}
        self.css_links = {}
        self.css_image_links = {}
        self.font_links = {}
        self.script_links = {}
        self.iframe_links = {}
        self.screenshots = {}
        # self.xhr_links = {}
        self.url = self.starting_url = self.ending_url = url
        parsed = urlparse.urlparse(url)
        name, extension = os.path.splitext(parsed.path)
        self.starting_type = self.ending_type = set.get_link_type(url)
        self.path = parsed.path
        self.domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed)
        # Media/font/image detection is purely extension-based at this point.
        self.is_media = (extension.lower() in MEDIA_SUFFIXES)
        self.is_font = (extension.lower() in FONT_SUFFIXES)
        self.is_image = (extension.lower() in IMAGE_SUFFIXES)
        self.source = None
        self.html = None
        self.title = url
        self.redirect_path = None
        self.has_sitemap_entry = False
        self.dequeried_url = clear_query_string(self.url)
        self.use_basic_auth = set.use_basic_auth
        self.basic_auth_username = set.basic_auth_username
        self.basic_auth_password = set.basic_auth_password
        self.set = set
        super(LinkItem, self).__init__()

    def __unicode__(self):
        """Render url and type, showing redirects as 'start-end' pairs."""
        url = ("%s-%s") % (self.url, self.ending_url) if self.url != self.ending_url else self.url
        type = ("%s-%s") % (self.starting_type, self.ending_type) if self.starting_type != self.ending_type else self.starting_type
        return ("%s [%s]") % (url, type)

    @property
    def page_url(self):
        """Report page this link appears on, based on how far it got."""
        if self.url in self.set.parsed_links:
            path = "parsed.html"
        elif self.url in self.set.loaded_links:
            path = "loaded.html"
        else:
            path = "other.html"
        return path

    @property
    def page_hash(self):
        """Slugified anchor id for this link's result block."""
        return slugify(u"result-%s" % (self.page_slug))

    @property
    def page_results_hash(self):
        """Slugified anchor id for this link's result body."""
        return slugify(u"result-body-%s" % (self.page_slug))

    @property
    def page_slug(self):
        """Slugified form of the url itself."""
        return slugify(u'%s' % (self.url))

    @property
    def encoded_url(self):
        """URL-encoded form suitable for use as a query value."""
        return urllib.quote_plus(self.url)

    @property
    def active_mixed_content_links(self):
        """Assets that would trigger active mixed-content warnings over https."""
        # https://community.qualys.com/blogs/securitylabs/2014/03/19/https-mixed-content-still-the-easiest-way-to-break-ssl
        # css + scripts + xhr + web sockets + iframes
        return dict(
            self.script_links.items() +
            self.css_links.items() +
            self.iframe_links.items() +
            self.css_image_links.items() +
            self.font_links.items()
        )

    @property
    def links(self):
        """All child links that should be crawled from this page."""
        return dict(self.image_links.items() + self.hyper_links.items() + self.css_links.items() + self.script_links.items())

    @property
    def content(self):
        """Prettified HTML when available, otherwise the raw response body."""
        if self.is_html or self.is_xml:
            return self.html
        # elif self.is_javascript or self.is_css or self.is_xml:
        return self.source

    @property
    def is_alias(self):
        # True when this url is an alias (query-string variant) of another.
        return self.alias_to is not None

    def is_loadable_type(self, include_media, include_external_links):
        """Whether this link should be fetched, given the crawl settings."""
        if self.skip:
            return False
        is_internal = (self.starting_type == TYPE_INTERNAL and not self.is_media)
        is_allowed_external = (self.starting_type == TYPE_EXTERNAL and include_external_links)
        is_allowed_media = (self.is_media and include_media)
        is_loadable = is_internal or is_allowed_external or is_allowed_media
        return is_loadable

    @property
    def is_parseable_type(self):
        """Whether the fetched response can be parsed for child links."""
        return self.has_response and \
            (self.is_internal or
             (self.is_css and self.is_internal) or
             (self.is_javascript and self.is_internal))

    @property
    def likely_parseable_type(self):
        """Pre-fetch guess at parseability, based only on the url string."""
        looks_like_media = ('.css' in self.url.lower()) or ('.js' in self.url.lower()) or ('.gz' in self.url.lower())
        return (self.starting_type == TYPE_INTERNAL and not self.is_media) or (looks_like_media and self.parent_is_internal)

    @property
    def is_internal(self):
        # Internal both before and after any redirects.
        return self.ending_type == TYPE_INTERNAL and self.starting_type == TYPE_INTERNAL

    @property
    def parent_is_internal(self):
        """True if any referring page is internal."""
        for referer_url in self.referers:
            referer = self.referers[referer_url]
            if referer.is_internal:
                return True
        return False

    @property
    def is_internal_html(self):
        """Internal HTML page that loaded successfully."""
        return self.is_internal and self.is_html and self.is_200

    @property
    def is_html(self):
        content_type = self.response_content_type
        return content_type and 'html' in content_type.lower()

    @property
    def is_javascript(self):
        content_type = self.response_content_type
        return content_type and 'javascript' in content_type.lower()

    @property
    def is_xml(self):
        content_type = self.response_content_type
        return content_type and 'xml' in content_type.lower()

    @property
    def is_css(self):
        content_type = self.response_content_type
        return content_type and 'css' in content_type.lower()

    @property
    def is_200(self):
        return (self.response_code == 200)

    @property
    def is_redirect_page(self):
        return (self.starting_url != self.ending_url)

    def load(self, set, expected_code=200):
        """Fetch the url, following redirects, and record the outcome.

        Returns (load_successful, response); the url is rewritten to the
        redirect chain's ending url on success.
        """
        # TODO: Known SSL ERRORS:
        # _ssl.c:504: error:14094410:SSL routines:SSL3_READ_BYTES:sslv3 alert handshake failure
        # _ssl.c:504: error:14094438:SSL routines:SSL3_READ_BYTES:tlsv1 alert internal error
        ignore_errors = ['_ssl.c:504']
        response = None
        start_time = datetime.datetime.now()
        if self.use_basic_auth:
            self.redirect_path = trace_path(self.url, self.is_internal, [], False, 0, None, 'basic', self.basic_auth_username, self.basic_auth_password)
        else:
            self.redirect_path = trace_path(self.url, self.is_internal, [])
        if len(self.redirect_path) > 0:
            last_response_item = self.redirect_path[-1]
            self.response_content_type = last_response_item['response_content_type']
            self.response_code = last_response_item['response_code']
            self.response_encoding = last_response_item['response_encoding']
            # SET CANONICAL URL TO ENDING URL
            self.url = self.ending_url = get_ending_url(last_response_item['url'], last_response_item['ending_url'])
            self.ending_type = set.get_link_type(self.ending_url)
        load_time = datetime.datetime.now() - start_time
        milliseconds = timedelta_milliseconds(load_time)
        self.response_load_time = milliseconds
        if self.response_code == 200:
            # Retrieve last response object and clear it from the object
            response = last_response_item['response']
            last_response_item['response'] = None
            self.has_response = True
        else:
            self.has_response = False
        # Get any errors from the redirect path
        for response_data in self.redirect_path:
            if response_data['error'] is not None:
                self.add_error_message(response_data['error'], self.set.loading_error)
            if response_data['warning'] is not None:
                self.add_warning_message(response_data['warning'], self.set.loading_error)
        if expected_code != self.response_code:
            # Ignore some known errors:
            ignore_error = False
            for ignore_code in ignore_errors:
                if ignore_code.lower() in str(self.response_code).lower():
                    ignore_error = True
            if ignore_error:
                self.response_code = "Unknown"
                return (True, response)
            else:
                message = "Loading error on page <mark>%s</mark> Expected <mark>%s</mark> Received <mark>%s</mark>" % (self.title, expected_code, self.response_code)
                self.add_error_message(message, self.set.loading_error)
                return (False, response)
        else:
            return (True, response)

    def parse_response(self, response, set):
        """Decompress, parse, and index the fetched response.

        Extracts child links (css, images, scripts, hyperlinks, media,
        iframes) from HTML/XML content and builds an enumerated source
        listing for display.
        """
        if USE_REQUESTS:
            raw_response = None if response is None else response.text
        else:
            raw_response = None if response is None else response.read()
        self.source = raw_response
        # DETECT COMPRESSION
        if response:
            if self.response_encoding:
                if self.response_encoding == 'gzip':
                    try:
                        # Attempt to read as gzipped file
                        decompressed = zlib.decompress(raw_response, 16 + zlib.MAX_WBITS)
                        self.source = decompressed
                    except Exception:
                        # print raw_response
                        self.source = raw_response
                elif self.response_encoding == 'deflate':
                    try:
                        decompressed = zlib.decompressobj(-zlib.MAX_WBITS).decompress(raw_response)
                        self.source = decompressed
                    except Exception:
                        self.source = raw_response
                else:
                    self.source = raw_response
        # PARSE HTML/XML
        if self.has_response and (self.is_html or self.is_xml):
            try:
                soup = BeautifulSoup(self.source, 'html5lib')  # TODO -- should I convert to UTF-8? .decode('utf-8', 'ignore')
            except Exception:
                soup = None
                self.add_error_message("Error parsing HTML on page <mark>%s</mark> %s" % (self.url, traceback.format_exc()), self.set.parsing_error)
            if soup:
                page_html = soup.prettify()
                self.html = page_html
                self.add_links(get_css_on_page(soup), self.css_links, set)
                self.add_links(get_images_on_page(soup), self.image_links, set)
                self.add_links(get_images_from_css(set, self), self.css_image_links, set)
                self.add_links(get_fonts_on_page(set, self), self.font_links, set)
                self.add_links(get_scripts_on_page(soup), self.script_links, set)
                page_links = get_hyperlinks_on_page(soup)
                # domained_links = []
                # for link in page_links:
                #     if 'dev.heron' in link and link not in ignore_urls:
                #         domained_links.append(link)
                # if len(domained_links) > 0:
                #     print 'Page %s contains %s links with absolute domains:'%(self.url, len(domained_links))
                #     for link in domained_links:
                #         print '> %s'%(link)
                self.add_links(page_links + get_sitemap_links_on_page(soup), self.hyper_links, set)
                self.add_links(get_audio_on_page(soup), self.audio_links, set)
                self.add_links(get_video_on_page(soup), self.video_links, set)
                # TODO: self.add_links(get_video_on_page(soup), self.object_links, set)
                self.add_links(get_iframes_on_page(soup), self.iframe_links, set)
                # self.add_links(get_xhr_links_on_page(soup), self.xhr_links, set)
        # Create enumerated source
        if self.content:
            try:
                enumerated_source_list = self.content.split("\n")
                counter = 0
                enumerated_source = ""
                for line in enumerated_source_list:
                    new_line = ("%s: %s" % (counter, line))
                    enumerated_source += ("%s\n" % (new_line))
                    counter += 1
                self.enumerated_source = enumerated_source
            except Exception:
                self.enumerated_source = "Error enumerating source: %s" % (traceback.format_exc())
        # if 'css' in self.url:
        #     print "CSS %s is_css? %s: %s"%(self.url, self.is_css, self.content)

    def add_links(self, input_links, list, set):
        """Resolve each raw url through the set and store it in *list*."""
        for input_link in input_links:
            link_item = set.get_or_create_link_object(input_link, self)
            if link_item:
                self.add_link(link_item, list)

    def add_link(self, link_item, list):
        """Store *link_item* in *list*, skipping self-links and duplicates."""
        is_same_url = self.url == link_item.url
        list_has_link = (link_item.url in list)
        if not is_same_url and not list_has_link:
            list[link_item.url] = link_item

    def add_referer(self, link_item):
        """Record that *link_item* links to this page."""
        # print 'add referer to %s from %s'%(self.url, link_item.url)
        self.add_link(link_item, self.referers)
###########################
# HELPER FUNCTIONS ########
###########################
def clean_query_string(url, ignore_query_string_keys):
    """Return *url* with the listed query-string keys removed."""
    parts = list(urlparse.urlparse(url))
    params = dict(urlparse.parse_qsl(parts[4], True))
    for key in ignore_query_string_keys:
        if key in params:
            del params[key]
    parts[4] = urllib.urlencode(params)
    return urlparse.urlunparse(parts)
def clear_query_string(url):
    """Strip every query parameter from *url*; None passes through as None."""
    if not url:
        return None
    # Remove all query params from url
    parts = list(urlparse.urlparse(url))
    parts[4] = urllib.urlencode({})
    return urlparse.urlunparse(parts)
def get_hyperlinks_on_page(soup):
    """Collect the href of every <a> tag on the page.

    Anchors without an href attribute are skipped.
    """
    output = []
    # Traditional hyperlinks
    for a in soup.findAll('a'):
        try:
            output.append(a['href'])
        except KeyError:
            # Anchor without an href (e.g. a named anchor target) -- skip.
            # Was a bare except, which also swallowed KeyboardInterrupt.
            pass
    return output
def get_sitemap_links_on_page(soup):
    """Collect non-empty <loc> values from sitemap-style <url> entries."""
    return [loc.text
            for url_entry in soup.findAll('url')
            for loc in url_entry.findAll('loc')
            if loc.text]
def get_css_on_page(soup):
    """Collect the hrefs of stylesheet <link> tags on the page."""
    output = []
    # CSS Links
    for a in soup.findAll('link'):
        try:
            href = a['href']
            rel = a['rel'][0].strip()
            if rel == 'stylesheet':
                output.append(href)
        except (KeyError, IndexError):
            # Missing href/rel attribute, or an empty rel list -- not a
            # stylesheet reference. Was a bare except (TODO note resolved).
            pass
    return output
def get_scripts_on_page(soup):
    """Collect the src of every external <script> tag on the page."""
    output = []
    # JS Links
    for a in soup.findAll('script'):
        try:
            output.append(a['src'])
        except KeyError:
            # Inline script with no src -- nothing to load.
            # Was a bare except, which also swallowed KeyboardInterrupt.
            pass
    return output
def get_images_on_page(soup):
    """Collect the src of every <img> tag on the page."""
    output = []
    for img in soup.findAll('img'):
        try:
            output.append(img['src'])
        except KeyError:
            # <img> without a src attribute -- skip.
            # Was a bare except, which also swallowed KeyboardInterrupt.
            pass
    return output
def get_audio_on_page(soup):
    """Collect audio sources: each <audio> tag's own src plus nested <source> tags."""
    output = []
    for audio in soup.findAll('audio'):
        try:
            output.append(audio['src'])
        except KeyError:
            # <audio> without an inline src; nested <source> tags may still exist.
            # Was a bare except, which also swallowed KeyboardInterrupt.
            pass
        for source in audio.findAll('source'):
            try:
                output.append(source['src'])
            except KeyError:
                pass
    return output
def get_video_on_page(soup):
    """Collect video sources: each <video> tag's own src plus nested <source> tags."""
    output = []
    for video in soup.findAll('video'):
        try:
            output.append(video['src'])
        except KeyError:
            # <video> without an inline src; nested <source> tags may still exist.
            # Was a bare except, which also swallowed KeyboardInterrupt.
            pass
        for source in video.findAll('source'):
            try:
                output.append(source['src'])
            except KeyError:
                pass
    return output
def get_iframes_on_page(soup):
    """Collect the src of every <iframe> tag on the page."""
    output = []
    for iframe in soup.findAll('iframe'):
        try:
            output.append(iframe['src'])
        except KeyError:
            # <iframe> without a src attribute -- skip.
            # Was a bare except, which also swallowed KeyboardInterrupt.
            pass
    return output
def get_images_from_css(set, link):
    """Collect IMAGE urls referenced via url(...) in the page's CSS files.

    Results feed ``css_image_links`` on the calling page.
    """
    # TODO -- also include inline css
    output = []
    for css_url in link.css_links:
        css_link = link.css_links[css_url]
        set.load_link(css_link, False, 200)
        if css_link.response_code == 200:
            if css_link.content:
                all_urls = re.findall(r'url\(([^)]+)\)', css_link.content)
                for url in all_urls:
                    full_url = urlparse.urljoin(css_link.url, url.strip("'").strip('"'))
                    parsed = urlparse.urlparse(full_url)
                    name, extension = os.path.splitext(parsed.path)
                    # BUG FIX: this helper previously tested FONT_SUFFIXES -- a
                    # copy/paste of get_fonts_on_page -- so it returned fonts,
                    # never images. It must filter on IMAGE_SUFFIXES.
                    is_image = (extension.lower() in IMAGE_SUFFIXES)
                    if is_image:
                        output.append(full_url)
    return output
def get_fonts_on_page(set, link):
    """Collect font urls referenced via url(...) in the page's CSS files."""
    # TODO -- also include inline css
    found = []
    for css_url in link.css_links:
        css_link = link.css_links[css_url]
        set.load_link(css_link, False, 200)
        if css_link.response_code != 200 or not css_link.content:
            continue
        for raw_url in re.findall(r'url\(([^)]+)\)', css_link.content):
            absolute = urlparse.urljoin(css_link.url, raw_url.strip("'").strip('"'))
            _, extension = os.path.splitext(urlparse.urlparse(absolute).path)
            if extension.lower() in FONT_SUFFIXES:
                found.append(absolute)
    return found
def timedelta_milliseconds(td):
    """Convert a timedelta into milliseconds."""
    total_ms = td.days * 86400000
    total_ms += td.seconds * 1000
    return total_ms + td.microseconds / 1000
# Recursively follow redirects until there isn't a location header
class NoRedirection(urllib2.HTTPErrorProcessor):
    """HTTP(S) error processor that does NOT follow redirects.

    Redirect responses are returned to the caller untouched so the full
    redirect chain can be traced hop by hop (see trace_path_with_urllib2).
    """
    # def http_response(self, request, response):
    #     return response
    # https_response = http_response
    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()
        # only add this line to stop redirection.
        if int(code) in REDIRECT_CODES:
            return response
        if not (200 <= code < 300):
            # Non-success, non-redirect codes still go through normal error handling.
            response = self.parent.error('http', request, response, code, msg, hdrs)
        return response
    https_response = http_response
class TLSAV1dapter(requests.adapters.HTTPAdapter):
    """Transport adapter that forces requests connections to use TLS v1."""
    def init_poolmanager(self, connections, maxsize, block=False):
        # This method gets called when there's no proxy.
        self.poolmanager = requests.packages.urllib3.poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            ssl_version=ssl.PROTOCOL_TLSv1)
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        # This method is called when there is a proxy.
        proxy_kwargs['ssl_version'] = ssl.PROTOCOL_TLSv1
        return super(TLSAV1dapter, self).proxy_manager_for(proxy, **proxy_kwargs)
def trace_path(url, is_internal, traced, enable_cookies=False, depth=0, cj_or_session=None, auth=None, username=None, password=None):
    """Follow *url*'s redirect chain using the configured HTTP backend."""
    backend = trace_path_with_requests if USE_REQUESTS else trace_path_with_urllib2
    return backend(url, is_internal, traced, enable_cookies, depth, cj_or_session, auth, username, password)
def trace_path_with_requests(url, is_internal, traced, enable_cookies=False, depth=0, session=None, auth=None, username=None, password=None):
    """Follow *url*'s redirect chain with the requests library.

    Appends one response_data dict per hop to *traced* and returns it.
    Detects redirect loops; when a loop (or a request failure) occurs
    without cookies enabled, the whole trace is retried with cookies.
    """
    # Safely catch
    MAX_REDIRECTS = 15
    if depth > MAX_REDIRECTS:
        traced[-1]['error'] = "Max redirects (%s) reached" % (MAX_REDIRECTS)
        return traced
    # Check for redirect loop
    for trace_history in traced:
        # If we are using cookies, then a redirect would consist of the same url and the same cookies
        # If cookies are not enabled, then a redirect consists merely of the same url
        is_same_url = trace_history['url'] == url
        is_same_cookies = True if not enable_cookies else trace_history['pickled_cookies'] == pickle.dumps(session.cookies._cookies)
        if is_same_url and is_same_cookies:
            if not enable_cookies:
                # Re-try with cookies enabled
                first_url = traced[0]['url']
                traced_with_cookies = trace_path(first_url, is_internal, [], True, 0, None, auth, username, password)
                traced_with_cookies[0]['error'] = "Cookies required to correctly navigate to: %s" % (first_url)
                return traced_with_cookies
            else:
                traced[-1]['error'] = "Redirect loop detected to %s" % (url)
                return traced
    use_auth = (auth == 'basic' and is_internal)
    if enable_cookies or use_auth:
        if not session:
            session = requests.Session()
            session.mount('https://', TLSAV1dapter())
        if use_auth:
            auth = (username, password)
            session.auth = auth
    response = None
    # One entry of the redirect trace; 'response' is cleared on redirect hops.
    response_data = {
        'url': url,
        'ending_url': None,
        'response_code': None,
        'response_content_type': None,
        'response_encoding': None,
        'redirect': None,
        'response_load_time': None,
        'error': None,
        'warning': None,
        'response': None
    }
    has_redirect = False
    start_time = datetime.datetime.now()
    # print '---> [%s] Trace path %s'%(depth, url)
    try:
        # Cases:
        # -- no redirect
        # -- headers
        # -- password
        # -- cookies
        # -- authorization
        # Don't verify cert here if we're testing the site. We'll test that on a separate step.
        verify_cert = not is_internal
        if session:
            response = session.get(url, headers=HEADERS, allow_redirects=False, verify=verify_cert, timeout=10)
        else:
            response = requests.get(url, headers=HEADERS, allow_redirects=False, verify=verify_cert, timeout=10)
        try:
            code = response.status_code
        except Exception:
            logger.error("Error parsing code: %s" % (traceback.format_exc()))
            code = 'Unknown'
        response_header = response.headers
        parse_trace_response(response, response_data, code, response_header, start_time)
        response_data['response'] = response
    except requests.exceptions.Timeout:
        # Maybe set up for a retry, or continue in a retry loop
        response_data['response_code'] = "Timeout"
    except requests.exceptions.TooManyRedirects:
        # Tell the user their URL was bad and try a different one
        response_data['response_code'] = "TooManyRedirects"
    except requests.exceptions.RequestException as e:
        # catastrophic error. bail.
        # Try loading with the session TLS adapter
        if not enable_cookies:
            traced_with_cookies = trace_path(url, is_internal, [], True, 0, None, auth, username, password)
            return traced_with_cookies
        else:
            response_data['response_code'] = "RequestException: %s" % (e)
    except Exception:
        logger.error("Unkown Exception: %s" % (traceback.format_exc()))
        response_data['response_code'] = "Unknown Exception: %s" % (traceback.format_exc())
    if enable_cookies:
        response_data['pickled_cookies'] = pickle.dumps(session.cookies._cookies)
        response_data['request_headers'] = response.request.headers
    traced.append(response_data)
    has_redirect = response_data['redirect'] is not None
    if has_redirect:
        # Delete last response object
        response_data['response'] = None
        redirect_url = get_ending_url(response_data['url'], response_data['ending_url'])
        # Recurse into the next hop of the redirect chain.
        traced = trace_path(redirect_url, is_internal, traced, enable_cookies, depth + 1, session, auth, username, password)
    return traced
def trace_path_with_urllib2(url, is_internal, traced, enable_cookies=False, depth=0, cj=None, auth=None, username=None, password=None):
    """Follow *url*'s redirect chain with urllib2 (Python 2 fallback backend).

    Appends one response_data dict per hop to *traced* and returns it.
    Mirrors trace_path_with_requests: loop detection, optional cookie jar,
    and optional HTTP basic auth for internal urls.
    """
    # Safely catch
    MAX_REDIRECTS = 15
    if depth > MAX_REDIRECTS:
        traced[-1]['error'] = "Max redirects (%s) reached" % (MAX_REDIRECTS)
        return traced
    # Check for redirect loop
    for trace_history in traced:
        # If we are using cookies, then a redirect would consist of the same url and the same cookies
        # If cookies are not enabled, then a redirect consists merely of the same url
        is_same_url = trace_history['url'] == url
        is_same_cookies = True if not enable_cookies else trace_history['pickled_cookies'] == pickle.dumps(cj._cookies)
        if is_same_url and is_same_cookies:
            if not enable_cookies:
                # Re-try with cookies enabled
                first_url = traced[0]['url']
                traced_with_cookies = trace_path(first_url, is_internal, [], True, 0, None, auth, username, password)
                traced_with_cookies[0]['warning'] = "Cookies required to correctly navigate to: %s" % (first_url)
                return traced_with_cookies
            else:
                traced[-1]['error'] = "Redirect loop detected to %s" % (url)
                return traced
    use_auth = (auth == 'basic' and is_internal)
    if enable_cookies or use_auth:
        if not cj:
            cj = cookielib.CookieJar()
    if use_auth:
        password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, url, username, password)
        password_handler = urllib2.HTTPBasicAuthHandler(password_manager)
    # Build an opener matching the cookie/auth combination in play.
    if cj:
        if use_auth:
            opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj), password_handler)
        else:
            opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
    else:
        if use_auth:
            opener = urllib2.build_opener(NoRedirection, password_handler)
        else:
            opener = urllib2.build_opener(NoRedirection)
    request = urllib2.Request(url, headers=HEADERS)
    response = None
    # One entry of the redirect trace; 'response' is cleared on redirect hops.
    response_data = {
        'url': url,
        'ending_url': None,
        'response_code': None,
        'response_content_type': None,
        'response_encoding': None,
        'redirect': None,
        'response_load_time': None,
        'error': None,
        'warning': None,
        'response': None
    }
    has_redirect = False
    start_time = datetime.datetime.now()
    # print '---> [%s] Trace path %s'%(depth, url)
    try:
        try:
            response = opener.open(request)
            response_header = response.info()
        except ValueError:
            # Fall back to a bare request without custom headers.
            logger.error("Value Error: %s" % (traceback.format_exc()))
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
        try:
            code = response.code
        except Exception:
            logger.error("Error parsing code: %s" % (traceback.format_exc()))
            code = 'Unknown'
        parse_trace_response(response, response_data, code, response_header, start_time)
        response_data['response'] = response
    except urllib2.HTTPError, e:
        # print '---> urllib2.HTTPError %s - %s'%(e.code, e.headers)
        try:
            parse_trace_response(response, response_data, e.code, e.headers, start_time)
        except Exception:
            logger.error("Error parsing trace: %s" % (traceback.format_exc()))
            response_data['response_code'] = "Unknown HTTPError"
    except urllib2.URLError, e:
        # checksLogger.error('URLError = ' + str(e.reason))
        response_data['response_code'] = "Unknown URLError: %s" % (e.reason)
    except httplib.BadStatusLine as e:
        response_data['response_code'] = "Bad Status Error. (Presumably, the server closed the connection before sending a valid response)"
    except Exception:
        logger.error("Unkown Exception: %s" % (traceback.format_exc()))
        response_data['response_code'] = "Unknown Exception: %s" % (traceback.format_exc())
    if enable_cookies:
        response_data['pickled_cookies'] = pickle.dumps(cj._cookies)
    response_data['request_headers'] = request.headers
    traced.append(response_data)
    has_redirect = response_data['redirect'] is not None
    if has_redirect:
        # Delete last response object
        response_data['response'] = None
        redirect_url = get_ending_url(response_data['url'], response_data['ending_url'])
        # Recurse into the next hop of the redirect chain.
        traced = trace_path(redirect_url, is_internal, traced, enable_cookies, depth + 1, cj, auth, username, password)
    return traced
def parse_trace_response(response, response_data, code, response_header, start_time):
    """Populate *response_data* from a raw response via the configured backend."""
    if USE_REQUESTS:
        parse_trace_response_with_requests(response, response_data, code, response_header, start_time)
        return
    parse_trace_response_with_urllib2(response, response_data, code, response_header, start_time)
def parse_trace_response_with_requests(response, response_data, code, response_header, start_time):
    """Fill *response_data* from a requests-style header mapping (dict .get access)."""
    finished_at = datetime.datetime.now()
    response_data['response_headers'] = response_header
    response_data['response_code'] = code
    response_data['response_content_type'] = response_header.get('Content-Type')
    response_data['response_encoding'] = response_header.get('Content-Encoding')
    response_data['error'] = None
    response_data['cookies'] = response_header.get("Set-Cookie")
    response_data['response_load_time'] = timedelta_milliseconds(finished_at - start_time)
    # A Location header signals a redirect; otherwise the request ended where it began.
    response_data['ending_url'] = response_header.get('Location') or response_data['url']
    if response_data['url'] != response_data['ending_url']:
        response_data['redirect'] = response_data['ending_url']
def parse_trace_response_with_urllib2(response, response_data, code, response_header, start_time):
    """Fill *response_data* from a urllib2-style header object (.getheader access)."""
    finished_at = datetime.datetime.now()
    response_data['response_headers'] = dict(response_header)
    response_data['response_code'] = code
    response_data['response_content_type'] = response_header.getheader('Content-Type')
    response_data['response_encoding'] = response_header.getheader('Content-Encoding')
    response_data['error'] = None
    response_data['cookies'] = response_header.getheader("Set-Cookie")
    response_data['response_load_time'] = timedelta_milliseconds(finished_at - start_time)
    # A Location header signals a redirect; otherwise the request ended where it began.
    response_data['ending_url'] = response_header.getheader('Location') or response_data['url']
    if response_data['url'] != response_data['ending_url']:
        response_data['redirect'] = response_data['ending_url']
def get_ending_url(starting_url, ending_url=None):
    """Resolve a (possibly relative) redirect target against the starting URL."""
    if not ending_url:
        return starting_url
    if 'http' in ending_url.lower():
        # Already an absolute url.
        return ending_url
    # We may be receiving a relative redirect, such as "/path/redirect" without a domain
    parsed_uri = urlparse.urlparse(starting_url)
    base = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
    relative = ending_url[1:] if ending_url.startswith('/') else ending_url
    return base + relative
def is_redirect_code(code):
    """True when *code* is an HTTP redirect status (see REDIRECT_CODES)."""
    return int(code) in REDIRECT_CODES
def url_fix(s, charset='utf-8'):
    """Sometimes you get an URL by a user that just isn't a real
    URL because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:
    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
    :param charset: The target charset for the URL if the url was
    given as unicode string.
    TODO:
    https://fonts.googleapis.com/css?family=Alegreya+Sans:400,700,400italic,700italic should become:
    https://fonts.googleapis.com/css?family=Alegreya+Sans%3A400%2C700%2C400italic%2C700italic
    """
    # Encode unicode input to bytes before quoting (Python 2 semantics).
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    # Quote the path but keep '/' separators and existing %-escapes intact.
    path = urllib.quote(path, '/%')
    parsed = urlparse.parse_qsl(qs, True)
    qs = urllib.urlencode(parsed)
    # Note -- the problem with this is that it is non-idempodent
    # so if it's called multiple times on a URL, the URL becomes incorrect
    # EX: http://dev.heron.org/engage/?category=&year=&author=Ryan+Halas&sort= becomes
    # http://dev.heron.org/engage/?category=&year=&author=Ryan%2BHalas&sort=
    # qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
def store_file_locally(url):
    """Download *url* into the local 'tmp' folder and return the local path.

    :returns: the path of the written file, or None when the download
        failed (HTTP/URL errors are logged rather than raised).
    """
    temp_folder = 'tmp'
    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)
    # Fix: initialize local_path so the return statement below cannot raise
    # UnboundLocalError when urlopen() fails before the assignment.
    local_path = None
    # Open the url
    try:
        f = urllib2.urlopen(url)
        local_path = os.path.join(temp_folder, os.path.basename(url))
        # Open our local file for writing
        with open(local_path, "wb") as local_file:
            local_file.write(f.read())
    # handle errors -- fix: the original passed e.code/url as extra logging
    # arguments without %s placeholders, which itself raises a formatting
    # error inside the logging module.
    except urllib2.HTTPError as e:
        logger.error("HTTP Error: %s %s", e.code, url)
    except urllib2.URLError as e:
        logger.error("URL Error: %s %s", e.reason, url)
    return local_path
def trace_memory_usage():
    """Log the process's current memory usage at debug level."""
    usage = memory_usage_resource()
    logger.debug('Memory usage: %s' % usage)
    # (Object-graph debugging with gc/objgraph was removed here; re-add
    # locally if leak hunting is needed.)
def memory_usage_resource():
    """Return this process's peak resident memory, scaled to megabytes."""
    import resource
    denom = 1024.0
    if sys.platform == 'darwin':
        # ... it seems that in OSX the output is different units ...
        denom *= denom
    peak_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return peak_rss / denom
def truthy(value):
    """Map the strings 'True'/'False' to real booleans; return any other
    value unchanged."""
    return {'True': True, 'False': False}.get(value, value)
| apache-2.0 |
ltilve/chromium | chrome/common/extensions/docs/server2/manifest_data_source_test.py | 87 | 6399 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
import json
import unittest
from future import Future
import manifest_data_source
from object_store_creator import ObjectStoreCreator
# Shared fixture: manifest documentation entries keyed by name, in the
# dict form consumed by manifest_data_source._ListifyAndSortDocs.
# Exercises the 'required', 'recommended', 'only_one' and 'optional'
# levels, one templated example, and one nested 'children' dict.
convert_and_annotate_docs = {
  'name': {
    'example': "My {{platform}}",
    'name': 'name',
    'level': 'required'
  },
  'doc2': {
    'level': 'required',
    'name': 'doc2'
  },
  'doc1': {
    'level': 'required',
    'name': 'doc1',
    'children': {
      'sub1': {
        'annotations': ['not so important'],
        'level': 'optional',
        'name': 'sub1'
      },
      'sub2': {
        'level': 'required',
        'name': 'sub2'
      }
    }
  },
  'doc3': {
    'level': 'only_one',
    'name': 'doc3'
  },
  'doc4': {
    'level': 'recommended',
    'name': 'doc4'
  },
  'doc5': {
    'level': 'only_one',
    'name': 'doc5'
  },
  'doc6': {
    'level': 'optional',
    'name': 'doc6'
  }
}
class ManifestDataSourceTest(unittest.TestCase):
  """Unit tests for manifest_data_source's listify/annotate helpers and for
  ManifestDataSource.get itself (via fake server-instance plumbing)."""

  def testListifyAndSortDocs(self):
    """Dicts become lists sorted by level (required, recommended, only_one,
    optional) then name; the templated {{platform}} example is expanded."""
    expected_docs = [
      {
        'level': 'required',
        'name': 'doc1',
        'children': [
          {
            'level': 'required',
            'name': 'sub2'
          },
          {
            'annotations': ['not so important'],
            'level': 'optional',
            'name': 'sub1'
          }
        ]
      },
      {
        'level': 'required',
        'name': 'doc2'
      },
      {
        'level': 'required',
        'example': '"My App"',
        'has_example': True,
        'name': 'name'
      },
      {
        'level': 'recommended',
        'name': 'doc4'
      },
      {
        'level': 'only_one',
        'name': 'doc3'
      },
      {
        'level': 'only_one',
        'name': 'doc5'
      },
      {
        'level': 'optional',
        'name': 'doc6'
      }
    ]
    self.assertEqual(expected_docs, manifest_data_source._ListifyAndSortDocs(
        deepcopy(convert_and_annotate_docs), 'App'))

  def testAnnotate(self):
    """_AddLevelAnnotations adds human-readable level labels ('Optional',
    'Recommended', 'Pick one (or none)') and marks the last entry of each
    level group with is_last."""
    expected_docs = [
      {
        'level': 'required',
        'name': 'doc1',
        'children': [
          {
            'level': 'required',
            'name': 'sub2'
          },
          {
            'annotations': ['Optional', 'not so important'],
            'level': 'optional',
            'name': 'sub1',
            'is_last': True
          }
        ]
      },
      {
        'level': 'required',
        'name': 'doc2'
      },
      {
        'name': 'name',
        'level': 'required',
        'example': '"My App"',
        'has_example': True
      },
      {
        'annotations': ['Recommended'],
        'level': 'recommended',
        'name': 'doc4'
      },
      {
        'annotations': ['Pick one (or none)'],
        'level': 'only_one',
        'name': 'doc3'
      },
      {
        'level': 'only_one',
        'name': 'doc5'
      },
      {
        'annotations': ['Optional'],
        'level': 'optional',
        'name': 'doc6',
        'is_last': True
      }
    ]
    annotated = manifest_data_source._ListifyAndSortDocs(
        deepcopy(convert_and_annotate_docs), 'App')
    manifest_data_source._AddLevelAnnotations(annotated)
    self.assertEqual(expected_docs, annotated)

  def testExpandedExamples(self):
    """A nested dict example is expanded into a 'children' tree, with the
    innermost value rendered via json.dumps."""
    docs = {
      'doc1': {
        'name': 'doc1',
        'example': {
          'big': {
            'nested': {
              'json_example': ['with', 'more', 'json']
            }
          }
        }
      }
    }
    expected_docs = [
      {
        'name': 'doc1',
        'children': [
          {
            'name': 'big',
            'children': [
              {
                'name': 'nested',
                'children': [
                  {
                    'name': 'json_example',
                    'example': json.dumps(['with', 'more', 'json']),
                    'has_example': True
                  }
                ]
              }
            ]
          }
        ]
      }
    ]
    self.assertEqual(
        expected_docs, manifest_data_source._ListifyAndSortDocs(docs, 'apps'))

  def testNonExpandedExamples(self):
    """Empty dict/list examples are rendered as '{...}' / '[...]' / '[{...}]'
    placeholders instead of being expanded into children."""
    docs = {
      'doc1': {
        'name': 'doc1',
        'example': {}
      },
      'doc2': {
        'name': 'doc2',
        'example': []
      },
      'doc3': {
        'name': 'doc3',
        'example': [{}]
      }
    }
    expected_docs = [
      {
        'name': 'doc1',
        'has_example': True,
        'example': '{...}'
      },
      {
        'name': 'doc2',
        'has_example': True,
        'example': '[...]'
      },
      {
        'name': 'doc3',
        'has_example': True,
        'example': '[{...}]'
      }
    ]
    self.assertEqual(
        expected_docs, manifest_data_source._ListifyAndSortDocs(docs, 'apps'))

  def testManifestDataSource(self):
    """End-to-end: mds.get('apps') filters by platform, nests dotted names
    ('doc1.sub1' under 'doc1') and annotates levels."""
    manifest_features = {
      'doc1': {
        'name': 'doc1',
        'platforms': ['apps', 'extensions'],
        'example': {},
        'level': 'required'
      },
      'doc1.sub1': {
        'name': 'doc1.sub1',
        'platforms': ['apps'],
        'annotations': ['important!'],
        'level': 'recommended'
      }
    }
    expected_app = [
      {
        'example': '{...}',
        'has_example': True,
        'level': 'required',
        'name': 'doc1',
        'platforms': ['apps', 'extensions'],
        'children': [
          {
            'annotations': [
              'Recommended',
              'important!'
            ],
            'level': 'recommended',
            'name': 'sub1',
            'platforms': ['apps'],
            'is_last': True
          }
        ],
        'is_last': True
      }
    ]
    # Minimal fakes standing in for the real server plumbing; only the
    # attributes that ManifestDataSource touches are provided.
    class FakePlatformBundle(object):
      def GetFeaturesBundle(self, platform):
        return FakeFeaturesBundle()
    class FakeFeaturesBundle(object):
      def GetManifestFeatures(self):
        return Future(value=manifest_features)
    class FakeServerInstance(object):
      def __init__(self):
        self.platform_bundle = FakePlatformBundle()
        self.object_store_creator = ObjectStoreCreator.ForTest()
    mds = manifest_data_source.ManifestDataSource(FakeServerInstance(), None)
    self.assertEqual(expected_app, mds.get('apps'))
# Allow running this test module directly (e.g. `python manifest_data_source_test.py`).
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
sugarlabs/sugar | src/jarabe/util/downloader.py | 2 | 8626 | # Copyright (C) 2009-2013, Sugar Labs
# Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from urllib.parse import urlparse
import tempfile
import gi
gi.require_version('Soup', '2.4')
from gi.repository import GObject
from gi.repository import Soup
from gi.repository import Gio
from gi.repository import GLib
from jarabe import config
from sugar3 import env
_session = None
SOUP_STATUS_CANCELLED = 1
def soup_status_is_successful(status):
    """Return True when *status* is an HTTP success code (2xx)."""
    return 200 <= status < 300
def get_soup_session():
    """Return the process-wide shared libsoup session, creating and
    configuring it (timeouts, user-agent, proxy resolver) on first use."""
    global _session
    if _session is None:
        _session = Soup.SessionAsync()
        # 60s for both the request timeout and how long an idle
        # connection is kept open.
        _session.set_property("timeout", 60)
        _session.set_property("idle-timeout", 60)
        _session.set_property("user-agent", "Sugar/%s" % config.version)
        # Honor the system/GNOME proxy configuration.
        _session.add_feature_by_type(Soup.ProxyResolverDefault)
    return _session
class Downloader(GObject.GObject):
    """Asynchronous HTTP downloader built on the shared libsoup session.

    Signals:
      progress (float): fraction downloaded, emitted while writing to a
          file when the total size is known.
      got-chunk (GBytes): each received body chunk (chunked modes only).
      complete (object): final outcome -- None, GBytes or int depending on
          which download method was used, or an IOError on HTTP failure.
    """
    __gsignals__ = {
        'progress': (GObject.SignalFlags.RUN_FIRST,
                     None,
                     ([float])),
        'got-chunk': (GObject.SignalFlags.RUN_FIRST,
                      None,
                      (object,)),
        'complete': (GObject.SignalFlags.RUN_FIRST,
                     None,
                     (object,)),
    }

    def __init__(self, url, session=None, request_headers=None):
        """Prepare a downloader for *url*; nothing is fetched until one of
        the download*/get_size methods is called.

        request_headers, if given, is a dict of extra HTTP request headers.
        """
        GObject.GObject.__init__(self)
        self._uri = Soup.URI.new(url)
        self._session = session or get_soup_session()
        self._pending_buffers = []
        self._downloaded_size = 0
        self._total_size = 0
        self._cancelling = False
        self._status_code = None
        self._output_file = None
        self._output_stream = None
        self._message = None
        self._request_headers = request_headers

    def _setup_message(self, method="GET"):
        # Build the Soup.Message, hook the chunk/header callbacks and apply
        # any caller-supplied request headers.
        self._message = Soup.Message(method=method, uri=self._uri)
        self._message.connect('got-chunk', self._got_chunk_cb)
        self._message.connect('got-headers', self._headers_cb, None)
        if self._request_headers is not None:
            for header_key in list(self._request_headers.keys()):
                self._message.request_headers.append(
                    header_key, self._request_headers[header_key])

    def download_to_temp(self):
        """
        Download the contents of the provided URL to temporary file storage.
        Use .get_local_file_path() to find the location of where the file
        is saved. Upon completion, a successful download is indicated by a
        result of None in the complete signal parameters.
        """
        url = self._uri.to_string(False)
        temp_file_path = self._get_temp_file_path(url)
        self._output_file = Gio.File.new_for_path(temp_file_path)
        self._output_stream = self._output_file.create(
            Gio.FileCreateFlags.PRIVATE, None)
        # Reuse the chunked download path; _got_chunk_cb writes each chunk
        # to the output stream because _output_stream is now set.
        self.download_chunked()

    def download_chunked(self):
        """
        Download the contents of the provided URL into memory. The download
        is done in chunks, and each chunk is emitted over the 'got-chunk'
        signal. Upon completion, a successful download is indicated by a
        result of None in the complete signal parameters.
        """
        self._setup_message()
        # Don't accumulate the body in the message; chunks are forwarded
        # via the got-chunk signal (and optionally written to disk).
        self._message.response_body.set_accumulate(False)
        self._session.queue_message(self._message, self._message_cb, None)

    def download(self, start=None, end=None):
        """
        Download the contents of the provided URL into memory.
        Upon completion, the downloaded data will be passed as GBytes to the
        result parameter of the complete signal handler.
        The start and end parameters can optionally be set to perform a
        partial read of the remote data.
        """
        self._setup_message()
        if start is not None:
            self._message.request_headers.set_range(start, end)
        self._session.queue_message(self._message, self._message_cb, None)

    def get_size(self):
        """
        Perform a HTTP HEAD request to find the size of the remote content.
        The size is returned in the result parameter of the 'complete' signal.
        """
        self._setup_message("HEAD")
        self._session.queue_message(self._message, self._message_cb, None)

    def _message_cb(self, session, message, user_data):
        # Soup finished (or aborted) the request; remember the status and
        # try to finish (pending file writes may still be in flight).
        self._status_code = message.status_code
        self._check_if_finished()

    def cancel(self):
        """Abort the transfer; 'complete' will fire once pending I/O drains."""
        self._cancelling = True
        self._session.cancel_message(self._message, SOUP_STATUS_CANCELLED)

    def _headers_cb(self, message, user_data):
        # Capture Content-Length (for progress reporting) on success.
        if soup_status_is_successful(message.status_code):
            self._total_size = message.response_headers.get_content_length()

    def _got_chunk_cb(self, message, buf):
        # Forward each chunk to listeners; additionally queue it for the
        # output file when downloading to disk.
        if self._cancelling or \
                not soup_status_is_successful(message.status_code):
            return
        data = buf.get_as_bytes()
        self.emit('got-chunk', data)
        if self._output_stream:
            self._pending_buffers.append(data)
            self._write_next_buffer()

    def __write_async_cb(self, output_stream, result, user_data):
        # One async file write completed; update progress and continue.
        count = output_stream.write_bytes_finish(result)
        self._downloaded_size += count
        if self._total_size > 0:
            progress = self._downloaded_size / float(self._total_size)
            self.emit('progress', progress)
        self._check_if_finished()

    def _complete(self):
        # Compute the final result and emit 'complete' exactly once.
        if self._output_stream:
            self._output_stream.close(None)
        result = None
        if soup_status_is_successful(self._status_code):
            if self._message.method == "HEAD":
                # this is a get_size request
                result = self._total_size
            elif self._message.response_body.get_accumulate():
                # the message body must be flattened so that it can be
                # retrieved as GBytes because response_body.data gets
                # incorrectly treated by introspection as a NULL-terminated
                # string
                # https://bugzilla.gnome.org/show_bug.cgi?id=704105
                result = self._message.response_body.flatten().get_as_bytes()
        else:
            result = IOError("HTTP error code %d" % self._status_code)
        self.emit('complete', result)

    def _check_if_finished(self):
        # To finish (for both successful completion and cancellation), we
        # require two conditions to become true:
        # 1. Soup message callback has been called
        # 2. Any pending output file write completes
        # Those conditions can become true in either order.
        if not self._output_stream:
            self._complete()
            return
        if self._cancelling or not self._pending_buffers:
            if self._status_code is not None \
                    and not self._output_stream.has_pending():
                self._complete()
            return
        self._write_next_buffer()

    def _write_next_buffer(self):
        # Kick off the next async write, one at a time (GIO allows only a
        # single pending operation per stream).
        if not self._output_stream.has_pending():
            data = self._pending_buffers.pop(0)
            self._output_stream.write_bytes_async(data, GLib.PRIORITY_LOW,
                                                  None, self.__write_async_cb,
                                                  None)

    def _get_temp_file_path(self, uri):
        # TODO: Should we use the HTTP headers for the file name?
        # Reserve a unique name in the profile's data dir; the file itself
        # is created later by Gio (hence the unlink after mkstemp).
        scheme_, netloc_, path, params_, query_, fragment_ = \
            urlparse(uri)
        path = os.path.basename(path)
        tmp_dir = os.path.join(env.get_profile_path(), 'data')
        base_name, extension_ = os.path.splitext(path)
        fd, file_path = tempfile.mkstemp(dir=tmp_dir,
                                         prefix=base_name, suffix=extension_)
        os.close(fd)
        os.unlink(file_path)
        return file_path

    def get_local_file_path(self):
        """Return the path used by download_to_temp(), or None otherwise."""
        if self._output_file:
            return self._output_file.get_path()
| gpl-3.0 |
tsabi/Odoo-tsabi-fixes | addons/email_template/wizard/mail_compose_message.py | 24 | 11174 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools, SUPERUSER_ID
from openerp.osv import osv, fields
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
class mail_compose_message(osv.TransientModel):
    """Extend the mail composition wizard with email.template support:
    template selection, rendering, attachment handling and save-as-template."""
    _inherit = 'mail.compose.message'

    def default_get(self, cr, uid, fields, context=None):
        """ Override to pre-fill the data when having a template in single-email mode
            and not going through the view: the on_change is not called in that case. """
        if context is None:
            context = {}
        res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
        if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'):
            res.update(
                self.onchange_template_id(
                    cr, uid, [], context['default_template_id'], res.get('composition_mode'),
                    res.get('model'), res.get('res_id'), context=context
                )['value']
            )
        return res

    _columns = {
        # Optional template whose rendered content pre-fills the wizard.
        'template_id': fields.many2one('email.template', 'Use template', select=True),
    }

    def send_mail(self, cr, uid, ids, context=None):
        """ Override of send_mail to duplicate attachments linked to the email.template.
            Indeed, basic mail.compose.message wizard duplicates attachments in mass
            mailing mode. But in 'single post' mode, attachments of an email template
            also have to be duplicated to avoid changing their ownership. """
        if context is None:
            context = {}
        wizard_context = dict(context)
        for wizard in self.browse(cr, uid, ids, context=context):
            if wizard.template_id:
                wizard_context['mail_notify_user_signature'] = False  # template user_signature is added when generating body_html
                wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete  # mass mailing: use template auto_delete value -> note, for emails mass mailing only
            if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
                continue
            # Copy only the attachments that came from the template; keep
            # wizard-local attachments as-is.
            new_attachment_ids = []
            for attachment in wizard.attachment_ids:
                if attachment in wizard.template_id.attachment_ids:
                    new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
                else:
                    new_attachment_ids.append(attachment.id)
            self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context)

    def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
        """ - mass_mailing: we cannot render, so return the template values
            - normal mode: return rendered values """
        if template_id and composition_mode == 'mass_mail':
            # Raw (un-rendered) template values; rendering happens per-record
            # later in the mass-mailing flow.
            fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']
            template = self.pool['email.template'].browse(cr, uid, template_id, context=context)
            values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
            if template.attachment_ids:
                values['attachment_ids'] = [att.id for att in template.attachment_ids]
            if template.mail_server_id:
                values['mail_server_id'] = template.mail_server_id.id
            if template.user_signature and 'body_html' in values:
                signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
                values['body_html'] = tools.append_content_to_html(values['body_html'], signature)
        elif template_id:
            values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id]
            # transform attachments into attachment_ids; not attached to the document because this will
            # be done further in the posting process, allowing to clean database if email not send
            ir_attach_obj = self.pool.get('ir.attachment')
            for attach_fname, attach_datas in values.pop('attachments', []):
                data_attach = {
                    'name': attach_fname,
                    'datas': attach_datas,
                    'datas_fname': attach_fname,
                    'res_model': 'mail.compose.message',
                    'res_id': 0,
                    'type': 'binary',  # override default_type from context, possibly meant for another model!
                }
                values.setdefault('attachment_ids', list()).append(ir_attach_obj.create(cr, uid, data_attach, context=context))
        else:
            # Template cleared: reset the wizard fields to their defaults.
            values = self.default_get(cr, uid, ['subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'], context=context)
        if values.get('body_html'):
            values['body'] = values.pop('body_html')
        return {'value': values}

    def save_as_template(self, cr, uid, ids, context=None):
        """ hit save as template button: current form value will be a new
            template attached to the current document. """
        email_template = self.pool.get('email.template')
        ir_model_pool = self.pool.get('ir.model')
        for record in self.browse(cr, uid, ids, context=context):
            model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model)], context=context)
            model_id = model_ids and model_ids[0] or False
            model_name = ''
            if model_id:
                model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
            template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
            values = {
                'name': template_name,
                'subject': record.subject or False,
                'body_html': record.body or False,
                'model_id': model_id or False,
                'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
            }
            template_id = email_template.create(cr, uid, values, context=context)
            # generate the saved template
            template_values = record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value']
            template_values['template_id'] = template_id
            record.write(template_values)
            return _reopen(self, record.id, record.model)

    #------------------------------------------------------
    # Wizard validation and send
    #------------------------------------------------------

    def generate_email_for_composer_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
        """ Call email_template.generate_email(), get fields relevant for
            mail.compose.message, transform email_cc and email_to into partner_ids """
        if context is None:
            context = {}
        if fields is None:
            fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']
        returned_fields = fields + ['partner_ids', 'attachments']
        values = dict.fromkeys(res_ids, False)
        # tpl_partners_only: resolve recipient emails into partner records.
        ctx = dict(context, tpl_partners_only=True)
        template_values = self.pool.get('email.template').generate_email_batch(cr, uid, template_id, res_ids, fields=fields, context=ctx)
        for res_id in res_ids:
            res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))
            res_id_values['body'] = res_id_values.pop('body_html', '')
            values[res_id] = res_id_values
        return values

    def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
        """ Override to handle templates. """
        # generate composer values
        composer_values = super(mail_compose_message, self).render_message_batch(cr, uid, wizard, res_ids, context)
        # generate template-based values
        if wizard.template_id:
            template_values = self.generate_email_for_composer_batch(
                cr, uid, wizard.template_id.id, res_ids,
                fields=['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'],
                context=context)
        else:
            template_values = {}
        for res_id in res_ids:
            if template_values.get(res_id):
                # recipients are managed by the template
                composer_values[res_id].pop('partner_ids')
                composer_values[res_id].pop('email_to')
                composer_values[res_id].pop('email_cc')
                # remove attachments from template values as they should not be rendered
                template_values[res_id].pop('attachment_ids', None)
            else:
                template_values[res_id] = dict()
            # update template values by composer values
            template_values[res_id].update(composer_values[res_id])
        return template_values

    def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
        """Delegate template rendering to the email.template model."""
        return self.pool.get('email.template').render_template_batch(cr, uid, template, model, res_ids, context=context, post_process=post_process)

    # Compatibility methods
    def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
        """Single-record convenience wrapper around the batch variant."""
        return self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
huntxu/neutron | neutron/extensions/filter_validation.py | 2 | 1226 | # Copyright (c) 2017 Huawei Technology, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions
from oslo_config import cfg
from oslo_log import log as logging
from neutron.extensions import _filter_validation_lib as apidef
LOG = logging.getLogger(__name__)
def _disable_extension_by_config(aliases):
    """Remove the filter-validation alias when disabled in configuration."""
    if cfg.CONF.filter_validation:
        return
    if 'filter-validation' not in aliases:
        return
    aliases.remove('filter-validation')
    LOG.info('Disabled filter validation extension.')
class Filter_validation(extensions.APIExtensionDescriptor):
    """Extension class supporting filter validation."""
    # API definition comes from the private _filter_validation_lib module,
    # imported above as `apidef`.
    api_definition = apidef
zhukite/nikola-install | nikola/rc4.py | 4 | 2173 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Bo Zhu http://about.bozhu.me
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import base64
import sys
def KSA(key):
    """RC4 key-scheduling algorithm: derive the initial 256-entry state
    permutation from *key* (a list of byte values)."""
    keylength = len(key)
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + key[i % keylength]) % 256
        S[i], S[j] = S[j], S[i]  # swap
    return S


def PRGA(S):
    """RC4 pseudo-random generation algorithm: yield keystream bytes,
    mutating the state list *S* in place."""
    i = 0
    j = 0
    while True:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]  # swap
        yield S[(S[i] + S[j]) % 256]


def RC4(key):
    """Return an RC4 keystream generator for *key* (a list of byte values)."""
    return PRGA(KSA(key))


def rc4(key, string):
    """Encrypt *string* with RC4 under *key* and return the ciphertext
    base64-encoded (newlines stripped).

    >>> print(rc4("Key", "Plaintext"))
    u/MW6NlArwrT
    """
    # Fix: the original called string.encode('utf8') / key.encode('utf8')
    # and discarded the results -- dead statements, removed.
    keystream = RC4([ord(c) for c in key])
    r = b''
    for c in string:
        if sys.version_info[0] == 3:
            r += bytes([ord(c) ^ next(keystream)])
        else:
            r += chr(ord(c) ^ next(keystream))
    return base64.b64encode(r).replace(b'\n', b'').decode('ascii')
| mit |
vladan-m/ggrc-core | src/ggrc/models/object_owner.py | 1 | 2701 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
from ggrc import db
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from .mixins import deferred, Mapping
from .reflection import PublishOnly
class ObjectOwner(Mapping, db.Model):
  """Join model linking a Person to an arbitrary 'ownable' object via a
  polymorphic (ownable_type, ownable_id) pair."""
  __tablename__ = 'object_owners'

  person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
  ownable_id = db.Column(db.Integer, nullable=False)
  ownable_type = db.Column(db.String, nullable=False)

  @property
  def ownable_attr(self):
    # Name of the relationship backref created by Ownable.object_owners,
    # e.g. 'Program_ownable' for ownable_type == 'Program'.
    return '{0}_ownable'.format(self.ownable_type)

  @property
  def ownable(self):
    """The owned object, resolved through the type-specific backref."""
    return getattr(self, self.ownable_attr)

  @ownable.setter
  def ownable(self, value):
    # Keep the denormalized id/type columns in sync with the relationship.
    self.ownable_id = value.id if value is not None else None
    self.ownable_type = \
        value.__class__.__name__ if value is not None else None
    return setattr(self, self.ownable_attr, value)

  @staticmethod
  def _extra_table_args(cls):
    # One owner row per (person, object); index for reverse lookups.
    return (
        db.UniqueConstraint('person_id', 'ownable_id', 'ownable_type'),
        db.Index('ix_object_owners_ownable', 'ownable_type', 'ownable_id'),
        )

  _publish_attrs = [
      'person',
      'ownable',
      ]

  #@classmethod
  #def eager_query(cls):
    #from sqlalchemy import orm

    #query = super(ObjectOwner, cls).eager_query()
    #return query.options(
        #orm.subqueryload('person'))

  def _display_name(self):
    return self.ownable.display_name + '<->' + self.person.display_name
class Ownable(object):
  """Mixin that gives a model an `owners` collection of Person records,
  implemented through the polymorphic ObjectOwner join model."""

  @declared_attr
  def object_owners(cls):
    # `owners` proxies straight to the Person objects; assigning a person
    # creates the ObjectOwner row with the correct ownable_type.
    cls.owners = association_proxy(
        'object_owners', 'person',
        creator=lambda person: ObjectOwner(
            person=person,
            ownable_type=cls.__name__,
            )
        )
    # Primary join is built as a string because the mixin is applied to
    # many classes; ownable_type discriminates the polymorphic rows.
    joinstr = 'and_(foreign(ObjectOwner.ownable_id) == {type}.id, '\
                   'foreign(ObjectOwner.ownable_type) == "{type}")'
    joinstr = joinstr.format(type=cls.__name__)
    return db.relationship(
        'ObjectOwner',
        primaryjoin=joinstr,
        backref='{0}_ownable'.format(cls.__name__),
        cascade='all, delete-orphan',
        )

  _publish_attrs = [
      'owners',
      PublishOnly('object_owners'),
      ]

  _include_links = [
      #'object_owners',
      ]

  @classmethod
  def eager_query(cls):
    from sqlalchemy import orm

    query = super(Ownable, cls).eager_query()
    return cls.eager_inclusions(query, Ownable._include_links).options(
        orm.subqueryload('object_owners'))
| apache-2.0 |
ChrisGoedhart/Uforia | source/django/conf/locale/ml/formats.py | 341 | 1635 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# NOTE(review): these values mirror the English/US defaults rather than
# Malayalam-specific conventions -- presumably copied from the 'en' locale;
# confirm before relying on them for localized display.
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| gpl-2.0 |
hackerkid/zulip | zerver/migrations/0073_custom_profile_fields.py | 6 | 2352 | # Generated by Django 1.10.5 on 2017-04-17 06:49
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the CustomProfileField and CustomProfileFieldValue tables.

    NOTE: this migration has already shipped; its operations must remain
    semantically unchanged -- only comments are added here.
    """

    dependencies = [
        ("zerver", "0072_realmauditlog_add_index_event_time"),
    ]

    operations = [
        # Per-realm definition of a custom profile field (name + type).
        migrations.CreateModel(
            name="CustomProfileField",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("name", models.CharField(max_length=100)),
                (
                    "field_type",
                    models.PositiveSmallIntegerField(
                        choices=[(1, "Integer"), (2, "Float"), (3, "Short text"), (4, "Long text")],
                        default=3,
                    ),
                ),
                (
                    "realm",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="zerver.Realm"
                    ),
                ),
            ],
        ),
        # A user's value for one custom profile field.
        migrations.CreateModel(
            name="CustomProfileFieldValue",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("value", models.TextField()),
                (
                    "field",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="zerver.CustomProfileField"
                    ),
                ),
                (
                    "user_profile",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
                    ),
                ),
            ],
        ),
        # Each user may have at most one value per field; field names are
        # unique within a realm.
        migrations.AlterUniqueTogether(
            name="customprofilefieldvalue",
            unique_together={("user_profile", "field")},
        ),
        migrations.AlterUniqueTogether(
            name="customprofilefield",
            unique_together={("realm", "name")},
        ),
    ]
| apache-2.0 |
xq262144/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlrd/biffh.py | 64 | 16753 | # -*- coding: cp1252 -*-
##
# Support module for the xlrd package.
#
# <p>Portions copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2008-02-10 SJM BIFF2 BLANK record
# 2008-02-08 SJM Preparation for Excel 2.0 support
# 2008-02-02 SJM Added suffixes (_B2, _B2_ONLY, etc) on record names for biff_dump & biff_count
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-09-08 SJM Avoid crash when zero-length Unicode string missing options byte.
# 2007-04-22 SJM Remove experimental "trimming" facility.
DEBUG = 0
from struct import unpack
import sys
from timemachine import *
class XLRDError(Exception):
    """Generic exception raised for any error detected while reading a workbook."""
    pass
##
# Parent of almost all other classes in the package. Defines a common "dump" method
# for debugging.
class BaseObject(object):
    """Parent of almost all other classes in the package; supplies a common
    ``dump`` method for debugging (Python 2 code: uses ``print >>``)."""

    # Attribute names listed here are always shown with repr() by dump(),
    # even if they are lists/dicts (which are otherwise summarised).
    _repr_these = []
    ##
    # @param f open file object, to which the dump is written
    # @param header text to write before the dump
    # @param footer text to write after the dump
    # @param indent number of leading spaces (for recursive calls)
    def dump(self, f=None, header=None, footer=None, indent=0):
        # Default to stderr so dumps don't pollute normal program output.
        if f is None:
            f = sys.stderr
        alist = self.__dict__.items()
        alist.sort()  # deterministic alphabetical order (Py2: items() is a list)
        pad = " " * indent
        if header is not None: print >> f, header
        list_type = type([])
        dict_type = type({})
        for attr, value in alist:
            # Recurse into any attribute that itself supports dump(), except
            # 'book' (a back-reference that would cause runaway recursion).
            if getattr(value, 'dump', None) and attr != 'book':
                value.dump(f,
                    header="%s%s (%s object):" % (pad, attr, value.__class__.__name__),
                    indent=indent+4)
            elif attr not in self._repr_these and (
                isinstance(value, list_type) or isinstance(value, dict_type)
                ):
                # Containers are summarised by type and length, not printed in full.
                print >> f, "%s%s: %s, len = %d" % (pad, attr, type(value), len(value))
            else:
                print >> f, "%s%s: %r" % (pad, attr, value)
        if footer is not None: print >> f, footer
FUN, FDT, FNU, FGE, FTX = range(5) # unknown, date, number, general, text
DATEFORMAT = FDT
NUMBERFORMAT = FNU
(
XL_CELL_EMPTY,
XL_CELL_TEXT,
XL_CELL_NUMBER,
XL_CELL_DATE,
XL_CELL_BOOLEAN,
XL_CELL_ERROR,
XL_CELL_BLANK, # for use in debugging, gathering stats, etc
) = range(7)
biff_text_from_num = {
0: "(not BIFF)",
20: "2.0",
21: "2.1",
30: "3",
40: "4S",
45: "4W",
50: "5",
70: "7",
80: "8",
85: "8X",
}
##
# <p>This dictionary can be used to produce a text version of the internal codes
# that Excel uses for error cells. Here are its contents:
# <pre>
# 0x00: '#NULL!', # Intersection of two cell ranges is empty
# 0x07: '#DIV/0!', # Division by zero
# 0x0F: '#VALUE!', # Wrong type of operand
# 0x17: '#REF!', # Illegal or deleted cell reference
# 0x1D: '#NAME?', # Wrong function or range name
# 0x24: '#NUM!', # Value range overflow
# 0x2A: '#N/A!', # Argument or function not available
# </pre></p>
error_text_from_code = {
0x00: '#NULL!', # Intersection of two cell ranges is empty
0x07: '#DIV/0!', # Division by zero
0x0F: '#VALUE!', # Wrong type of operand
0x17: '#REF!', # Illegal or deleted cell reference
0x1D: '#NAME?', # Wrong function or range name
0x24: '#NUM!', # Value range overflow
0x2A: '#N/A!', # Argument or function not available
}
BIFF_FIRST_UNICODE = 80
XL_WORKBOOK_GLOBALS = WBKBLOBAL = 0x5
XL_WORKBOOK_GLOBALS_4W = 0x100
XL_WORKSHEET = WRKSHEET = 0x10
XL_BOUNDSHEET_WORKSHEET = 0x00
XL_BOUNDSHEET_CHART = 0x02
XL_BOUNDSHEET_VB_MODULE = 0x06
# XL_RK2 = 0x7e
XL_ARRAY = 0x0221
XL_ARRAY2 = 0x0021
XL_BLANK = 0x0201
XL_BLANK_B2 = 0x01
XL_BOF = 0x809
XL_BOOLERR = 0x205
XL_BOOLERR_B2 = 0x5
XL_BOUNDSHEET = 0x85
XL_BUILTINFMTCOUNT = 0x56
XL_CF = 0x01B1
XL_CODEPAGE = 0x42
XL_COLINFO = 0x7D
XL_COLUMNDEFAULT = 0x20 # BIFF2 only
XL_COLWIDTH = 0x24 # BIFF2 only
XL_CONDFMT = 0x01B0
XL_CONTINUE = 0x3c
XL_COUNTRY = 0x8C
XL_DATEMODE = 0x22
XL_DEFAULTROWHEIGHT = 0x0225
XL_DEFCOLWIDTH = 0x55
XL_DIMENSION = 0x200
XL_DIMENSION2 = 0x0
XL_EFONT = 0x45
XL_EOF = 0x0a
XL_EXTERNNAME = 0x23
XL_EXTERNSHEET = 0x17
XL_EXTSST = 0xff
XL_FEAT11 = 0x872
XL_FILEPASS = 0x2f
XL_FONT = 0x31
XL_FONT_B3B4 = 0x231
XL_FORMAT = 0x41e
XL_FORMAT2 = 0x1E # BIFF2, BIFF3
XL_FORMULA = 0x6
XL_FORMULA3 = 0x206
XL_FORMULA4 = 0x406
XL_GCW = 0xab
XL_INDEX = 0x20b
XL_INTEGER = 0x2 # BIFF2 only
XL_IXFE = 0x44 # BIFF2 only
XL_LABEL = 0x204
XL_LABEL_B2 = 0x04
XL_LABELRANGES = 0x15f
XL_LABELSST = 0xfd
XL_MERGEDCELLS = 0xE5
XL_MSO_DRAWING = 0x00EC
XL_MSO_DRAWING_GROUP = 0x00EB
XL_MSO_DRAWING_SELECTION = 0x00ED
XL_MULRK = 0xbd
XL_MULBLANK = 0xbe
XL_NAME = 0x18
XL_NOTE = 0x1c
XL_NUMBER = 0x203
XL_NUMBER_B2 = 0x3
XL_OBJ = 0x5D
XL_PALETTE = 0x92
XL_RK = 0x27e
XL_ROW = 0x208
XL_ROW_B2 = 0x08
XL_RSTRING = 0xd6
XL_SHEETHDR = 0x8F # BIFF4W only
XL_SHEETSOFFSET = 0x8E # BIFF4W only
XL_SHRFMLA = 0x04bc
XL_SST = 0xfc
XL_STANDARDWIDTH = 0x99
XL_STRING = 0x207
XL_STRING_B2 = 0x7
XL_STYLE = 0x293
XL_SUPBOOK = 0x1AE
XL_TABLEOP = 0x236
XL_TABLEOP2 = 0x37
XL_TABLEOP_B2 = 0x36
XL_TXO = 0x1b6
XL_UNCALCED = 0x5e
XL_UNKNOWN = 0xffff
XL_WINDOW2 = 0x023E
XL_WRITEACCESS = 0x5C
XL_XF = 0xe0
XL_XF2 = 0x0043 # BIFF2 version of XF record
XL_XF3 = 0x0243 # BIFF3 version of XF record
XL_XF4 = 0x0443 # BIFF4 version of XF record
boflen = {0x0809: 8, 0x0409: 6, 0x0209: 6, 0x0009: 4}
bofcodes = (0x0809, 0x0409, 0x0209, 0x0009)
XL_FORMULA_OPCODES = (0x0006, 0x0406, 0x0206)
# BIFF record opcodes that carry cell data.
_cell_opcode_list = [
    XL_BOOLERR,
    XL_FORMULA,
    XL_FORMULA3,
    XL_FORMULA4,
    XL_LABEL,
    XL_LABELSST,
    XL_MULRK,
    XL_NUMBER,
    XL_RK,
    XL_RSTRING,
]
# Kept (list + dict) for backward compatibility with any external users.
_cell_opcode_dict = {}
for _cell_opcode in _cell_opcode_list:
    _cell_opcode_dict[_cell_opcode] = 1
# is_cell_opcode(rc) -> bool.  dict.has_key is deprecated (and removed in
# Python 3); the bound __contains__ method is the exact equivalent predicate.
is_cell_opcode = _cell_opcode_dict.__contains__
# def fprintf(f, fmt, *vargs): f.write(fmt % vargs)
def fprintf(f, fmt, *vargs):
    """C-style fprintf: write ``fmt % vargs`` to file object *f* (Python 2).

    If *fmt* ends with a newline it is stripped and the print statement
    supplies one; otherwise the trailing-comma form suppresses print's own
    newline (print's soft-space behaviour may still add a space later).
    """
    if fmt.endswith('\n'):
        print >> f, fmt[:-1] % vargs
    else:
        print >> f, fmt % vargs,
def upkbits(tgt_obj, src, manifest, local_setattr=setattr):
    """Unpack bit fields from the integer *src* onto *tgt_obj*.

    Each *manifest* entry is a ``(shift, mask, attr_name)`` triple: the bits
    selected by *mask* are shifted right by *shift* and stored on *tgt_obj*
    under *attr_name*.  *local_setattr* is injectable purely as a CPython
    local-variable lookup speed hack.
    """
    for shift, mask, attr_name in manifest:
        extracted = (src & mask) >> shift
        local_setattr(tgt_obj, attr_name, extracted)
def upkbitsL(tgt_obj, src, manifest, local_setattr=setattr, local_int=int):
    """Like :func:`upkbits`, but coerces each extracted field through
    *local_int*, guaranteeing a plain ``int`` result (relevant on Python 2,
    where a shifted value could be a ``long``)."""
    for entry in manifest:
        shift, mask, attr_name = entry
        value = src & mask
        value >>= shift
        local_setattr(tgt_obj, attr_name, local_int(value))
def unpack_string(data, pos, encoding, lenlen=1):
    """Decode a length-prefixed BIFF byte string at ``data[pos]`` (Python 2).

    The character count is stored little-endian in *lenlen* bytes
    (1 -> 'B', 2 -> 'H'); the bytes that follow are decoded with *encoding*
    and returned as a unicode object.
    """
    nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
    pos += lenlen
    return unicode(data[pos:pos+nchars], encoding)
def unpack_string_update_pos(data, pos, encoding, lenlen=1, known_len=None):
    """Like :func:`unpack_string`, but returns ``(unicode_strg, new_pos)``.

    If *known_len* is given, the length prefix is NOT read from the data
    (used for NAME records, where the length byte is stored elsewhere).
    """
    if known_len is not None:
        # On a NAME record, the length byte is detached from the front of the string.
        nchars = known_len
    else:
        nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
        pos += lenlen
    newpos = pos + nchars
    return (unicode(data[pos:newpos], encoding), newpos)
def unpack_unicode(data, pos, lenlen=2):
    """Decode a BIFF8 unicode string at ``data[pos]``; return the text only.

    Layout: <char count: lenlen bytes> <options byte> [rich-text count: 2]
    [phonetic size: 4] <character data>.  Options bit 0 selects uncompressed
    UTF-16-LE (set) vs one-byte-per-char "compressed" (clear); bits 3 and 2
    flag rich-text / phonetic blocks, whose headers are skipped here.
    Use :func:`unpack_unicode_update_pos` when the caller needs the updated
    position as well.  (Python 2: returns a ``unicode`` object.)
    """
    nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
    if not nchars:
        # Ambiguous whether 0-length string should have an "options" byte.
        # Avoid crash if missing.
        return u""
    pos += lenlen
    options = ord(data[pos])
    pos += 1
    # phonetic = options & 0x04
    # richtext = options & 0x08
    if options & 0x08:
        # rich-text header: 2-byte run count (payload follows the chars; unused here)
        # rt = unpack('<H', data[pos:pos+2])[0] # unused
        pos += 2
    if options & 0x04:
        # phonetic header: 4-byte size (payload follows the chars; unused here)
        # sz = unpack('<i', data[pos:pos+4])[0] # unused
        pos += 4
    if options & 0x01:
        # Uncompressed UTF-16-LE
        rawstrg = data[pos:pos+2*nchars]
        # if DEBUG: print "nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg)
        strg = unicode(rawstrg, 'utf_16_le')
        # pos += 2*nchars
    else:
        # Note: this is COMPRESSED (not ASCII!) encoding!!!
        # Merely returning the raw bytes would work OK 99.99% of the time
        # if the local codepage was cp1252 -- however this would rapidly go pear-shaped
        # for other codepages so we grit our Anglocentric teeth and return Unicode :-)
        strg = unicode(data[pos:pos+nchars], "latin_1")
        # pos += nchars
    # if richtext:
    #     pos += 4 * rt
    # if phonetic:
    #     pos += sz
    # return (strg, pos)
    return strg
def unpack_unicode_update_pos(data, pos, lenlen=2, known_len=None):
    """Decode a BIFF8 unicode string; return ``(unicode_strg, new_pos)``.

    Same wire layout as :func:`unpack_unicode`, but the returned position
    is advanced past the character data AND past any rich-text / phonetic
    payloads.  If *known_len* is given, the length prefix is not read from
    the data (NAME records store it detached).
    """
    if known_len is not None:
        # On a NAME record, the length byte is detached from the front of the string.
        nchars = known_len
    else:
        nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
        pos += lenlen
    if not nchars and not data[pos:]:
        # Zero-length string with no options byte
        return (u"", pos)
    options = ord(data[pos])
    pos += 1
    phonetic = options & 0x04
    richtext = options & 0x08
    if richtext:
        # rt: number of rich-text runs (4 bytes each, after the chars)
        rt = unpack('<H', data[pos:pos+2])[0]
        pos += 2
    if phonetic:
        # sz: byte size of the phonetic block (after the chars)
        sz = unpack('<i', data[pos:pos+4])[0]
        pos += 4
    if options & 0x01:
        # Uncompressed UTF-16-LE
        strg = unicode(data[pos:pos+2*nchars], 'utf_16_le')
        pos += 2*nchars
    else:
        # Note: this is COMPRESSED (not ASCII!) encoding!!!
        strg = unicode(data[pos:pos+nchars], "latin_1")
        pos += nchars
    if richtext:
        pos += 4 * rt
    if phonetic:
        pos += sz
    return (strg, pos)
def unpack_cell_range_address_list_update_pos(
    output_list, data, pos, biff_version, addr_size=6):
    """Parse a BIFF cell-range address list; return the updated position.

    Appends ``(row_first, row_last+1, col_first, col_last+1)`` tuples to
    *output_list* (half-open ranges, Python slice style).  *addr_size* is
    6 for the pre-BIFF8 layout (byte-sized columns) and 8 for BIFF8
    (word-sized columns).  Python 2 code (uses ``xrange``).
    """
    # output_list is updated in situ
    if biff_version < 80:
        assert addr_size == 6
    else:
        assert addr_size in (6, 8)
    n, = unpack("<H", data[pos:pos+2])
    pos += 2
    if n:
        if addr_size == 6:
            fmt = "<HHBB"
        else:
            fmt = "<HHHH"
        for _unused in xrange(n):
            ra, rb, ca, cb = unpack(fmt, data[pos:pos+addr_size])
            # Stored ranges are inclusive; convert to half-open.
            output_list.append((ra, rb+1, ca, cb+1))
            pos += addr_size
    return pos
_brecstrg = """\
0000 DIMENSIONS_B2
0001 BLANK_B2
0002 INTEGER_B2_ONLY
0003 NUMBER_B2
0004 LABEL_B2
0005 BOOLERR_B2
0006 FORMULA
0007 STRING_B2
0008 ROW_B2
0009 BOF_B2
000A EOF
000B INDEX_B2_ONLY
000C CALCCOUNT
000D CALCMODE
000E PRECISION
000F REFMODE
0010 DELTA
0011 ITERATION
0012 PROTECT
0013 PASSWORD
0014 HEADER
0015 FOOTER
0016 EXTERNCOUNT
0017 EXTERNSHEET
0018 NAME_B2,5+
0019 WINDOWPROTECT
001A VERTICALPAGEBREAKS
001B HORIZONTALPAGEBREAKS
001C NOTE
001D SELECTION
001E FORMAT_B2-3
001F BUILTINFMTCOUNT_B2
0020 COLUMNDEFAULT_B2_ONLY
0021 ARRAY_B2_ONLY
0022 DATEMODE
0023 EXTERNNAME
0024 COLWIDTH_B2_ONLY
0025 DEFAULTROWHEIGHT_B2_ONLY
0026 LEFTMARGIN
0027 RIGHTMARGIN
0028 TOPMARGIN
0029 BOTTOMMARGIN
002A PRINTHEADERS
002B PRINTGRIDLINES
002F FILEPASS
0031 FONT
0032 FONT2_B2_ONLY
0036 TABLEOP_B2
0037 TABLEOP2_B2
003C CONTINUE
003D WINDOW1
003E WINDOW2_B2
0040 BACKUP
0041 PANE
0042 CODEPAGE
0043 XF_B2
0044 IXFE_B2_ONLY
0045 EFONT_B2_ONLY
004D PLS
0051 DCONREF
0055 DEFCOLWIDTH
0056 BUILTINFMTCOUNT_B3-4
0059 XCT
005A CRN
005B FILESHARING
005C WRITEACCESS
005D OBJECT
005E UNCALCED
005F SAVERECALC
0063 OBJECTPROTECT
007D COLINFO
007E RK2_mythical_?
0080 GUTS
0081 WSBOOL
0082 GRIDSET
0083 HCENTER
0084 VCENTER
0085 BOUNDSHEET
0086 WRITEPROT
008C COUNTRY
008D HIDEOBJ
008E SHEETSOFFSET
008F SHEETHDR
0090 SORT
0092 PALETTE
0099 STANDARDWIDTH
009B FILTERMODE
009C FNGROUPCOUNT
009D AUTOFILTERINFO
009E AUTOFILTER
00A0 SCL
00A1 SETUP
00AB GCW
00BD MULRK
00BE MULBLANK
00C1 MMS
00D6 RSTRING
00D7 DBCELL
00DA BOOKBOOL
00DD SCENPROTECT
00E0 XF
00E1 INTERFACEHDR
00E2 INTERFACEEND
00E5 MERGEDCELLS
00E9 BITMAP
00EB MSO_DRAWING_GROUP
00EC MSO_DRAWING
00ED MSO_DRAWING_SELECTION
00EF PHONETIC
00FC SST
00FD LABELSST
00FF EXTSST
013D TABID
015F LABELRANGES
0160 USESELFS
0161 DSF
01AE SUPBOOK
01AF PROTECTIONREV4
01B0 CONDFMT
01B1 CF
01B2 DVAL
01B6 TXO
01B7 REFRESHALL
01B8 HLINK
01BC PASSWORDREV4
01BE DV
01C0 XL9FILE
01C1 RECALCID
0200 DIMENSIONS
0201 BLANK
0203 NUMBER
0204 LABEL
0205 BOOLERR
0206 FORMULA_B3
0207 STRING
0208 ROW
0209 BOF
020B INDEX_B3+
0218 NAME
0221 ARRAY
0223 EXTERNNAME_B3-4
0225 DEFAULTROWHEIGHT
0231 FONT_B3B4
0236 TABLEOP
023E WINDOW2
0243 XF_B3
027E RK
0293 STYLE
0406 FORMULA_B4
0409 BOF
041E FORMAT
0443 XF_B4
04BC SHRFMLA
0800 QUICKTIP
0809 BOF
0862 SHEETLAYOUT
0867 SHEETPROTECTION
0868 RANGEPROTECTION
"""
# Build the record-id -> record-name lookup from the _brecstrg table above,
# then delete the temporaries so they don't linger in the module namespace.
biff_rec_name_dict = {}
for _buff in _brecstrg.splitlines():
    _numh, _name = _buff.split()
    biff_rec_name_dict[int(_numh, 16)] = _name
del _buff, _name, _brecstrg
def hex_char_dump(strg, ofs, dlen, base=0, fout=sys.stdout, unnumbered=False):
    """Write a 16-bytes-per-row hex + ASCII dump of ``strg[ofs:ofs+dlen]``.

    *base* is added to the printed offsets (so offsets can reflect a position
    in an enclosing stream); *unnumbered*=True suppresses the offset column.
    In the ASCII column, NUL renders as '~' and other non-printables as '?'.
    Output goes to *fout*.
    """
    endpos = min(ofs + dlen, len(strg))
    pos = ofs
    numbered = not unnumbered
    num_prefix = ''
    while pos < endpos:
        endsub = min(pos + 16, endpos)
        substrg = strg[pos:endsub]
        lensub = endsub - pos
        if lensub <= 0 or lensub != len(substrg):
            # Defensive check against inconsistent slice arithmetic.
            # BUG FIX: this diagnostic previously went to sys.stdout,
            # ignoring the fout parameter used by every other write here.
            fprintf(
                fout,
                '??? hex_char_dump: ofs=%d dlen=%d base=%d -> endpos=%d pos=%d endsub=%d substrg=%r\n',
                ofs, dlen, base, endpos, pos, endsub, substrg)
            break
        hexd = ''.join(["%02x " % ord(c) for c in substrg])
        chard = ''
        for c in substrg:
            if c == '\0':
                c = '~'
            elif not (' ' <= c <= '~'):
                c = '?'
            chard += c
        if numbered:
            num_prefix = "%5d: " % (base+pos-ofs)
        fprintf(fout, "%s %-48s %s\n", num_prefix, hexd, chard)
        pos = endsub
def biff_dump(mem, stream_offset, stream_len, base=0, fout=sys.stdout, unnumbered=False):
    """Dump every BIFF record in ``mem[stream_offset:stream_offset+stream_len]``.

    For each record, prints a header line (offset, opcode, name, length)
    followed by a hex/char dump of its payload.  Runs of all-zero bytes are
    collapsed into a "zero bytes skipped" line.  *base* adjusts printed
    offsets; *unnumbered* suppresses the offset column.
    """
    pos = stream_offset
    stream_end = stream_offset + stream_len
    adj = base - stream_offset
    dummies = 0          # length of the current run of zero filler bytes
    numbered = not unnumbered
    num_prefix = ''
    while stream_end - pos >= 4:
        # Each record starts with a 2-byte opcode and 2-byte payload length.
        rc, length = unpack('<HH', mem[pos:pos+4])
        if rc == 0 and length == 0:
            # Possibly zero padding; if the remainder is all NULs, finish up.
            if mem[pos:] == '\0' * (stream_end - pos):
                dummies = stream_end - pos
                savpos = pos
                pos = stream_end
                break
            if dummies:
                dummies += 4
            else:
                savpos = pos   # remember where the zero run started
                dummies = 4
            pos += 4
        else:
            if dummies:
                # A real record follows a zero run: report the run first.
                if numbered:
                    num_prefix = "%5d: " % (adj + savpos)
                fprintf(fout, "%s---- %d zero bytes skipped ----\n", num_prefix, dummies)
                dummies = 0
            recname = biff_rec_name_dict.get(rc, '<UNKNOWN>')
            if numbered:
                num_prefix = "%5d: " % (adj + pos)
            fprintf(fout, "%s%04x %s len = %04x (%d)\n", num_prefix, rc, recname, length, length)
            pos += 4
            hex_char_dump(mem, pos, length, adj+pos, fout, unnumbered)
            pos += length
    if dummies:
        # Trailing zero run reached the end of the stream.
        if numbered:
            num_prefix = "%5d: " % (adj + savpos)
        fprintf(fout, "%s---- %d zero bytes skipped ----\n", num_prefix, dummies)
    if pos < stream_end:
        # Fewer than 4 bytes left: not enough for a record header.
        if numbered:
            num_prefix = "%5d: " % (adj + pos)
        fprintf(fout, "%s---- Misc bytes at end ----\n", num_prefix)
        hex_char_dump(mem, pos, stream_end-pos, adj + pos, fout, unnumbered)
    elif pos > stream_end:
        # The last record's declared length overran the stream.
        fprintf(fout, "Last dumped record has length (%d) that is too large\n", length)
def biff_count_records(mem, stream_offset, stream_len, fout=sys.stdout):
    """Tally BIFF records by name and print ``<count> <name>`` lines to *fout*,
    sorted by record name (Python 2 code: uses ``print >>``).

    Stops early when the remainder of the stream is nothing but NUL padding.
    """
    pos = stream_offset
    stream_end = stream_offset + stream_len
    tally = {}
    while stream_end - pos >= 4:
        rc, length = unpack('<HH', mem[pos:pos+4])
        if rc == 0 and length == 0:
            if mem[pos:] == '\0' * (stream_end - pos):
                # Only zero padding remains; done counting.
                break
            recname = "<Dummy (zero)>"
        else:
            recname = biff_rec_name_dict.get(rc, None)
            if recname is None:
                recname = "Unknown_0x%04X" % rc
        # dict.get avoids the deprecated has_key() and the double lookup
        # of the previous has_key/[] pattern.
        tally[recname] = tally.get(recname, 0) + 1
        pos += length + 4
    # sorted() replaces the items()/sort() two-step; same ordering.
    for recname, count in sorted(tally.items()):
        print >> fout, "%8d %s" % (count, recname)
encoding_from_codepage = {
1200 : 'utf_16_le',
10000: 'mac_roman',
10006: 'mac_greek', # guess
10007: 'mac_cyrillic', # guess
10029: 'mac_latin2', # guess
10079: 'mac_iceland', # guess
10081: 'mac_turkish', # guess
32768: 'mac_roman',
32769: 'cp1252',
}
# some more guessing, for Indic scripts
# codepage 57000 range:
# 2 Devanagari [0]
# 3 Bengali [1]
# 4 Tamil [5]
# 5 Telegu [6]
# 6 Assamese [1] c.f. Bengali
# 7 Oriya [4]
# 8 Kannada [7]
# 9 Malayalam [8]
# 10 Gujarati [3]
# 11 Gurmukhi [2]
| apache-2.0 |
valkjsaaa/sl4a | python/src/Lib/test/test_doctest.py | 52 | 76371 | """
Test script for doctest.
"""
from test import test_support
import doctest
import warnings
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
######################################################################
## Sample Objects (used by test cases)
######################################################################
def sample_func(v):
    """
    Blah blah
    >>> print sample_func(22)
    44
    Yee ha!
    """
    # NOTE: the docstring above is a doctest *fixture* -- its examples are
    # parsed and executed by the tests in this module, so its text (including
    # the Python 2 print syntax) must not be edited.
    return v+v
class SampleClass:
    """
    >>> print 1
    1
    >>> # comments get ignored. so are empty PS1 and PS2 prompts:
    >>>
    ...
    Multiline example:
    >>> sc = SampleClass(3)
    >>> for i in range(10):
    ... sc = sc.double()
    ... print sc.get(),
    6 12 24 48 96 192 384 768 1536 3072
    """
    # NOTE: every docstring in this class is a doctest *fixture* exercised by
    # the tests below (e.g. test_DocTestFinder expects exact example counts),
    # so none of them may be reworded.
    def __init__(self, val):
        """
        >>> print SampleClass(12).get()
        12
        """
        self.val = val
    def double(self):
        """
        >>> print SampleClass(12).double().get()
        24
        """
        return SampleClass(self.val + self.val)
    def get(self):
        """
        >>> print SampleClass(-5).get()
        -5
        """
        return self.val
    def a_staticmethod(v):
        """
        >>> print SampleClass.a_staticmethod(10)
        11
        """
        return v+1
    # pre-decorator idiom: rebind as staticmethod after definition
    a_staticmethod = staticmethod(a_staticmethod)
    def a_classmethod(cls, v):
        """
        >>> print SampleClass.a_classmethod(10)
        12
        >>> print SampleClass(0).a_classmethod(10)
        12
        """
        return v+2
    # pre-decorator idiom: rebind as classmethod after definition
    a_classmethod = classmethod(a_classmethod)
    # property with an explicit doc= string, so DocTestFinder sees it too
    a_property = property(get, doc="""
        >>> print SampleClass(22).a_property
        22
        """)
    class NestedClass:
        """
        >>> x = SampleClass.NestedClass(5)
        >>> y = x.square()
        >>> print y.get()
        25
        """
        def __init__(self, val=0):
            """
            >>> print SampleClass.NestedClass().get()
            0
            """
            self.val = val
        def square(self):
            return SampleClass.NestedClass(self.val*self.val)
        def get(self):
            return self.val
class SampleNewStyleClass(object):
    r"""
    >>> print '1\n2\n3'
    1
    2
    3
    """
    # NOTE: a new-style (object-derived) twin of SampleClass; its docstrings
    # are doctest fixtures for the tests below and must not be reworded.
    def __init__(self, val):
        """
        >>> print SampleNewStyleClass(12).get()
        12
        """
        self.val = val
    def double(self):
        """
        >>> print SampleNewStyleClass(12).double().get()
        24
        """
        return SampleNewStyleClass(self.val + self.val)
    def get(self):
        """
        >>> print SampleNewStyleClass(-5).get()
        -5
        """
        return self.val
######################################################################
## Fake stdin (for testing interactive debugging)
######################################################################
class _FakeInput:
    """
    A fake input stream for pdb's interactive debugger. Whenever a
    line is read, print it (to simulate the user typing it), and then
    return it. The set of lines to return is specified in the
    constructor; they should not have trailing newlines.
    """
    def __init__(self, lines):
        # lines: list of canned input lines, without trailing newlines.
        self.lines = lines
    def readline(self):
        # Pop the next canned line, echo it as if the user typed it
        # (Python 2 print statement), and return it newline-terminated.
        line = self.lines.pop(0)
        print line
        return line+'\n'
######################################################################
## Test Cases
######################################################################
def test_Example(): r"""
Unit tests for the `Example` class.
Example is a simple container class that holds:
- `source`: A source string.
- `want`: An expected output string.
- `exc_msg`: An expected exception message string (or None if no
exception is expected).
- `lineno`: A line number (within the docstring).
- `indent`: The example's indentation in the input string.
- `options`: An option dictionary, mapping option flags to True or
False.
These attributes are set by the constructor. `source` and `want` are
required; the other attributes all have default values:
>>> example = doctest.Example('print 1', '1\n')
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('print 1\n', '1\n', None, 0, 0, {})
The first three attributes (`source`, `want`, and `exc_msg`) may be
specified positionally; the remaining arguments should be specified as
keyword arguments:
>>> exc_msg = 'IndexError: pop from an empty list'
>>> example = doctest.Example('[].pop()', '', exc_msg,
... lineno=5, indent=4,
... options={doctest.ELLIPSIS: True})
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True})
The constructor normalizes the `source` string to end in a newline:
Source spans a single line: no terminating newline.
>>> e = doctest.Example('print 1', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print 1\n', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
Source spans multiple lines: require terminating newline.
>>> e = doctest.Example('print 1;\nprint 2\n', '1\n2\n')
>>> e.source, e.want
('print 1;\nprint 2\n', '1\n2\n')
>>> e = doctest.Example('print 1;\nprint 2', '1\n2\n')
>>> e.source, e.want
('print 1;\nprint 2\n', '1\n2\n')
Empty source string (which should never appear in real examples)
>>> e = doctest.Example('', '')
>>> e.source, e.want
('\n', '')
The constructor normalizes the `want` string to end in a newline,
unless it's the empty string:
>>> e = doctest.Example('print 1', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print 1', '1')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print', '')
>>> e.source, e.want
('print\n', '')
The constructor normalizes the `exc_msg` string to end in a newline,
unless it's `None`:
Message spans one line
>>> exc_msg = 'IndexError: pop from an empty list'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
>>> exc_msg = 'IndexError: pop from an empty list\n'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
Message spans multiple lines
>>> exc_msg = 'ValueError: 1\n 2'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
>>> exc_msg = 'ValueError: 1\n 2\n'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
Empty (but non-None) exception message (which should never appear
in real examples)
>>> exc_msg = ''
>>> e = doctest.Example('raise X()', '', exc_msg)
>>> e.exc_msg
'\n'
"""
def test_DocTest(): r"""
Unit tests for the `DocTest` class.
DocTest is a collection of examples, extracted from a docstring, along
with information about where the docstring comes from (a name,
filename, and line number). The docstring is parsed by the `DocTest`
constructor:
>>> docstring = '''
... >>> print 12
... 12
...
... Non-example text.
...
... >>> print 'another\example'
... another
... example
... '''
>>> globs = {} # globals to run the test in.
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_file', 20)
>>> print test
<DocTest some_test from some_file:20 (2 examples)>
>>> len(test.examples)
2
>>> e1, e2 = test.examples
>>> (e1.source, e1.want, e1.lineno)
('print 12\n', '12\n', 1)
>>> (e2.source, e2.want, e2.lineno)
("print 'another\\example'\n", 'another\nexample\n', 6)
Source information (name, filename, and line number) is available as
attributes on the doctest object:
>>> (test.name, test.filename, test.lineno)
('some_test', 'some_file', 20)
The line number of an example within its containing file is found by
adding the line number of the example and the line number of its
containing test:
>>> test.lineno + e1.lineno
21
>>> test.lineno + e2.lineno
26
If the docstring contains inconsistant leading whitespace in the
expected output of an example, then `DocTest` will raise a ValueError:
>>> docstring = r'''
... >>> print 'bad\nindentation'
... bad
... indentation
... '''
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation'
If the docstring contains inconsistent leading whitespace on
continuation lines, then `DocTest` will raise a ValueError:
>>> docstring = r'''
... >>> print ('bad indentation',
... ... 2)
... ('bad', 'indentation')
... '''
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2)'
If there's no blank space after a PS1 prompt ('>>>'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>>print 1\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print 1'
If there's no blank space after a PS2 prompt ('...'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>> if 1:\n...print 1\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print 1'
"""
def test_DocTestFinder(): r"""
Unit tests for the `DocTestFinder` class.
DocTestFinder is used to extract DocTests from an object's docstring
and the docstrings of its contained objects. It can be used with
modules, functions, classes, methods, staticmethods, classmethods, and
properties.
Finding Tests in Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
For a function whose docstring contains examples, DocTestFinder.find()
will return a single test (for that function's docstring):
>>> finder = doctest.DocTestFinder()
We'll simulate a __file__ attr that ends in pyc:
>>> import test.test_doctest
>>> old = test.test_doctest.__file__
>>> test.test_doctest.__file__ = 'test_doctest.pyc'
>>> tests = finder.find(sample_func)
>>> print tests # doctest: +ELLIPSIS
[<DocTest sample_func from ...:16 (1 example)>]
The exact name depends on how test_doctest was invoked, so allow for
leading path components.
>>> tests[0].filename # doctest: +ELLIPSIS
'...test_doctest.py'
>>> test.test_doctest.__file__ = old
>>> e = tests[0].examples[0]
>>> (e.source, e.want, e.lineno)
('print sample_func(22)\n', '44\n', 3)
By default, tests are created for objects with no docstring:
>>> def no_docstring(v):
... pass
>>> finder.find(no_docstring)
[]
However, the optional argument `exclude_empty` to the DocTestFinder
constructor can be used to exclude tests for objects with empty
docstrings:
>>> def no_docstring(v):
... pass
>>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True)
>>> excl_empty_finder.find(no_docstring)
[]
If the function has a docstring with no examples, then a test with no
examples is returned. (This lets `DocTestRunner` collect statistics
about which functions have no tests -- but is that useful? And should
an empty test also be created when there's no docstring?)
>>> def no_examples(v):
... ''' no doctest examples '''
>>> finder.find(no_examples) # doctest: +ELLIPSIS
[<DocTest no_examples from ...:1 (no examples)>]
Finding Tests in Classes
~~~~~~~~~~~~~~~~~~~~~~~~
For a class, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
methods, classmethods, staticmethods, properties, and nested classes.
>>> finder = doctest.DocTestFinder()
>>> tests = finder.find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
New-style classes are also supported:
>>> tests = finder.find(SampleNewStyleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
1 SampleNewStyleClass
1 SampleNewStyleClass.__init__
1 SampleNewStyleClass.double
1 SampleNewStyleClass.get
Finding Tests in Modules
~~~~~~~~~~~~~~~~~~~~~~~~
For a module, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
functions, classes, and the `__test__` dictionary, if it exists:
>>> # A module
>>> import types
>>> m = types.ModuleType('some_module')
>>> def triple(val):
... '''
... >>> print triple(11)
... 33
... '''
... return val*3
>>> m.__dict__.update({
... 'sample_func': sample_func,
... 'SampleClass': SampleClass,
... '__doc__': '''
... Module docstring.
... >>> print 'module'
... module
... ''',
... '__test__': {
... 'd': '>>> print 6\n6\n>>> print 7\n7\n',
... 'c': triple}})
>>> finder = doctest.DocTestFinder()
>>> # Use module=test.test_doctest, to prevent doctest from
>>> # ignoring the objects since they weren't defined in m.
>>> import test.test_doctest
>>> tests = finder.find(m, module=test.test_doctest)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
1 some_module
3 some_module.SampleClass
3 some_module.SampleClass.NestedClass
1 some_module.SampleClass.NestedClass.__init__
1 some_module.SampleClass.__init__
2 some_module.SampleClass.a_classmethod
1 some_module.SampleClass.a_property
1 some_module.SampleClass.a_staticmethod
1 some_module.SampleClass.double
1 some_module.SampleClass.get
1 some_module.__test__.c
2 some_module.__test__.d
1 some_module.sample_func
Duplicate Removal
~~~~~~~~~~~~~~~~~
If a single object is listed twice (under different names), then tests
will only be generated for it once:
>>> from test import doctest_aliases
>>> tests = excl_empty_finder.find(doctest_aliases)
>>> print len(tests)
2
>>> print tests[0].name
test.doctest_aliases.TwoNames
TwoNames.f and TwoNames.g are bound to the same object.
We can't guess which will be found in doctest's traversal of
TwoNames.__dict__ first, so we have to allow for either.
>>> tests[1].name.split('.')[-1] in ['f', 'g']
True
Empty Tests
~~~~~~~~~~~
By default, an object with no doctests doesn't create any tests:
>>> tests = doctest.DocTestFinder().find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
By default, that excluded objects with no doctests. exclude_empty=False
tells it to include (empty) tests for objects with no doctests. This feature
is really to support backward compatibility in what doctest.master.summarize()
displays.
>>> tests = doctest.DocTestFinder(exclude_empty=False).find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
0 SampleClass.NestedClass.get
0 SampleClass.NestedClass.square
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
Turning off Recursion
~~~~~~~~~~~~~~~~~~~~~
DocTestFinder can be told not to look for tests in contained objects
using the `recurse` flag:
>>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
Line numbers
~~~~~~~~~~~~
DocTestFinder finds the line number of each example:
>>> def f(x):
... '''
... >>> x = 12
...
... some text
...
... >>> # examples are not created for comments & bare prompts.
... >>>
... ...
...
... >>> for x in range(10):
... ... print x,
... 0 1 2 3 4 5 6 7 8 9
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> [e.lineno for e in test.examples]
[1, 9, 12]
"""
def test_DocTestParser(): r"""
Unit tests for the `DocTestParser` class.
DocTestParser is used to parse docstrings containing doctest examples.
The `parse` method divides a docstring into examples and intervening
text:
>>> s = '''
... >>> x, y = 2, 3 # no output expected
... >>> if 1:
... ... print x
... ... print y
... 2
... 3
...
... Some text.
... >>> x+y
... 5
... '''
>>> parser = doctest.DocTestParser()
>>> for piece in parser.parse(s):
... if isinstance(piece, doctest.Example):
... print 'Example:', (piece.source, piece.want, piece.lineno)
... else:
... print ' Text:', `piece`
Text: '\n'
Example: ('x, y = 2, 3 # no output expected\n', '', 1)
Text: ''
Example: ('if 1:\n print x\n print y\n', '2\n3\n', 2)
Text: '\nSome text.\n'
Example: ('x+y\n', '5\n', 9)
Text: ''
The `get_examples` method returns just the examples:
>>> for piece in parser.get_examples(s):
... print (piece.source, piece.want, piece.lineno)
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print x\n print y\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
The `get_doctest` method creates a Test from the examples, along with the
given arguments:
>>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5)
>>> (test.name, test.filename, test.lineno)
('name', 'filename', 5)
>>> for piece in test.examples:
... print (piece.source, piece.want, piece.lineno)
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print x\n print y\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
"""
class test_DocTestRunner:
def basics(): r"""
Unit tests for the `DocTestRunner` class.
DocTestRunner is used to run DocTest test cases, and to accumulate
statistics. Here's a simple DocTest case we can use:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x
... 12
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
The main DocTestRunner interface is the `run` method, which runs a
given DocTest case in a given namespace (globs). It returns a tuple
`(f,t)`, where `f` is the number of failed tests and `t` is the number
of tried tests.
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=3)
If any example produces incorrect output, then the test runner reports
the failure and proceeds to the next example:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x
... 14
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
... # doctest: +ELLIPSIS
Trying:
x = 12
Expecting nothing
ok
Trying:
print x
Expecting:
14
**********************************************************************
File ..., line 4, in f
Failed example:
print x
Expected:
14
Got:
12
Trying:
x//2
Expecting:
6
ok
TestResults(failed=1, attempted=3)
"""
# Verifies the `verbose` flag: when True, the runner prints a
# Trying/Expecting/ok trace per example; when unspecified, verbosity is
# derived from the presence of '-v' in sys.argv (saved/restored below).
def verbose_flag(): r"""
The `verbose` flag makes the test runner generate more detailed
output:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x
... 12
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
Trying:
x = 12
Expecting nothing
ok
Trying:
print x
Expecting:
12
ok
Trying:
x//2
Expecting:
6
ok
TestResults(failed=0, attempted=3)
If the `verbose` flag is unspecified, then the output will be verbose
iff `-v` appears in sys.argv:
>>> # Save the real sys.argv list.
>>> old_argv = sys.argv
>>> # If -v does not appear in sys.argv, then output isn't verbose.
>>> sys.argv = ['test']
>>> doctest.DocTestRunner().run(test)
TestResults(failed=0, attempted=3)
>>> # If -v does appear in sys.argv, then output is verbose.
>>> sys.argv = ['test', '-v']
>>> doctest.DocTestRunner().run(test)
Trying:
x = 12
Expecting nothing
ok
Trying:
print x
Expecting:
12
ok
Trying:
x//2
Expecting:
6
ok
TestResults(failed=0, attempted=3)
>>> # Restore sys.argv
>>> sys.argv = old_argv
In the remaining examples, the test runner's verbosity will be
explicitly set, to ensure that the test behavior is consistent.
"""
# Exception handling in DocTestRunner: expected tracebacks match by
# type/message (intermediate frames may be elided); output before the
# traceback, wrong type/message, or an unexpected exception all count as
# failures. Also covers IGNORE_EXCEPTION_DETAIL (Python 2 raise syntax).
def exceptions(): r"""
Tests of `DocTestRunner`'s exception handling.
An expected exception is specified with a traceback message. The
lines between the first line and the type/value may be omitted or
replaced with any other string:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x//0
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
An example may not generate output before it raises an exception; if
it does, then the traceback message will not be recognized as
signaling an expected exception, so the example will be reported as an
unexpected exception:
>>> def f(x):
... '''
... >>> x = 12
... >>> print 'pre-exception output', x//0
... pre-exception output
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 4, in f
Failed example:
print 'pre-exception output', x//0
Exception raised:
...
ZeroDivisionError: integer division or modulo by zero
TestResults(failed=1, attempted=2)
Exception messages may contain newlines:
>>> def f(x):
... r'''
... >>> raise ValueError, 'multi\nline\nmessage'
... Traceback (most recent call last):
... ValueError: multi
... line
... message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
If an exception is expected, but an exception with the wrong type or
message is raised, then it is reported as a failure:
>>> def f(x):
... r'''
... >>> raise ValueError, 'message'
... Traceback (most recent call last):
... ValueError: wrong message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
raise ValueError, 'message'
Expected:
Traceback (most recent call last):
ValueError: wrong message
Got:
Traceback (most recent call last):
...
ValueError: message
TestResults(failed=1, attempted=1)
However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the
detail:
>>> def f(x):
... r'''
... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... ValueError: wrong message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type:
>>> def f(x):
... r'''
... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... TypeError: wrong type
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
Expected:
Traceback (most recent call last):
TypeError: wrong type
Got:
Traceback (most recent call last):
...
ValueError: message
TestResults(failed=1, attempted=1)
If an exception is raised but not expected, then it is reported as an
unexpected exception:
>>> def f(x):
... r'''
... >>> 1//0
... 0
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
1//0
Exception raised:
Traceback (most recent call last):
...
ZeroDivisionError: integer division or modulo by zero
TestResults(failed=1, attempted=1)
"""
# Exercises the module-level option flags passed (ORed together) to
# DocTestRunner: DONT_ACCEPT_TRUE_FOR_1, DONT_ACCEPT_BLANKLINE,
# NORMALIZE_WHITESPACE, ELLIPSIS, SKIP, REPORT_UDIFF/CDIFF/NDIFF,
# REPORT_ONLY_FIRST_FAILURE, and register_optionflag() idempotence.
# NOTE(review): the diff-output examples depend on exact whitespace in
# the docstring, which this extraction has flattened — do not reflow.
def optionflags(): r"""
Tests of `DocTestRunner`'s option flag handling.
Several option flags can be used to customize the behavior of the test
runner. These are defined as module constants in doctest, and passed
to the DocTestRunner constructor (multiple constants should be ORed
together).
The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False
and 1/0:
>>> def f(x):
... '>>> True\n1\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
True
Expected:
1
Got:
True
TestResults(failed=1, attempted=1)
The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines
and the '<BLANKLINE>' marker:
>>> def f(x):
... '>>> print "a\\n\\nb"\na\n<BLANKLINE>\nb\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_BLANKLINE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print "a\n\nb"
Expected:
a
<BLANKLINE>
b
Got:
a
<BLANKLINE>
b
TestResults(failed=1, attempted=1)
The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be
treated as equal:
>>> def f(x):
... '>>> print 1, 2, 3\n 1 2\n 3'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print 1, 2, 3
Expected:
1 2
3
Got:
1 2 3
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.NORMALIZE_WHITESPACE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
TestResults(failed=0, attempted=1)
An example from the docs:
>>> print range(20) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
The ELLIPSIS flag causes ellipsis marker ("...") in the expected
output to match any substring in the actual output:
>>> def f(x):
... '>>> print range(15)\n[0, 1, 2, ..., 14]\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(15)
Expected:
[0, 1, 2, ..., 14]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.ELLIPSIS
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
TestResults(failed=0, attempted=1)
... also matches nothing:
>>> for i in range(100):
... print i**2, #doctest: +ELLIPSIS
0 1...4...9 16 ... 36 49 64 ... 9801
... can be surprising; e.g., this test passes:
>>> for i in range(21): #doctest: +ELLIPSIS
... print i,
0 1 2 ...1...2...0
Examples from the docs:
>>> print range(20) # doctest:+ELLIPSIS
[0, 1, ..., 18, 19]
>>> print range(20) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
[0, 1, ..., 18, 19]
The SKIP flag causes an example to be skipped entirely. I.e., the
example is not run. It can be useful in contexts where doctest
examples serve as both documentation and test cases, and an example
should be included for documentation purposes, but should not be
checked (e.g., because its output is random, or depends on resources
which would be unavailable.) The SKIP flag can also be used for
'commenting out' broken examples.
>>> import unavailable_resource # doctest: +SKIP
>>> unavailable_resource.do_something() # doctest: +SKIP
>>> unavailable_resource.blow_up() # doctest: +SKIP
Traceback (most recent call last):
...
UncheckedBlowUpError: Nobody checks me.
>>> import random
>>> print random.random() # doctest: +SKIP
0.721216923889
The REPORT_UDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a unified diff:
>>> def f(x):
... r'''
... >>> print '\n'.join('abcdefg')
... a
... B
... c
... d
... f
... g
... h
... '''
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print '\n'.join('abcdefg')
Expected:
a
B
c
d
f
g
h
Got:
a
b
c
d
e
f
g
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_UDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print '\n'.join('abcdefg')
Differences (unified diff with -expected +actual):
@@ -1,7 +1,7 @@
a
-B
+b
c
d
+e
f
g
-h
TestResults(failed=1, attempted=1)
The REPORT_CDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a context diff:
>>> # Reuse f() from the REPORT_UDIFF example, above.
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_CDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print '\n'.join('abcdefg')
Differences (context diff with expected followed by actual):
***************
*** 1,7 ****
a
! B
c
d
f
g
- h
--- 1,7 ----
a
! b
c
d
+ e
f
g
TestResults(failed=1, attempted=1)
The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm
used by the popular ndiff.py utility. This does intraline difference
marking, as well as interline differences.
>>> def f(x):
... r'''
... >>> print "a b c d e f g h i j k l m"
... a b c d e f g h i j k 1 m
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_NDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print "a b c d e f g h i j k l m"
Differences (ndiff with -expected +actual):
- a b c d e f g h i j k 1 m
? ^
+ a b c d e f g h i j k l m
? + ++ ^
TestResults(failed=1, attempted=1)
The REPORT_ONLY_FIRST_FAILURE supresses result output after the first
failing example:
>>> def f(x):
... r'''
... >>> print 1 # first success
... 1
... >>> print 2 # first failure
... 200
... >>> print 3 # second failure
... 300
... >>> print 4 # second success
... 4
... >>> print 5 # third failure
... 500
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
print 2 # first failure
Expected:
200
Got:
2
TestResults(failed=3, attempted=5)
However, output from `report_start` is not supressed:
>>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
Trying:
print 1 # first success
Expecting:
1
ok
Trying:
print 2 # first failure
Expecting:
200
**********************************************************************
File ..., line 5, in f
Failed example:
print 2 # first failure
Expected:
200
Got:
2
TestResults(failed=3, attempted=5)
For the purposes of REPORT_ONLY_FIRST_FAILURE, unexpected exceptions
count as failures:
>>> def f(x):
... r'''
... >>> print 1 # first success
... 1
... >>> raise ValueError(2) # first failure
... 200
... >>> print 3 # second failure
... 300
... >>> print 4 # second success
... 4
... >>> print 5 # third failure
... 500
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
raise ValueError(2) # first failure
Exception raised:
...
ValueError: 2
TestResults(failed=3, attempted=5)
New option flags can also be registered, via register_optionflag(). Here
we reach into doctest's internals a bit.
>>> unlikely = "UNLIKELY_OPTION_NAME"
>>> unlikely in doctest.OPTIONFLAGS_BY_NAME
False
>>> new_flag_value = doctest.register_optionflag(unlikely)
>>> unlikely in doctest.OPTIONFLAGS_BY_NAME
True
Before 2.4.4/2.5, registering a name more than once erroneously created
more than one flag value. Here we verify that's fixed:
>>> redundant_flag_value = doctest.register_optionflag(unlikely)
>>> redundant_flag_value == new_flag_value
True
Clean up.
>>> del doctest.OPTIONFLAGS_BY_NAME[unlikely]
"""
# Per-example option directives: `# doctest: +OPTION` / `-OPTION` toggle
# flags for a single example only; multiple options may be comma- or
# space-separated, directives may appear on continuation lines, and
# malformed/unregistered directives raise ValueError at parse time.
def option_directives(): r"""
Tests of `DocTestRunner`'s option directive mechanism.
Option directives can be used to turn option flags on or off for a
single example. To turn an option on for an example, follow that
example with a comment of the form ``# doctest: +OPTION``:
>>> def f(x): r'''
... >>> print range(10) # should fail: no ellipsis
... [0, 1, ..., 9]
...
... >>> print range(10) # doctest: +ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
To turn an option off for an example, follow that example with a
comment of the form ``# doctest: -OPTION``:
>>> def f(x): r'''
... >>> print range(10)
... [0, 1, ..., 9]
...
... >>> # should fail: no ellipsis
... >>> print range(10) # doctest: -ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False,
... optionflags=doctest.ELLIPSIS).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 6, in f
Failed example:
print range(10) # doctest: -ELLIPSIS
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
Option directives affect only the example that they appear with; they
do not change the options for surrounding examples:
>>> def f(x): r'''
... >>> print range(10) # Should fail: no ellipsis
... [0, 1, ..., 9]
...
... >>> print range(10) # doctest: +ELLIPSIS
... [0, 1, ..., 9]
...
... >>> print range(10) # Should fail: no ellipsis
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
**********************************************************************
File ..., line 8, in f
Failed example:
print range(10) # Should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=2, attempted=3)
Multiple options may be modified by a single option directive. They
may be separated by whitespace, commas, or both:
>>> def f(x): r'''
... >>> print range(10) # Should fail
... [0, 1, ..., 9]
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
>>> def f(x): r'''
... >>> print range(10) # Should fail
... [0, 1, ..., 9]
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
>>> def f(x): r'''
... >>> print range(10) # Should fail
... [0, 1, ..., 9]
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
The option directive may be put on the line following the source, as
long as a continuation prompt is used:
>>> def f(x): r'''
... >>> print range(10)
... ... # doctest: +ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
For examples with multi-line source, the option directive may appear
at the end of any line:
>>> def f(x): r'''
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print x,
... 0 1 2 ... 9
...
... >>> for x in range(10):
... ... print x, # doctest: +ELLIPSIS
... 0 1 2 ... 9
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
If more than one line of an example with multi-line source has an
option directive, then they are combined:
>>> def f(x): r'''
... Should fail (option directive not on the last line):
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print x, # doctest: +NORMALIZE_WHITESPACE
... 0 1 2...9
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
It is an error to have a comment of the form ``# doctest:`` that is
*not* followed by words of the form ``+OPTION`` or ``-OPTION``, where
``OPTION`` is an option that has been registered with
`register_option`:
>>> # Error: Option not registered
>>> s = '>>> print 12 #doctest: +BADOPTION'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION'
>>> # Error: No + or - prefix
>>> s = '>>> print 12 #doctest: ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS'
It is an error to use an option directive on a line that contains no
source:
>>> s = '>>> # doctest: +ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS'
"""
# doctest.testsource(): converts a named doctest in a module to an
# executable script — example code becomes plain Python, narrative text
# becomes '#' comments, expected output becomes '##' comments.
def test_testsource(): r"""
Unit tests for `testsource()`.
The testsource() function takes a module and a name, finds the (first)
test with that name in that module, and converts it to a script. The
example code is converted to regular Python code. The surrounding
words and expected output are converted to comments:
>>> import test.test_doctest
>>> name = 'test.test_doctest.sample_func'
>>> print doctest.testsource(test.test_doctest, name)
# Blah blah
#
print sample_func(22)
# Expected:
## 44
#
# Yee ha!
<BLANKLINE>
>>> name = 'test.test_doctest.SampleNewStyleClass'
>>> print doctest.testsource(test.test_doctest, name)
print '1\n2\n3'
# Expected:
## 1
## 2
## 3
<BLANKLINE>
>>> name = 'test.test_doctest.SampleClass.a_classmethod'
>>> print doctest.testsource(test.test_doctest, name)
print SampleClass.a_classmethod(10)
# Expected:
## 12
print SampleClass(0).a_classmethod(10)
# Expected:
## 12
<BLANKLINE>
"""
# doctest.debug_src(): runs a docstring's examples under pdb. Debugger
# commands are fed via _FakeInput (a helper defined elsewhere in this
# file) substituted for sys.stdin, then stdin is restored in `finally`.
def test_debug(): r"""
Create a docstring that we want to debug:
>>> s = '''
... >>> x = 12
... >>> print x
... 12
... '''
Create some fake stdin input, to feed to the debugger:
>>> import tempfile
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput(['next', 'print x', 'continue'])
Run the debugger on the docstring, and then restore sys.stdin.
>>> try: doctest.debug_src(s)
... finally: sys.stdin = real_stdin
> <string>(1)<module>()
(Pdb) next
12
--Return--
> <string>(1)<module>()->None
(Pdb) print x
12
(Pdb) continue
"""
# pdb.set_trace() interaction with doctest: doctest temporarily swaps in
# a stdout-restoring set_trace so debugger output is visible. Covers a
# breakpoint in the example itself, in a called function, and `list`
# showing doctest example source inside the debugger.
def test_pdb_set_trace():
"""Using pdb.set_trace from a doctest.
You can use pdb.set_trace from a doctest. To do so, you must
retrieve the set_trace function from the pdb module at the time
you use it. The doctest module changes sys.stdout so that it can
capture program output. It also temporarily replaces pdb.set_trace
with a version that restores stdout. This is necessary for you to
see debugger output.
>>> doc = '''
... >>> x = 42
... >>> import pdb; pdb.set_trace()
... '''
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(doc, {}, "foo", "foo.py", 0)
>>> runner = doctest.DocTestRunner(verbose=False)
To demonstrate this, we'll create a fake standard input that
captures our debugger input:
>>> import tempfile
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print x', # print data defined by the example
... 'continue', # stop debugging
... ''])
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
--Return--
> <doctest foo[1]>(1)<module>()->None
-> import pdb; pdb.set_trace()
(Pdb) print x
42
(Pdb) continue
TestResults(failed=0, attempted=2)
You can also put pdb.set_trace in a function called from a test:
>>> def calls_set_trace():
... y=2
... import pdb; pdb.set_trace()
>>> doc = '''
... >>> x=1
... >>> calls_set_trace()
... '''
>>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print y', # print data defined in the function
... 'up', # out of function
... 'print x', # print data defined by the example
... 'continue', # stop debugging
... ''])
>>> try:
... runner.run(test)
... finally:
... sys.stdin = real_stdin
--Return--
> <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None
-> import pdb; pdb.set_trace()
(Pdb) print y
2
(Pdb) up
> <doctest foo[1]>(1)<module>()
-> calls_set_trace()
(Pdb) print x
1
(Pdb) continue
TestResults(failed=0, attempted=2)
During interactive debugging, source code is shown, even for
doctest examples:
>>> doc = '''
... >>> def f(x):
... ... g(x*2)
... >>> def g(x):
... ... print x+3
... ... import pdb; pdb.set_trace()
... >>> f(3)
... '''
>>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'list', # list source from example 2
... 'next', # return from g()
... 'list', # list source from example 1
... 'next', # return from f()
... 'list', # list source from example 3
... 'continue', # stop debugging
... ''])
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
--Return--
> <doctest foo[1]>(3)g()->None
-> import pdb; pdb.set_trace()
(Pdb) list
1 def g(x):
2 print x+3
3 -> import pdb; pdb.set_trace()
[EOF]
(Pdb) next
--Return--
> <doctest foo[0]>(2)f()->None
-> g(x*2)
(Pdb) list
1 def f(x):
2 -> g(x*2)
[EOF]
(Pdb) next
--Return--
> <doctest foo[2]>(1)<module>()->None
-> f(3)
(Pdb) list
1 -> f(3)
[EOF]
(Pdb) continue
**********************************************************************
File "foo.py", line 7, in foo
Failed example:
f(3)
Expected nothing
Got:
9
TestResults(failed=1, attempted=3)
"""
# Stress-test of pdb.set_trace inside nested method calls from a doctest:
# steps through C.calls_set_trace -> f1 -> f2, then walks up the stack
# with `up`, verifying each frame's locals and that the doctest frame
# itself has no local 'foo'.
def test_pdb_set_trace_nested():
"""This illustrates more-demanding use of set_trace with nested functions.
>>> class C(object):
... def calls_set_trace(self):
... y = 1
... import pdb; pdb.set_trace()
... self.f1()
... y = 2
... def f1(self):
... x = 1
... self.f2()
... x = 2
... def f2(self):
... z = 1
... z = 2
>>> calls_set_trace = C().calls_set_trace
>>> doc = '''
... >>> a = 1
... >>> calls_set_trace()
... '''
>>> parser = doctest.DocTestParser()
>>> runner = doctest.DocTestRunner(verbose=False)
>>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print y', # print data defined in the function
... 'step', 'step', 'step', 'step', 'step', 'step', 'print z',
... 'up', 'print x',
... 'up', 'print y',
... 'up', 'print foo',
... 'continue', # stop debugging
... ''])
>>> try:
... runner.run(test)
... finally:
... sys.stdin = real_stdin
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
-> self.f1()
(Pdb) print y
1
(Pdb) step
--Call--
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(7)f1()
-> def f1(self):
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(8)f1()
-> x = 1
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
-> self.f2()
(Pdb) step
--Call--
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(11)f2()
-> def f2(self):
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(12)f2()
-> z = 1
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(13)f2()
-> z = 2
(Pdb) print z
1
(Pdb) up
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
-> self.f2()
(Pdb) print x
1
(Pdb) up
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
-> self.f1()
(Pdb) print y
1
(Pdb) up
> <doctest foo[1]>(1)<module>()
-> calls_set_trace()
(Pdb) print foo
*** NameError: name 'foo' is not defined
(Pdb) continue
TestResults(failed=0, attempted=2)
"""
# doctest.DocTestSuite(): builds a unittest suite from a module's
# doctests (module object or dotted name), with globs/extraglobs,
# optionflags, and setUp/tearDown hooks; expected run/error/failure
# counts depend on the companion test.sample_doctest module.
def test_DocTestSuite():
"""DocTestSuite creates a unittest test suite from a doctest.
We create a Suite by providing a module. A module can be provided
by passing a module object:
>>> import unittest
>>> import test.sample_doctest
>>> suite = doctest.DocTestSuite(test.sample_doctest)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=4>
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.sample_doctest')
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=4>
We can use the current module:
>>> suite = test.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=4>
We can supply global variables. If we pass globs, they will be
used instead of the module globals. Here we'll pass an empty
globals, triggering an extra error:
>>> suite = doctest.DocTestSuite('test.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=5>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=3>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=5>
You can supply setUp and tearDown functions:
>>> def setUp(t):
... import test.test_doctest
... test.test_doctest.sillySetup = True
>>> def tearDown(t):
... import test.test_doctest
... del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=3>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
...
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown funtions are passed test objects. Here
we'll use the setUp function to supply the missing variable y:
>>> def setUp(test):
... test.globs['y'] = 1
>>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.TestResult run=9 errors=0 failures=3>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
sample_doctest module dictionary. The test globals are
automatically cleared for us after a test.
"""
def test_DocFileSuite():
    """We can test tests found in text files using a DocFileSuite.

       We create a suite by providing the names of one or more text
       files that include examples:

         >>> import unittest
         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt')
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=3 errors=0 failures=3>

       The test files are looked for in the directory containing the
       calling module.  A package keyword argument can be provided to
       specify a different relative location.

         >>> import unittest
         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              package='test')
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=3 errors=0 failures=3>

       Support for using a package's __loader__.get_data() is also
       provided.

         >>> import unittest, pkgutil, test
         >>> added_loader = False
         >>> if not hasattr(test, '__loader__'):
         ...     test.__loader__ = pkgutil.get_loader(test)
         ...     added_loader = True
         >>> try:
         ...     suite = doctest.DocFileSuite('test_doctest.txt',
         ...                                  'test_doctest2.txt',
         ...                                  'test_doctest4.txt',
         ...                                  package='test')
         ...     suite.run(unittest.TestResult())
         ... finally:
         ...     if added_loader:
         ...         del test.__loader__
         <unittest.TestResult run=3 errors=0 failures=3>

       '/' should be used as a path separator.  It will be converted
       to a native separator at run time:

         >>> suite = doctest.DocFileSuite('../test/test_doctest.txt')
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=1 errors=0 failures=1>

       If DocFileSuite is used from an interactive session, then files
       are resolved relative to the directory of sys.argv[0]:

         >>> import types, os.path, test.test_doctest
         >>> save_argv = sys.argv
         >>> sys.argv = [test.test_doctest.__file__]
         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              package=types.ModuleType('__main__'))
         >>> sys.argv = save_argv

       By setting `module_relative=False`, os-specific paths may be
       used (including absolute paths and paths relative to the
       working directory):

         >>> # Get the absolute path of the test package.
         >>> test_doctest_path = os.path.abspath(test.test_doctest.__file__)
         >>> test_pkg_path = os.path.split(test_doctest_path)[0]

         >>> # Use it to find the absolute path of test_doctest.txt.
         >>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt')

         >>> suite = doctest.DocFileSuite(test_file, module_relative=False)
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=1 errors=0 failures=1>

       It is an error to specify `package` when `module_relative=False`:

         >>> suite = doctest.DocFileSuite(test_file, module_relative=False,
         ...                              package='test')
         Traceback (most recent call last):
         ValueError: Package may only be specified for module-relative paths.

       You can specify initial global variables:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              globs={'favorite_color': 'blue'})
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=3 errors=0 failures=2>

       In this case, we supplied a missing favorite color. You can
       provide doctest options:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              optionflags=doctest.DONT_ACCEPT_BLANKLINE,
         ...                              globs={'favorite_color': 'blue'})
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=3 errors=0 failures=3>

       And, you can provide setUp and tearDown functions:

         >>> def setUp(t):
         ...     import test.test_doctest
         ...     test.test_doctest.sillySetup = True

         >>> def tearDown(t):
         ...     import test.test_doctest
         ...     del test.test_doctest.sillySetup

       Here, we installed a silly variable that the test expects:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              setUp=setUp, tearDown=tearDown)
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=3 errors=0 failures=2>

       But the tearDown restores sanity:

         >>> import test.test_doctest
         >>> test.test_doctest.sillySetup
         Traceback (most recent call last):
         ...
         AttributeError: 'module' object has no attribute 'sillySetup'

       The setUp and tearDown functions are passed test objects.
       Here, we'll use a setUp function to set the favorite color in
       test_doctest.txt:

         >>> def setUp(test):
         ...     test.globs['favorite_color'] = 'blue'

         >>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp)
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=1 errors=0 failures=0>

       Here, we didn't need to use a tearDown function because we
       modified the test globals.  The test globals are
       automatically cleared for us after a test.

       Tests in a file run using `DocFileSuite` can also access the
       `__file__` global, which is set to the name of the file
       containing the tests:

         >>> suite = doctest.DocFileSuite('test_doctest3.txt')
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=1 errors=0 failures=0>

       If the tests contain non-ASCII characters, we have to specify which
       encoding the file is encoded with.  We do so by using the `encoding`
       parameter:

         >>> suite = doctest.DocFileSuite('test_doctest.txt',
         ...                              'test_doctest2.txt',
         ...                              'test_doctest4.txt',
         ...                              encoding='utf-8')
         >>> suite.run(unittest.TestResult())
         <unittest.TestResult run=3 errors=0 failures=2>
       """
def test_trailing_space_in_test():
    """
    Trailing spaces in expected output are significant:

      >>> x, y = 'foo', ''
      >>> print x, y
      foo \n
    """
def test_unittest_reportflags():
    """Default unittest reporting flags can be set to control reporting

    Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see
    only the first failure of each test.  First, we'll look at the
    output without the flag.  The file test_doctest.txt file has two
    tests. They both fail if blank lines are disabled:

      >>> suite = doctest.DocFileSuite('test_doctest.txt',
      ...                          optionflags=doctest.DONT_ACCEPT_BLANKLINE)
      >>> import unittest
      >>> result = suite.run(unittest.TestResult())
      >>> print result.failures[0][1] # doctest: +ELLIPSIS
      Traceback ...
      Failed example:
          favorite_color
      ...
      Failed example:
          if 1:
      ...

    Note that we see both failures displayed.

      >>> old = doctest.set_unittest_reportflags(
      ...    doctest.REPORT_ONLY_FIRST_FAILURE)

    Now, when we run the test:

      >>> result = suite.run(unittest.TestResult())
      >>> print result.failures[0][1] # doctest: +ELLIPSIS
      Traceback ...
      Failed example:
          favorite_color
      Exception raised:
          ...
          NameError: name 'favorite_color' is not defined
      <BLANKLINE>
      <BLANKLINE>

    We get only the first failure.

    If we give any reporting options when we set up the tests,
    however:

      >>> suite = doctest.DocFileSuite('test_doctest.txt',
      ...     optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)

    Then the default reporting options are ignored:

      >>> result = suite.run(unittest.TestResult())
      >>> print result.failures[0][1] # doctest: +ELLIPSIS
      Traceback ...
      Failed example:
          favorite_color
      ...
      Failed example:
          if 1:
             print 'a'
             print
             print 'b'
      Differences (ndiff with -expected +actual):
            a
          - <BLANKLINE>
          +
            b
      <BLANKLINE>
      <BLANKLINE>

    Test runners can restore the formatting flags after they run:

      >>> ignored = doctest.set_unittest_reportflags(old)

    """
def test_testfile(): r"""
Tests for the `testfile()` function.  This function runs all the
doctest examples in a given file.  In its simple invocation, it is
called with the name of a file, which is taken to be relative to the
calling module.  The return value is (#failures, #tests).

    >>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in test_doctest.txt
    Failed example:
        favorite_color
    Exception raised:
        ...
        NameError: name 'favorite_color' is not defined
    **********************************************************************
    1 items had failures:
       1 of   2 in test_doctest.txt
    ***Test Failed*** 1 failures.
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

(Note: we'll be clearing doctest.master after each call to
`doctest.testfile`, to suppress warnings about multiple tests with the
same name.)

Globals may be specified with the `globs` and `extraglobs` parameters:

    >>> globs = {'favorite_color': 'blue'}
    >>> doctest.testfile('test_doctest.txt', globs=globs)
    TestResults(failed=0, attempted=2)
    >>> doctest.master = None  # Reset master.

    >>> extraglobs = {'favorite_color': 'red'}
    >>> doctest.testfile('test_doctest.txt', globs=globs,
    ...                  extraglobs=extraglobs) # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in test_doctest.txt
    Failed example:
        favorite_color
    Expected:
        'blue'
    Got:
        'red'
    **********************************************************************
    1 items had failures:
       1 of   2 in test_doctest.txt
    ***Test Failed*** 1 failures.
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

The file may be made relative to a given module or package, using the
optional `module_relative` parameter:

    >>> doctest.testfile('test_doctest.txt', globs=globs,
    ...                  module_relative='test')
    TestResults(failed=0, attempted=2)
    >>> doctest.master = None  # Reset master.

Verbosity can be increased with the optional `verbose` parameter:

    >>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True)
    Trying:
        favorite_color
    Expecting:
        'blue'
    ok
    Trying:
        if 1:
           print 'a'
           print
           print 'b'
    Expecting:
        a
        <BLANKLINE>
        b
    ok
    1 items passed all tests:
       2 tests in test_doctest.txt
    2 tests in 1 items.
    2 passed and 0 failed.
    Test passed.
    TestResults(failed=0, attempted=2)
    >>> doctest.master = None  # Reset master.

The name of the test may be specified with the optional `name`
parameter:

    >>> doctest.testfile('test_doctest.txt', name='newname')
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in newname
    ...
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

The summary report may be suppressed with the optional `report`
parameter:

    >>> doctest.testfile('test_doctest.txt', report=False)
    ... # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 6, in test_doctest.txt
    Failed example:
        favorite_color
    Exception raised:
        ...
        NameError: name 'favorite_color' is not defined
    TestResults(failed=1, attempted=2)
    >>> doctest.master = None  # Reset master.

The optional keyword argument `raise_on_error` can be used to raise an
exception on the first error (which may be useful for postmortem
debugging):

    >>> doctest.testfile('test_doctest.txt', raise_on_error=True)
    ... # doctest: +ELLIPSIS
    Traceback (most recent call last):
    UnexpectedException: ...
    >>> doctest.master = None  # Reset master.

If the tests contain non-ASCII characters, the tests might fail, since
it's unknown which encoding is used.  The encoding can be specified
using the optional keyword argument `encoding`:

    >>> doctest.testfile('test_doctest4.txt') # doctest: +ELLIPSIS
    **********************************************************************
    File "...", line 7, in test_doctest4.txt
    Failed example:
        u'...'
    Expected:
        u'f\xf6\xf6'
    Got:
        u'f\xc3\xb6\xc3\xb6'
    **********************************************************************
    ...
    **********************************************************************
    1 items had failures:
       2 of   4 in test_doctest4.txt
    ***Test Failed*** 2 failures.
    TestResults(failed=2, attempted=4)
    >>> doctest.master = None  # Reset master.

    >>> doctest.testfile('test_doctest4.txt', encoding='utf-8')
    TestResults(failed=0, attempted=4)
    >>> doctest.master = None  # Reset master.
"""
# old_test1, ... used to live in doctest.py, but cluttered it.  Note
# that these use the deprecated doctest.Tester, so should go away (or
# be rewritten) someday.

# Ignore all warnings about the use of class Tester in this module.
# Note that the name of this module may differ depending on how it's
# imported, so the use of __name__ is important.
warnings.filterwarnings("ignore", "class Tester", DeprecationWarning,
                        __name__, 0)
def old_test1(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={'x': 42}, verbose=0)
>>> t.runstring(r'''
... >>> x = x * 2
... >>> print x
... 42
... ''', 'XYZ')
**********************************************************************
Line 3, in XYZ
Failed example:
    print x
Expected:
    42
Got:
    84
TestResults(failed=1, attempted=2)
>>> t.runstring(">>> x = x * 2\n>>> print x\n84\n", 'example2')
TestResults(failed=0, attempted=2)
>>> t.summarize()
**********************************************************************
1 items had failures:
   1 of   2 in XYZ
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=4)
>>> t.summarize(verbose=1)
1 items passed all tests:
   2 tests in example2
**********************************************************************
1 items had failures:
   1 of   2 in XYZ
4 tests in 2 items.
3 passed and 1 failed.
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=4)
"""
def old_test2(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=1)
>>> test = r'''
... # just an example
... >>> x = 1 + 2
... >>> x
... 3
... '''
>>> t.runstring(test, "Example")
Running string Example
Trying:
    x = 1 + 2
Expecting nothing
ok
Trying:
    x
Expecting:
    3
ok
0 of 2 examples failed in string Example
TestResults(failed=0, attempted=2)
"""
def old_test3(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=0)
>>> def _f():
...     '''Trivial docstring example.
...     >>> assert 2 == 2
...     '''
...     return 32
...
>>> t.rundoc(_f)  # expect 0 failures in 1 example
TestResults(failed=0, attempted=1)
"""
def old_test4(): """
>>> import types
>>> m1 = types.ModuleType('_m1')
>>> m2 = types.ModuleType('_m2')
>>> test_data = \"""
... def _f():
...     '''>>> assert 1 == 1
...     '''
... def g():
...     '''>>> assert 2 != 1
...     '''
... class H:
...     '''>>> assert 2 > 1
...     '''
...     def bar(self):
...         '''>>> assert 1 < 2
...         '''
... \"""
>>> exec test_data in m1.__dict__
>>> exec test_data in m2.__dict__
>>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H})

Tests that objects outside m1 are excluded:

>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(m1.__dict__, "rundict_test", m1)  # f2 and g2 and h2 skipped
TestResults(failed=0, attempted=4)

Once more, not excluding stuff outside m1:

>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(m1.__dict__, "rundict_test_pvt")  # None are skipped.
TestResults(failed=0, attempted=8)

The exclusion of objects from outside the designated module is
meant to be invoked automagically by testmod.

>>> doctest.testmod(m1, verbose=False)
TestResults(failed=0, attempted=4)
"""
######################################################################
## Main
######################################################################
def test_main():
    """Entry point: run doctest's own doctests, then this module's."""
    # Check the doctest cases in doctest itself:
    test_support.run_doctest(doctest, verbosity=True)
    # Check the doctest cases defined here:
    from test import test_doctest
    test_support.run_doctest(test_doctest, verbosity=True)
import trace, sys

def test_coverage(coverdir):
    """Run test_main() under the trace module and write a line-coverage
    report into *coverdir* (development helper, not part of the tests).
    """
    tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,],
                         trace=0, count=1)
    # reload so that doctest's module-level code is counted too
    tracer.run('reload(doctest); test_main()')
    r = tracer.results()
    print 'Writing coverage results...'
    r.write_results(show_missing=True, summary=True,
                    coverdir=coverdir)
if __name__ == '__main__':
    # "-c" produces a coverage report instead of just running the tests.
    if '-c' in sys.argv:
        test_coverage('/tmp/doctest.cover')
    else:
        test_main()
| apache-2.0 |
rrader/nova-docker-plugin | plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 28 | 7255 | #!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# XenAPI plugin for reading/writing information to xenstore
#
try:
import json
except ImportError:
import simplejson as json
import utils # noqa
import XenAPIPlugin
import pluginlib_nova as pluginlib # noqa
pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
    """Errors that occur when calling xenstore-* through subprocesses."""

    def __init__(self, cmd, return_code, stderr, stdout):
        # Compose one human-readable message from the failed command and
        # its full output; the raw pieces stay available as attributes.
        msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s"
        msg = msg % (cmd, return_code, stderr, stdout)
        self.cmd = cmd                  # argv list of the xenstore-* call
        self.return_code = return_code  # process exit status
        self.stderr = stderr            # captured standard error
        self.stdout = stdout            # captured standard output
        pluginlib.PluginError.__init__(self, msg)
def jsonify(fnc):
    """Decorator that guarantees the wrapped function returns JSON text.

    If the wrapped function already returns a valid JSON document it is
    passed through untouched; otherwise the raw return value is
    JSON-encoded before being handed back to the XenAPI caller.
    """
    import functools  # local import keeps the module's import block unchanged

    @functools.wraps(fnc)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        ret = fnc(*args, **kwargs)
        try:
            json.loads(ret)
        except ValueError:
            # Value should already be JSON-encoded, but some operations
            # may write raw string values; this will catch those and
            # properly encode them.
            ret = json.dumps(ret)
        return ret
    return wrapper
def _record_exists(arg_dict):
"""Returns whether or not the given record exists. The record path
is determined from the given path and dom_id in the arg_dict.
"""
cmd = ["xenstore-exists", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
_run_command(cmd)
return True
except XenstoreError, e: # noqa
if e.stderr == '':
# if stderr was empty, this just means the path did not exist
return False
# otherwise there was a real problem
raise
@jsonify
def read_record(self, arg_dict):
"""Returns the value stored at the given path for the given dom_id.
These must be encoded as key/value pairs in arg_dict. You can
optinally include a key 'ignore_missing_path'; if this is present
and boolean True, attempting to read a non-existent path will return
the string 'None' instead of raising an exception.
"""
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
result = _run_command(cmd)
return result.strip()
except XenstoreError, e: # noqa
if not arg_dict.get("ignore_missing_path", False):
raise
if not _record_exists(arg_dict):
return "None"
# Just try again in case the agent write won the race against
# the record_exists check. If this fails again, it will likely raise
# an equally meaningful XenstoreError as the one we just caught
result = _run_command(cmd)
return result.strip()
@jsonify
def write_record(self, arg_dict):
    """Writes to xenstore at the specified path.  If there is information
    already stored in that location, it is overwritten.  As in read_record,
    the dom_id and path must be specified in the arg_dict; additionally,
    you must specify a 'value' key, whose value must be a string.  Typically,
    you can json-ify more complex values and store the json output.
    """
    new_value = arg_dict["value"]
    _run_command(["xenstore-write",
                  "/local/domain/%(dom_id)s/%(path)s" % arg_dict,
                  new_value])
    return new_value
@jsonify
def list_records(self, arg_dict):
    """Returns all the stored data at or below the given path for the
    given dom_id.  The data is returned as a json-ified dict, with the
    path as the key and the stored value as the value.  If the path
    doesn't exist, an empty dict is returned.
    """
    dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
    cmd = ["xenstore-ls", dirpath.rstrip("/")]
    try:
        recs = _run_command(cmd)
    except XenstoreError, e:  # noqa
        if not _record_exists(arg_dict):
            return {}
        # Just try again in case the path was created in between
        # the "ls" and the existence check.  If this fails again, it will
        # likely raise an equally meaningful XenstoreError
        recs = _run_command(cmd)
    base_path = arg_dict["path"]
    paths = _paths_from_ls(recs)
    ret = {}
    for path in paths:
        # NOTE: arg_dict["path"] is rewritten on each iteration so that
        # read_record sees the full relative path of the child entry.
        if base_path:
            arg_dict["path"] = "%s/%s" % (base_path, path)
        else:
            arg_dict["path"] = path
        rec = read_record(self, arg_dict)
        try:
            val = json.loads(rec)
        except ValueError:
            # not JSON -- keep the raw string value
            val = rec
        ret[path] = val
    return ret
@jsonify
def delete_record(self, arg_dict):
"""Just like it sounds: it removes the record for the specified
VM and the specified path from xenstore.
"""
cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
return _run_command(cmd)
except XenstoreError, e: # noqa
if 'could not remove path' in e.stderr:
# Entry already gone. We're good to go.
return ''
raise
def _paths_from_ls(recs):
"""The xenstore-ls command returns a listing that isn't terribly
useful. This method cleans that up into a dict with each path
as the key, and the associated string as the value.
"""
last_nm = ""
level = 0
path = []
ret = []
for ln in recs.splitlines():
nm, val = ln.rstrip().split(" = ")
barename = nm.lstrip()
this_level = len(nm) - len(barename)
if this_level == 0:
ret.append(barename)
level = 0
path = []
elif this_level == level:
# child of same parent
ret.append("%s/%s" % ("/".join(path), barename))
elif this_level > level:
path.append(last_nm)
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
elif this_level < level:
path = path[:this_level]
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
last_nm = barename
return ret
def _run_command(cmd):
    """Wrap utils.run_command to raise XenstoreError on failure"""
    try:
        return utils.run_command(cmd)
    except utils.SubprocessException, e:  # noqa
        # Re-raise with the command line and captured output attached so
        # callers can inspect stderr/stdout (e.g. to detect "not found").
        raise XenstoreError(e.cmdline, e.ret, e.err, e.out)
if __name__ == "__main__":
    # Expose the four record operations to XenAPI plugin callers.
    XenAPIPlugin.dispatch(
        {"read_record": read_record,
         "write_record": write_record,
         "list_records": list_records,
         "delete_record": delete_record})
| apache-2.0 |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/docutils/writers/pep_html/__init__.py | 124 | 3507 | # $Id: __init__.py 7630 2013-03-15 22:27:04Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
PEP HTML Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import codecs
import docutils
from docutils import frontend, nodes, utils, writers
from docutils.writers import html4css1
class Writer(html4css1.Writer):

    # Default stylesheet/template ship next to this module; the paths
    # are expressed relative to the current working directory.
    default_stylesheet = 'pep.css'

    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))

    default_template = 'template.txt'

    default_template_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_template))

    settings_spec = html4css1.Writer.settings_spec + (
        'PEP/HTML-Specific Options',
        'For the PEP/HTML writer, the default value for the --stylesheet-path '
        'option is "%s", and the default value for --template is "%s". '
        'See HTML-Specific Options above.'
        % (default_stylesheet_path, default_template_path),
        (('Python\'s home URL. Default is "http://www.python.org".',
          ['--python-home'],
          {'default': 'http://www.python.org', 'metavar': '<URL>'}),
         ('Home URL prefix for PEPs. Default is "." (current directory).',
          ['--pep-home'],
          {'default': '.', 'metavar': '<URL>'}),
         # For testing.
         (frontend.SUPPRESS_HELP,
          ['--no-random'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),))

    settings_default_overrides = {'stylesheet_path': default_stylesheet_path,
                                  'template': default_template_path,}

    relative_path_settings = ('template',)

    config_section = 'pep_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')

    def __init__(self):
        html4css1.Writer.__init__(self)
        self.translator_class = HTMLTranslator

    def interpolation_dict(self):
        # Extend the base substitutions with the PEP-specific fields the
        # template consumes: pyhome, pephome, pepindex, pep number, title,
        # banner index, and the assembled document body.
        subs = html4css1.Writer.interpolation_dict(self)
        settings = self.document.settings
        pyhome = settings.python_home
        subs['pyhome'] = pyhome
        subs['pephome'] = settings.pep_home
        if pyhome == '..':
            subs['pepindex'] = '.'
        else:
            subs['pepindex'] = pyhome + '/dev/peps'
        # The PEP number and title come from the document's RFC 2822-style
        # header (the first field list in the document tree).
        index = self.document.first_child_matching_class(nodes.field_list)
        header = self.document[index]
        self.pepnum = header[0][1].astext()
        subs['pep'] = self.pepnum
        if settings.no_random:
            subs['banner'] = 0  # deterministic output for the test suite
        else:
            import random
            subs['banner'] = random.randrange(64)
        try:
            subs['pepnum'] = '%04i' % int(self.pepnum)
        except ValueError:
            # non-numeric PEP number (e.g. a draft): use it verbatim
            subs['pepnum'] = self.pepnum
        self.title = header[1][1].astext()
        subs['title'] = self.title
        subs['body'] = ''.join(
            self.body_pre_docinfo + self.docinfo + self.body)
        return subs

    def assemble_parts(self):
        # Also publish the PEP title and number as named document parts.
        html4css1.Writer.assemble_parts(self)
        self.parts['title'] = [self.title]
        self.parts['pepnum'] = self.pepnum
class HTMLTranslator(html4css1.HTMLTranslator):

    def depart_field_list(self, node):
        """Close the field list; append a rule after RFC 2822 headers."""
        html4css1.HTMLTranslator.depart_field_list(self, node)
        if 'rfc2822' not in node['classes']:
            return
        self.body.append('<hr />\n')
| mit |
muffinresearch/amo-validator | tests/test_js_settimeout.py | 8 | 1269 | from js_helper import _do_test_raw
def test_settimeout_fail():
    'Test cases in which setTimeout should fail'
    # Passing a string (or a variable holding one) as the callback is
    # flagged, whether setTimeout is referenced directly or dynamically.
    failing_scripts = (
        """
        setTimeout("abc.def()", 1000);
        """,
        """
        window["set" + "Timeout"]("abc.def()", 1000);
        """,
        """
        var x = "foo.bar()";
        setTimeout(x, 1000);
        """,
        """
        var x = "foo.bar()";
        window["set" + "Timeout"](x, 1000);
        """,
    )
    for script in failing_scripts:
        assert _do_test_raw(script).failed()
def test_settimeout_pass():
    'Test cases in which setTimeout should be allowed'
    # Function callbacks and argument-less calls must not be flagged.
    passing_scripts = (
        """
        setTimeout(function(){foo.bar();}, 1000);
        """,
        """
        window["set" + "Timeout"](function(){foo.bar();}, 1000);
        """,
        """
        setTimeout();
        """,
        """
        window["set" + "Timeout"]();
        """,
    )
    for script in passing_scripts:
        assert not _do_test_raw(script).failed()
def test_settimeout_bind_pass():
    'Test cases in which setTimeout should be allowed'
    # A bound function callback is still a function, not an eval string.
    passing_scripts = (
        """
        setTimeout(function(){foo.bar();}.bind(this), 1000);
        """,
        """
        window["set" + "Timeout"](function(){foo.bar();}.bind(this), 1000);
        """,
    )
    for script in passing_scripts:
        assert not _do_test_raw(script).failed()
| bsd-3-clause |
simsynser/SimSyn | SimSyn.py | 1 | 38537 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SimSyn_v02.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import psycopg2
from psycopg2.extensions import AsIs
import time
import pysd
import pandas as pd
from os import path
QtCore.QCoreApplication.addLibraryPath(path.join(path.dirname(QtCore.__file__), "plugins"))
QtGui.QImageReader.supportedImageFormats()
# PyQt4/SIP API-version compatibility shims: on bindings built with the
# v2 string API, QString.fromUtf8 and QApplication.UnicodeUTF8 are
# absent, so fall back to identity / non-encoding variants.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
#Create main window, insert central widget into main window (child of main window)
#Add grid layout as child of central widget
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(600, 370)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("Local_v01-001.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.v = QtGui.QGridLayout(self.centralwidget)
self.v.setObjectName(_fromUtf8("v"))
#Put spacer between croup boxes
spacerItem1 = QtGui.QSpacerItem(40, 5, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem1, 0, 0, 1, 1)
#Create 1st Group Box: Set Connections
self.GroupBox_Connect = QtGui.QGroupBox(self.centralwidget) #Assign 1st group box to central widget
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.GroupBox_Connect.setFont(font)
self.GroupBox_Connect.setObjectName(_fromUtf8("GroupBox_Connect"))
self.gridLayout = QtGui.QGridLayout(self.GroupBox_Connect) #Assign gridLayout to 1st group box
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.Db_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Db_Name.setFont(font)
self.Db_Name.setObjectName(_fromUtf8("Db_Name"))
self.gridLayout.addWidget(self.Db_Name, 0, 0, 1, 1)
self.Host_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Host_Name.setFont(font)
self.Host_Name.setObjectName(_fromUtf8("Host_Name"))
self.gridLayout.addWidget(self.Host_Name, 0, 1, 1, 1)
self.User_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.User_Name.setFont(font)
self.User_Name.setObjectName(_fromUtf8("User_Name"))
self.gridLayout.addWidget(self.User_Name, 1, 0, 1, 1)
self.Password = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Password.setFont(font)
self.Password.setObjectName(_fromUtf8("Password"))
self.gridLayout.addWidget(self.Password, 1, 1, 1, 1)
self.Port = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Port.setFont(font)
self.Port.setObjectName(_fromUtf8("Port"))
self.gridLayout.addWidget(self.Port, 2, 0, 1, 1)
self.Table_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Table_Name.setFont(font)
self.Table_Name.setObjectName(_fromUtf8("Table_Name"))
self.gridLayout.addWidget(self.Table_Name, 2, 1, 1, 1)
self.Btn_Connect = QtGui.QPushButton(self.GroupBox_Connect)
self.Btn_Connect.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Connect.setFont(font)
self.Btn_Connect.setFixedWidth(150)
self.Btn_Connect.setFixedHeight(20)
self.Btn_Connect.setObjectName(_fromUtf8("Btn_Connect"))
self.gridLayout.addWidget(self.Btn_Connect, 3, 0, 1, 1)
self.v.addWidget(self.GroupBox_Connect, 1, 0, 1, 1) #Add 1st group box to master grid layout
self.Host_Name.raise_() #Raise widgets to the top of the parent widget's stack. After this call widget will be visually in front of any overlapping sibling widgets
self.Password.raise_()
self.User_Name.raise_()
self.Table_Name.raise_()
self.Port.raise_()
self.Db_Name.raise_()
self.Btn_Connect.raise_()
self.GroupBox_Connect.raise_()
#But spacer between croup boxes
spacerItem2 = QtGui.QSpacerItem(40, 10, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem2, 2, 0, 1, 1)
#Create 2nd Group Box: Build Vensim Model
self.GroupBox_build = QtGui.QGroupBox(self.centralwidget) #Assign 1st group box to central widget
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.GroupBox_build.setFont(font)
self.GroupBox_build.setObjectName(_fromUtf8("GroupBox_Connect"))
self.gridLayout2 = QtGui.QGridLayout(self.GroupBox_build) #Assign gridLayout to 1st group box
self.gridLayout2.setObjectName(_fromUtf8("gridLayout2"))
self.Model_Dir = QtGui.QLineEdit(self.GroupBox_build)
self.gridLayout2.addWidget(self.Model_Dir, 0, 0, 1, 2)
self.sub_gridLayout = QtGui.QGridLayout(self.GroupBox_build) #Subgrid layout to backage 'browse-button' and 'Build-VENSIM-Model-Button' gridLayout
self.sub_gridLayout.setObjectName(_fromUtf8("sub_gridLayout"))
self.gridLayout2.addLayout(self.sub_gridLayout, 0,2,1,1)
self.Btn_Browse_Ven = QtGui.QPushButton(self.GroupBox_build)
self.Btn_Browse_Ven.setObjectName(_fromUtf8("Btn_Browse_Ven"))
self.Btn_Browse_Ven.setFixedWidth(25)
self.Btn_Browse_Ven.setFixedHeight(20)
self.sub_gridLayout.addWidget(self.Btn_Browse_Ven, 0, 0, 1, 1)
self.Btn_Build = QtGui.QPushButton(self.GroupBox_build)
self.Btn_Build.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Build.setFont(font)
self.Btn_Build.setFixedWidth(90)
self.Btn_Build.setFixedHeight(20)
self.Btn_Build.setObjectName(_fromUtf8("Btn_Build"))
self.sub_gridLayout.addWidget(self.Btn_Build, 0, 1, 1, 1)
self.v.addWidget(self.GroupBox_build, 3, 0, 1, 1) #Add 2nd group box to master grid layout
#But spacer between croup boxes
spacerItem3 = QtGui.QSpacerItem(40, 10, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem3, 4, 0, 1, 1)
#Create 3rd Group Box: Connection Settings
self.GroupBox_Settings = QtGui.QGroupBox(self.centralwidget) #Assign 3rd group box to central widget
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.GroupBox_Settings.setFont(font)
self.GroupBox_Settings.setObjectName(_fromUtf8("GroupBox_Settings"))
self.gridLayout_3 = QtGui.QGridLayout(self.GroupBox_Settings) #Assign gridLayout2 to 2nd group box
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.Link_Tab_Frame = QtGui.QFrame(self.GroupBox_Settings) #Create "Link_Tab_Frame" (Child) as subcontainer of "GroupBox_Settings" (Parent)
self.Link_Tab_Frame.setObjectName(_fromUtf8("Link_Tab_Frame"))
self.gridLayout_3.addWidget(self.Link_Tab_Frame, 2, 0, 3, 1) #Add subcontainer to grid Layout of the "GroupBox_Settings" parent object
self.gridLayout_frame = QtGui.QGridLayout(self.Link_Tab_Frame) #Create new grid Layout within "Link_Tab_Frame" subcontainer
self.gridLayout_frame.setObjectName(_fromUtf8("gridLayout_frame"))
self.gridLayout_frame.setContentsMargins(0, 0, 0, 0)
self.Link_Tab = QtGui.QTableWidget(self.Link_Tab_Frame)
self.Link_Tab.setObjectName(_fromUtf8("Link_Tab"))
self.Link_Tab.setColumnCount(4)
self.Link_Tab.horizontalHeader().setFixedHeight(20)
self.Link_Tab.setHorizontalHeaderLabels(["Model Variable", "Database Variable", "Use as", "Time (optional)"])
stylesheet = "QHeaderView::section{Background-color:rgb(90,90,90); border-radius:15px;border-right:1px solid #FFFFFF;}"
self.Link_Tab.horizontalHeader().setStyleSheet(stylesheet)
font = QtGui.QFont()
font.setBold(True)
font.setPointSize(8.5)
for c in range(self.Link_Tab.columnCount()):
self.Link_Tab.setColumnWidth(c, 130)
self.Link_Tab.horizontalHeaderItem(c).setFont(font)
self.Link_Tab.horizontalHeaderItem(c).setForeground(QtGui.QBrush(QtGui.QColor(255,255,255)))
self.gridLayout_frame.addWidget(self.Link_Tab, 0, 0, 1, 1)
self.Button_Frame = QtGui.QFrame(self.GroupBox_Settings)
self.Button_Frame.setObjectName(_fromUtf8("Button_Frame"))
self.gridLayout_3.addWidget(self.Button_Frame, 2, 1, 1, 2)
self.gridLayout_button = QtGui.QGridLayout(self.Button_Frame)
self.gridLayout_button.setObjectName(_fromUtf8("gridLayout_5"))
self.gridLayout_button.setContentsMargins(0, 0, 0, 0)
self.Btn_Plus = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Plus.setEnabled(True)
self.Btn_Plus.setObjectName(_fromUtf8("Btn_Plus"))
self.Btn_Plus.setFixedHeight(20)
self.Btn_Plus.setFixedWidth(20)
self.gridLayout_button.addWidget(self.Btn_Plus, 0, 0, 1, 1)
self.Btn_Minus = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Minus.setEnabled(True)
self.Btn_Minus.setObjectName(_fromUtf8("Btn_Minus"))
self.Btn_Minus.setFixedHeight(20)
self.Btn_Minus.setFixedWidth(20)
self.gridLayout_button.addWidget(self.Btn_Minus, 1, 0, 1, 1)
self.Btn_Reset = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Reset.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Reset.setFont(font)
self.Btn_Reset.setFixedHeight(20)
self.Btn_Reset.setFixedWidth(70)
self.Btn_Reset.setObjectName(_fromUtf8("Btn_Reset"))
self.gridLayout_button.addWidget(self.Btn_Reset, 2, 0, 1, 1)
self.Btn_Run = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Run.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Run.setFont(font)
self.Btn_Run.setFixedHeight(20)
self.Btn_Run.setFixedWidth(70)
self.Btn_Run.setObjectName(_fromUtf8("Btn_Run"))
self.gridLayout_button.addWidget(self.Btn_Run, 3, 0, 1, 1)
self.Progress_Run = QtGui.QProgressBar(self.GroupBox_Settings)
self.Progress_Run.setProperty("value", 0)
self.Progress_Run.setFixedHeight(15)
self.Progress_Run.setFixedWidth(70)
self.Progress_Run.setVisible(False) #Progress bar is not visible before compress button is pushed
self.Progress_Run.setObjectName(_fromUtf8("Progress_Run"))
self.gridLayout_button.addWidget(self.Progress_Run, 4, 0, 1, 1)
self.v.addWidget(self.GroupBox_Settings, 5, 0, 1, 1) #Add 2nd group box to master grid layout
self.GroupBox_Settings.raise_()
#But spacer between croup boxes
spacerItem4 = QtGui.QSpacerItem(40, 5, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem4, 6, 0, 1, 1)
#?????????????????
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "SimSyn", None))
self.GroupBox_Connect.setTitle(_translate("MainWindow", "Database Connection", None))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(60)
self.GroupBox_Connect.setFont(font)
self.GroupBox_build.setTitle(_translate("MainWindow", "Model Connection", None))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(60)
self.GroupBox_build.setFont(font)
self.GroupBox_Settings.setTitle(_translate("MainWindow", "Data Link(s)", None))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(60)
self.GroupBox_Settings.setFont(font)
self.Password.placeholderText()
self.Password.setPlaceholderText(_translate("MainWindow", "Password", None))
self.Port.placeholderText()
self.Port.setPlaceholderText(_translate("MainWindow", "Port", None))
self.User_Name.placeholderText()
self.User_Name.setPlaceholderText(_translate("MainWindow", "User Name", None))
self.Host_Name.placeholderText()
self.Host_Name.setPlaceholderText(_translate("MainWindow", "Host", None))
self.Table_Name.placeholderText()
self.Table_Name.setPlaceholderText(_translate("MainWindow", "Table Name [comma delimit multiple tables]", None))
self.Db_Name.placeholderText()
self.Db_Name.setPlaceholderText(_translate("MainWindow", "Database Name", None))
self.Btn_Connect.setText(_translate("MainWindow", "Connect Database", None))
self.Btn_Build.setText(_translate("MainWindow", "Load Model", None))
self.Btn_Run.setText(_translate("MainWindow", "Run", None))
self.Btn_Reset.setText(_translate("MainWindow", "Reset...", None))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Icons/BrowseIcon_v01.png"))
self.Btn_Browse_Ven.setIcon(icon)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("Icons/PlusIcon3.png")) #Cannot add vector svg!!!
self.Btn_Plus.setIcon(icon2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("Icons/MinusIcon3.png")) #Cannot add vector svg!!!
self.Btn_Minus.setIcon(icon3)
#Execute functions, if buttons are clicked
self.Btn_Connect.clicked.connect(self.connectDb)
self.Btn_Browse_Ven.clicked.connect(self.browse)
self.Btn_Build.clicked.connect(self.loadVen)
self.Btn_Plus.clicked.connect(self.addrow)
self.Btn_Minus.clicked.connect(self.removerow)
self.Btn_Reset.clicked.connect(self.reset)
self.Btn_Run.clicked.connect(self.run)
    def connectDb(self):
        """Connect to PostgreSQL using the form fields and cache table metadata.

        On success ``self.con`` holds the open psycopg2 connection and
        ``self.tb_columns`` the fetched (column_name, table_name) pairs of
        the requested tables.  On failure, or when no columns come back, a
        critical message box is shown.
        """
        #Get DB specification from the form fields
        name_db = self.Db_Name.text()
        user_nm = self.User_Name.text()
        host = self.Host_Name.text()
        passwrd = self.Password.text()
        pt = self.Port.text()
        #Format table names as 'Table 1','Table 2','Table 3' ... for the SQL IN (...) clause
        self.name_tb = ""
        for t in self.Table_Name.text().split(','):
            if self.name_tb != "":
                self.name_tb += ","
            self.name_tb += "'" + t + "'"
        #Delete empty space, if required
        # NOTE(review): if name_tb is a plain Python str, replace() returns a
        # *new* string that is discarded here (i.e. this is a no-op); under
        # PyQt4's QString, replace() mutates in place.  Confirm which type is
        # actually in play before relying on the whitespace removal.
        try:
            self.name_tb.replace(" ", "")
        except:
            pass
        #Access Database, get column names of tables and save them as a self attribute of the class
        try:
            self.con = psycopg2.connect(dbname = name_db, host = host, port = int(pt), user = user_nm, password = passwrd)
            curs = self.con.cursor()
            # NOTE(review): table names are interpolated via AsIs -- this is
            # not safe against SQL injection; acceptable only for trusted
            # local input.
            curs.execute("SELECT column_name, table_name FROM information_schema.columns WHERE table_name IN (%s);" % AsIs(str(self.name_tb)))
            self.tb_columns = curs.fetchall()
            try:
                self.Progress_Label_con.clear() #Reset label from a previous successful connect
            except:
                pass
            if len(self.tb_columns) == 0:
                # Connection worked but no matching tables/columns were found.
                self.error_message = QtGui.QMessageBox(self.centralwidget)
                self.error_message.setIcon(QtGui.QMessageBox.Critical)
                self.error_message.setWindowTitle("Connection Info")
                self.error_message.setText("Unable to connect to database!")
                self.error_message.setStandardButtons(QtGui.QMessageBox.Ok)
                self.error_message.exec_()
            else:
                # Show a light-weight "Connected to ..." label under the form.
                font = QtGui.QFont()
                font.setBold(False)
                font.setWeight(15)
                self.Progress_Label_con = QtGui.QLabel()
                self.Progress_Label_con.setFont(font)
                self.gridLayout.addWidget(self.Progress_Label_con, 3, 1, 1, 1)
                self.Progress_Label_con.setText("Connected to " + self.name_tb)
        except:
            self.error_message = QtGui.QMessageBox(self.centralwidget)
            self.error_message.setIcon(QtGui.QMessageBox.Critical)
            self.error_message.setWindowTitle("Connection Info")
            self.error_message.setText("Unable to connect to database!")
            self.error_message.setStandardButtons(QtGui.QMessageBox.Ok)
            self.error_message.exec_()
    def browse(self):
        """Open a file picker and put the chosen model path into the path field."""
        self.Model_Dir.setText(QtGui.QFileDialog.getOpenFileName())
def loadVen(self):
try:
ven_path = str(self.Model_Dir.text())
self.model = pysd.read_vensim(ven_path)
self.message = QtGui.QMessageBox(self.centralwidget)
self.message.setIcon(QtGui.QMessageBox.Information)
self.message.setWindowTitle("Load VENSIM")
self.message.setText("Successfully connected to '" + str(self.model.__str__.split("/")[-1]) + "'")
self.message.setStandardButtons(QtGui.QMessageBox.Ok)
self.message.exec_()
except:
self.error_message1 = QtGui.QMessageBox(self.centralwidget) #self.centralwidget is used as parent to so that messagebox is centered above parent
self.error_message1.setWindowTitle("Load VENSIM")
self.error_message1.setText("Couldn't connect to VENSIM model.")
self.error_message1.setIcon(QtGui.QMessageBox.Critical)
self.error_message1.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message1.exec_()
    def addrow(self):
        """Append one link row (four cell widgets) to the link table.

        Requires both a connected database (``self.tb_columns``) and a loaded
        model (``self.model``); otherwise an error box is shown.
        """
        labels = []
        elem = []
        try:
            #Get name (labels) of PostgreSQL table columns
            for c in self.tb_columns:
                labels.append(c[0])
            #Get names of VENSIM model elements, skipping private names and
            #framework attributes
            for i in dir(self.model.components):
                if i[0] != "_" and i not in ['doc', 'time', 'time_step', 't', 'state_vector', 'state', 'final_time', 'functions', 'reset_state', 'saveper', 'd_dt']: #Not very clean solution for filtering model elements!
                    elem.append(i)
        except:
            # self.tb_columns / self.model not set yet -> both lists stay empty
            pass
        if (len(labels) == 0) or (len(elem) == 0):
            self.error_message2 = QtGui.QMessageBox(self.centralwidget)
            self.error_message2.setIcon(QtGui.QMessageBox.Critical)
            self.error_message2.setWindowTitle("Input Info")
            self.error_message2.setText("No database dataset or no VENSIM model loaded!")
            self.error_message2.setStandardButtons(QtGui.QMessageBox.Ok)
            self.error_message2.exec_()
        else:
            #Add combo boxes / line edit as cell widgets of a new table row.
            # NOTE(review): the attribute names are swapped -- self.ven_var
            # holds the DATABASE column names (labels) and self.post_var the
            # VENSIM element names (elem).  The cell placement below is
            # nevertheless correct: column 0 "Model Variable" gets the model
            # elements, column 1 "Database Variable" the database columns.
            rowPosition = self.Link_Tab.rowCount()
            self.Link_Tab.insertRow(rowPosition)
            self.Link_Tab.setRowHeight(rowPosition, 20)
            self.ven_var = QtGui.QComboBox()
            self.ven_var.addItems(labels)
            self.post_var = QtGui.QComboBox()
            self.post_var.addItems(elem)
            self.use = QtGui.QComboBox()
            self.use.addItems(["Time Series", "Subscript"])
            self.time_edit = QtGui.QLineEdit()
            font = QtGui.QFont()
            font.setBold(False)
            font.setWeight(50)
            self.time_edit.setFont(font)
            self.time_edit.setObjectName(_fromUtf8("time_edit"))
            # Placeholder doubles as the sentinel value read back by run().
            self.time_edit.setPlaceholderText("<Default is constant>")
            self.Link_Tab.setCellWidget(rowPosition,0,self.post_var)
            self.Link_Tab.setCellWidget(rowPosition,1,self.ven_var)
            self.Link_Tab.setCellWidget(rowPosition,2,self.use)
            self.Link_Tab.setCellWidget(rowPosition,3,self.time_edit)
def removerow(self):
rowPosition = self.Link_Tab.rowCount()
self.Link_Tab.removeRow((rowPosition - 1))
def reset(self):
self.message1 = QtGui.QMessageBox(self.centralwidget)
self.message1.setIcon(QtGui.QMessageBox.Information)
self.message1.setWindowTitle("Reset Info")
self.message1.setText("Outputs ('sim_out' table) of a previous run will be deleted!")
self.message1.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
retval = self.message1.exec_()
if retval == 1024:
curs = self.con.cursor()
try:
curs.execute(""" DROP TABLE sim_out; """)
self.con.commit()
del curs
self.Progress_Run.setVisible(False)
except:
self.con.commit()
del curs
    #So far users cannot decide what to consider in the output!!
    #Only VENSIM Stocks are written to separate PostGIS Table Columns
    def run(self):
        """Execute the simulation for the configured data links.

        Two execution modes, chosen from the link table:

        * "Subscript" links: the model is run once per spatial object (one
          per ``gid`` of the subscripting table); per-stock trajectories are
          stored as arrays in the ``sim_out`` table.
        * "Time Series" links only: the model is run a single time and one
          row per time step is inserted into ``sim_out``.

        The combo boxes come in groups of three per table row (model var,
        database var, "use as"), hence the recurring ``(idx + 1) % 3 == 0``
        tests to pick out the "Use as" box of each row.

        NOTE(review): SQL is assembled by string interpolation via AsIs and
        is not safe against injection -- acceptable only for trusted local
        input.
        """
        #Catch runtime error, if no links are set in table
        #Consider: Links can only be set if DB is connected and Simulation Input is selected
        if len(self.Link_Tab.findChildren(QtGui.QComboBox)) == 0:
            self.error_message3 = QtGui.QMessageBox(self.centralwidget)
            self.error_message3.setIcon(QtGui.QMessageBox.Critical)
            self.error_message3.setWindowTitle("Compression Info")
            self.error_message3.setText("No application links selected")
            self.error_message3.setStandardButtons(QtGui.QMessageBox.Ok)
            self.error_message3.exec_()
        #Catch exception: sim_out already exists --> output of an earlier run blocks execution
        bool_tab_exist = 0
        try:
            curs = self.con.cursor()
            curs.execute(""" SELECT * FROM sim_out WHERE gid = 1; """)
            bool_tab_exist = 1
            del curs
        except:
            # Query failed -> sim_out does not exist; commit clears the
            # aborted transaction so the connection stays usable.
            self.con.commit()
            del curs
        if bool_tab_exist == 1:
            self.error_message5 = QtGui.QMessageBox(self.centralwidget)
            self.error_message5.setIcon(QtGui.QMessageBox.Critical)
            self.error_message5.setWindowTitle("Compression Info")
            self.error_message5.setText("Output already exists. Make a reset before execution.")
            self.error_message5.setStandardButtons(QtGui.QMessageBox.Ok)
            self.error_message5.exec_()
        #Count subscripting links
        subscripting_count = 0
        for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
            if (idx + 1) % 3 == 0:
                if itm.currentText() == "Subscript":
                    subscripting_count += 1
        ##make sure that subscripting table has 'geometry column'
        #Get name of subscripting table
        subscripting_table = []
        for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
            if (idx + 1) % 3 == 0:
                if itm.currentText() == "Subscript":
                    subscripting_table.append(self.tb_columns[self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentIndex()][1])
        subscripting_table = list(set(subscripting_table))
        print subscripting_table
        #Probe for a 'geom' column on the subscripting table
        bool_tab_geom = 0
        try:
            curs = self.con.cursor()
            curs.execute("SELECT geom FROM %s WHERE gid = 1;" % AsIs(str(subscripting_table[0])))
            bool_tab_geom = 1
            del curs
        except:
            self.con.commit()
            del curs
        #Create sim_out table with geometry column (input subscripting table with geometry)
        if (subscripting_count > 0) and (bool_tab_geom == 1) and (bool_tab_exist == 0):
            try:
                curs = self.con.cursor()
                curs.execute("CREATE TABLE sim_out AS SELECT geom, gid FROM %s; ALTER TABLE sim_out ADD PRIMARY KEY (gid);" % AsIs(subscripting_table[0])) #copy geom as spatial and gid as non-spatial foreign key
                self.con.commit() #gid is simultaniously used as foreign and primary key in 'sim_out' as table relation is 1 to 1
                del curs
            except psycopg2.OperationalError:
                self.error_message7 = QtGui.QMessageBox(self.centralwidget)
                # NOTE(review): 'critical' (lowercase) is the static method,
                # not the Icon enum -- probably meant QtGui.QMessageBox.Critical
                self.error_message7.setIcon(QtGui.QMessageBox.critical)
                self.error_message7.setWindowTitle("Operational Error 1")
                self.error_message7.setText("Size of array exceeded (see Documentation)")
                self.error_message7.setStandardButtons(QtGui.QMessageBox.Ok)
                self.error_message7.exec_()
                curs = self.con.cursor()
                self.con.commit()
                del curs
        #Create sim_out table without geometry column (input subscripting table is non-spatial)
        if (subscripting_count > 0) and (bool_tab_geom == 0) and (bool_tab_exist == 0):
            try:
                curs = self.con.cursor()
                curs.execute("CREATE TABLE sim_out AS SELECT gid FROM %s; ALTER TABLE sim_out ADD PRIMARY KEY (gid);" % AsIs(subscripting_table[0])) #copy gid as non-spatial foreign key
                self.con.commit() #gid is simultaniously used as foreign and primary key in 'sim_out' as table relation is 1 to 1
                del curs
            except psycopg2.OperationalError:
                self.error_message7 = QtGui.QMessageBox(self.centralwidget)
                # NOTE(review): same lowercase 'critical' issue as above
                self.error_message7.setIcon(QtGui.QMessageBox.critical)
                self.error_message7.setWindowTitle("Operational Error 1")
                self.error_message7.setText("Size of array exceeded (see Documentation)")
                self.error_message7.setStandardButtons(QtGui.QMessageBox.Ok)
                self.error_message7.exec_()
        #Assign time series to model
        for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
            if (idx + 1) % 3 == 0:
                if itm.currentText() == "Time Series":
                    #Get name of time series table
                    time_series_tb = []
                    time_series_tb.append(self.tb_columns[self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentIndex()][1])
                    time_series_tb = list(set(time_series_tb))
                    #Fetch time series data from database
                    curs = self.con.cursor()
                    field = self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentText()
                    curs.execute("SELECT %s FROM %s;" % (field, time_series_tb[0]))
                    time_series = curs.fetchall()
                    #Assign data to model: row index becomes the time axis
                    pandas_t = []
                    pandas_d = []
                    for t,d in enumerate(time_series):
                        pandas_t.append(t)
                        pandas_d.append(d[0])
                    time_series_pd = pd.Series(index=pandas_t, data=pandas_d)
                    ven_var_ts = self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 2].currentText()
                    self.model.set_components(params={str(ven_var_ts):time_series_pd})
        #Only run subscripting procedure, if at least one subscripting link is selected
        if (subscripting_count != 0) and (bool_tab_exist == 0):
            #Get Table Links as List [[VENSIM Var. Name 1, PostGIS Var. Name 1, Time1], [VENSIM Var. Name 2, PostGIS Var. Name 2, Time2], ...]
            #An empty time field falls back to the placeholder sentinel "<Default is constant>"
            table_links = []
            for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
                if (idx + 1) % 3 == 0:
                    if itm.currentText() == "Subscript":
                        if str(self.Link_Tab.findChildren(QtGui.QLineEdit)[((idx + 1) / 3) - 1].text()) != "":
                            table_links.append([str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 2].currentText()),
                                str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentText()), str(self.Link_Tab.findChildren(QtGui.QLineEdit)[((idx + 1) / 3) - 1].text())])
                        else:
                            table_links.append([str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 2].currentText()),
                                str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentText()), str(self.Link_Tab.findChildren(QtGui.QLineEdit)[((idx + 1) / 3) - 1].placeholderText())])
            #Check for duplicates in entries (a Vensim variable assigned twice
            #for the same time would be ambiguous)
            table_links_reduced = [x[::2] for x in table_links]
            dupl_count = 0
            for idx, itm in enumerate(table_links_reduced):
                for l in table_links_reduced:
                    if cmp(itm, l) == 0:
                        dupl_count+=1
            if dupl_count > len(table_links_reduced):
                self.error_message9 = QtGui.QMessageBox(self.centralwidget)
                self.error_message9.setIcon(QtGui.QMessageBox.Critical)
                self.error_message9.setWindowTitle("Input Error")
                self.error_message9.setText("Time constant or time dependent value assignment is redundant! Change time settings. Then reset, compress and rerun simulation.")
                self.error_message9.setStandardButtons(QtGui.QMessageBox.Ok)
                self.error_message9.exec_()
            ###Input error 'time origin > 0' is not implemented yet!!!
            else:
                #Make progress bar visible once run button is pushed
                self.Progress_Run.setVisible(True)
                #Add one array column per stock in the SD model
                curs = self.con.cursor()
                for i in self.model.components._stocknames:
                    curs.execute("ALTER TABLE sim_out ADD COLUMN %s real[]" % i) #real is 4 bytes per array element, double precision would be 8 bytes
                    self.con.commit()
                #Count rows of sim_out
                row_count = 0.0
                curs.execute(""" SELECT count(*) FROM sim_out; """)
                row_count = curs.fetchall()[0][0]
                #Get name of subscripting table
                subscripting_table = []
                for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
                    if (idx + 1) % 3 == 0:
                        if itm.currentText() == "Subscript":
                            subscripting_table.append(self.tb_columns[self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentIndex()][1])
                subscripting_table = list(set(subscripting_table))
                #Fetch inputs, run the model, save outputs -- once per gid
                g_count = 0.0
                for g in xrange(1,(row_count + 1)):
                    start = time.time()
                    pvars = [x[1] for x in table_links]
                    post_vars = str(pvars).replace('[', '').replace(']', '').replace("'", "")
                    SQL = ''' SELECT %s FROM %s WHERE gid = %s; ''' % (AsIs(post_vars), AsIs(subscripting_table[0]), g)
                    curs.execute(SQL)
                    self.con.commit()
                    post_data = curs.fetchall()
                    #Append fetched PostGIS values to the link list entries:
                    #[VENSIM var, PostGIS var, time, value]
                    for idx, itm in enumerate(table_links):
                        itm.append(post_data[0][idx])
                    #Set time constant parameters in Vensim model
                    for x in table_links:
                        if x[2] == "<Default is constant>":
                            self.model.set_components(params={x[0]:x[3]})
                    #Set time dependent parameters in Vensim model. Values are
                    #linearly interpolated between time dependent inputs
                    for v in list(set([x[0] for x in table_links if x[2].isalnum()])):
                        pandas_time = []
                        pandas_data = []
                        for x in table_links:
                            if (x[2].isalnum()) and (x[0] == v):
                                pandas_time.append(x[2])
                                pandas_data.append(x[3])
                        pandas_time_float = [float(x) for x in pandas_time]
                        pandas_data_float = [float(x) for x in pandas_data]
                        pandas_data_sorted = [x for (y,x) in sorted(zip(pandas_time_float,pandas_data_float))] #sort pandas data by time
                        pandas_time_sorted = sorted(pandas_time_float)
                        look = pd.Series(index=pandas_time_sorted, data=pandas_data_sorted)
                        self.model.set_components(params={v:look})
                    #Run simulation for one collection element
                    st = self.model.run()
                    #Clear data value for next spatial object simulation run
                    for e in table_links:
                        del e[-1]
                    #save to database
                    for col in self.model.components._stocknames:
                        pd_lst = st[col].tolist()
                        curs.execute("UPDATE sim_out SET (%s) = (%s) WHERE gid = %s", (AsIs(col), pd_lst, g))
                        self.con.commit()
                    #Update progress
                    g_count += 1
                    complete = g_count / row_count * 100
                    self.Progress_Run.setValue(complete)
                    QtGui.QApplication.processEvents() #refresh application
                    end = time.time()
                    print "1 FRS " + str((end - start))
        #Check for number of time series links
        series_count = 0
        for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
            if (idx + 1) % 3 == 0:
                if itm.currentText() == "Time Series":
                    series_count += 1
        #If link schema includes time series links only, then run once and save to sim_out
        if (subscripting_count == 0) and (series_count > 0) and (bool_tab_exist == 0):
            #Make progress bar visible once run button is pushed
            self.Progress_Run.setVisible(True)
            #create sim_out table, one numeric column per stock
            curs = self.con.cursor()
            curs.execute(""" CREATE TABLE sim_out (gid BIGSERIAL PRIMARY KEY); """)
            self.con.commit()
            for i in self.model.components._stocknames:
                curs.execute("ALTER TABLE sim_out ADD COLUMN %s numeric" % i)
                self.con.commit()
            del curs
            #Run
            st = self.model.run()
            #Save: one INSERT per simulated time step
            curs = self.con.cursor()
            stocks_str = str(self.model.components._stocknames).replace("[", "").replace("]", "").replace("'", "")
            for idx, row in st.iterrows():
                groups = ()
                for v in row:
                    groups += (v,)
                curs.execute("INSERT INTO sim_out (%s) VALUES %s" % (stocks_str, groups))
                self.con.commit()
            del curs
            self.Progress_Run.setValue(100)
        #Final cleanup: a cursor may or may not be left over depending on the path taken
        try:
            del curs
        except:
            pass
# Application entry point: build the Qt application, instantiate the UI and
# start the event loop.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    app.setStyle(QtGui.QStyleFactory.create("plastique"))
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow() #Create instance of class Ui_MainWindow() defined above
    ui.setupUi(MainWindow) #Build the widget tree onto the main window
    MainWindow.show()
    sys.exit(app.exec_())
| agpl-3.0 |
mcsosa121/cafa | cafaenv/lib/python2.7/site-packages/wheel/signatures/djbec.py | 566 | 6755 | # Ed25519 digital signatures
# Based on http://ed25519.cr.yp.to/python/ed25519.py
# See also http://ed25519.cr.yp.to/software.html
# Adapted by Ron Garret
# Sped up considerably using coordinate transforms found on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
# Specifically add-2008-hwcd-4 and dbl-2008-hwcd
# Python 2/3 compatibility shims: byte-string construction and bit access.
try: # pragma nocover
    unicode
    PY3 = False
    def asbytes(b):
        """Convert array of integers to byte string"""
        return ''.join(chr(x) for x in b)
    def joinbytes(b):
        """Convert array of bytes to byte string"""
        return ''.join(b)
    def bit(h, i):
        """Return i'th bit of bytestring h"""
        return (ord(h[i//8]) >> (i%8)) & 1
except NameError: # pragma nocover
    PY3 = True
    asbytes = bytes
    joinbytes = bytes
    def bit(h, i):
        # On Python 3, indexing bytes yields an int directly (no ord()).
        return (h[i//8] >> (i%8)) & 1
import hashlib

# Ed25519 domain parameters: b-bit encodings, field prime q = 2^255 - 19,
# and the prime group order l = 2^252 + 27742317777372353535851937790883648493.
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
    """Return the 64-byte SHA-512 digest of message *m*."""
    digest = hashlib.sha512(m)
    return digest.digest()
def expmod(b, e, m):
    """Return (b ** e) % m for e >= 0.

    Uses the built-in three-argument pow(), an iterative C implementation:
    unlike the original recursive square-and-multiply it cannot hit the
    recursion limit for 255-bit exponents and is considerably faster.
    (Also more correct for the m == 1 edge case, where the old code
    returned an unreduced 1 for e == 0.)
    """
    return pow(b, e, m)
# Can probably get some extra speedup here by replacing this with
# an extended-euclidean, but performance seems OK without that
def inv(x):
    """Multiplicative inverse of x mod the field prime q, via Fermat's
    little theorem: x**(q-2) mod q."""
    return expmod(x, q-2, q)
# Twisted Edwards curve constant d = -121665/121666 (mod q), and
# I = sqrt(-1) mod q (used by xrecover when the first candidate root fails).
d = -121665 * inv(121666)
I = expmod(2,(q-1)//4,q)
def xrecover(y):
    """Recover the even x coordinate of a curve point from its y coordinate.

    Solves x^2 = (y^2 - 1)/(d*y^2 + 1) mod q.  A candidate root is
    xx^((q+3)/8); if it does not square back to xx it is corrected by the
    factor I = sqrt(-1).  The even root (x % 2 == 0) is returned, per the
    Ed25519 encoding convention.
    """
    xx = (y*y-1) * inv(d*y*y+1)
    x = expmod(xx,(q+3)//8,q)
    if (x*x - xx) % q != 0: x = (x*I) % q
    if x % 2 != 0: x = q-x
    return x
# Standard Ed25519 base point B: y = 4/5 (mod q), x recovered from y.
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
#def edwards(P,Q):
# x1 = P[0]
# y1 = P[1]
# x2 = Q[0]
# y2 = Q[1]
# x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
# y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
# return (x3 % q,y3 % q)
#def scalarmult(P,e):
# if e == 0: return [0,1]
# Q = scalarmult(P,e/2)
# Q = edwards(Q,Q)
# if e & 1: Q = edwards(Q,P)
# return Q
# Faster (!) version based on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
def xpt_add(pt1, pt2):
    """Add two points given in extended homogeneous coordinates (X, Y, Z, T).

    Formula "add-2008-hwcd-4" from the EFD (see the header comment of this
    module); all intermediate values are reduced mod q.
    """
    (X1, Y1, Z1, T1) = pt1
    (X2, Y2, Z2, T2) = pt2
    A = ((Y1-X1)*(Y2+X2)) % q
    B = ((Y1+X1)*(Y2-X2)) % q
    C = (Z1*2*T2) % q
    D = (T1*2*Z2) % q
    E = (D+C) % q
    F = (B-A) % q
    G = (B+A) % q
    H = (D-C) % q
    X3 = (E*F) % q
    Y3 = (G*H) % q
    Z3 = (F*G) % q
    T3 = (E*H) % q
    return (X3, Y3, Z3, T3)
def xpt_double (pt):
    """Double a point in extended homogeneous coordinates (X, Y, Z, T).

    Formula "dbl-2008-hwcd" from the EFD (see the header comment of this
    module); the input T coordinate is not needed and is ignored.
    """
    (X1, Y1, Z1, _) = pt
    A = (X1*X1)
    B = (Y1*Y1)
    C = (2*Z1*Z1)
    D = (-A) % q
    J = (X1+Y1) % q
    E = (J*J-A-B) % q
    G = (D+B) % q
    F = (G-C) % q
    H = (D-B) % q
    X3 = (E*F) % q
    Y3 = (G*H) % q
    Z3 = (F*G) % q
    T3 = (E*H) % q
    return (X3, Y3, Z3, T3)
def pt_xform (pt):
    """Lift an affine point (x, y) to extended coordinates (X, Y, Z, T)
    with Z = 1 and T = x*y mod q."""
    (ax, ay) = pt
    return (ax, ay, 1, (ax * ay) % q)
def pt_unxform (pt):
    """Project an extended point (X, Y, Z, T) back to affine (X/Z, Y/Z) mod q."""
    (x, y, z, _) = pt
    return ((x*inv(z))%q, (y*inv(z))%q)
def xpt_mult (pt, n):
    """Multiply extended point pt by non-negative scalar n.

    Recursive double-and-add, consuming bits of n from the most significant
    end; n == 0 yields the neutral element (0, 1) in extended form.
    """
    if n==0: return pt_xform((0,1))
    _ = xpt_double(xpt_mult(pt, n>>1))
    return xpt_add(_, pt) if n&1 else _
def scalarmult(pt, e):
    """Scalar-multiply affine point pt by e, via extended coordinates."""
    return pt_unxform(xpt_mult(pt_xform(pt), e))
def encodeint(y):
    """Encode integer y as a little-endian bytestring of b//8 (= 32) bytes."""
    bits = [(y >> i) & 1 for i in range(b)]
    e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
         for i in range(b//8)]
    return asbytes(e)
def encodepoint(P):
    """Encode affine point P = [x, y] into 32 bytes.

    Standard Ed25519 point compression: little-endian y with the parity of
    x stored in the top bit of the final byte.
    """
    x = P[0]
    y = P[1]
    bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
    e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
         for i in range(b//8)]
    return asbytes(e)
def publickey(sk):
    """Derive the 32-byte public key from secret key bytes sk.

    The scalar a is the clamped lower half of SHA-512(sk) (top bit forced,
    low three bits cleared via the bit range 3..b-2); the public key is the
    encoding of a*B.
    """
    h = H(sk)
    a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
    A = scalarmult(B,a)
    return encodepoint(A)
def Hint(m):
    """SHA-512 of m interpreted as a little-endian 512-bit integer."""
    h = H(m)
    return sum(2**i * bit(h,i) for i in range(2*b))
def signature(m,sk,pk):
    """Produce the 64-byte Ed25519 signature (R || S) of message m.

    The nonce r is derived deterministically from the upper half of
    SHA-512(sk) and the message; then R = r*B and
    S = r + H(enc(R) || pk || m) * a  (mod l).
    """
    h = H(sk)
    a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
    inter = joinbytes([h[i] for i in range(b//8,b//4)])
    r = Hint(inter + m)
    R = scalarmult(B,r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
def isoncurve(P):
    """True if affine point P = [x, y] satisfies the twisted Edwards
    equation -x^2 + y^2 = 1 + d*x^2*y^2 (mod q)."""
    x = P[0]
    y = P[1]
    return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0
def decodeint(s):
    """Decode a little-endian bytestring into a non-negative integer."""
    return sum(2**i * bit(s,i) for i in range(0,b))
def decodepoint(s):
    """Decode 32 bytes into an affine point [x, y].

    Recovers x from y via xrecover and fixes its parity using the stored
    top bit; raises if the decoded point is not on the curve.
    """
    y = sum(2**i * bit(s,i) for i in range(0,b-1))
    x = xrecover(y)
    if x & 1 != bit(s,b-1): x = q-x
    P = [x,y]
    if not isoncurve(P): raise Exception("decoding point that is not on curve")
    return P
def checkvalid(s, m, pk):
    """Verify Ed25519 signature s over message m with public key pk.

    Checks the verification equation S*B == R + H(enc(R) || pk || m)*A.
    Raises on malformed lengths; otherwise returns a bool.
    """
    if len(s) != b//4: raise Exception("signature length is wrong")
    if len(pk) != b//8: raise Exception("public-key length is wrong")
    R = decodepoint(s[0:b//8])
    A = decodepoint(pk)
    S = decodeint(s[b//8:b//4])
    h = Hint(encodepoint(R) + pk + m)
    v1 = scalarmult(B,S)
    # v2 = edwards(R,scalarmult(A,h))  -- same computation via extended coords:
    v2 = pt_unxform(xpt_add(pt_xform(R), pt_xform(scalarmult(A, h))))
    return v1==v2
##########################################################
#
# Curve25519 reference implementation by Matthew Dempsky, from:
# http://cr.yp.to/highspeed/naclcrypto-20090310.pdf
# P = 2 ** 255 - 19  -- same field prime as q above
P = q
A = 486662  # Montgomery curve coefficient: y^2 = x^3 + A*x^2 + x
#def expmod(b, e, m):
# if e == 0: return 1
# t = expmod(b, e / 2, m) ** 2 % m
# if e & 1: t = (t * b) % m
# return t
# def inv(x): return expmod(x, P - 2, P)
def add(n, m, d):
    """Montgomery differential addition.

    Given x-only projective points n, m and their known difference d (each a
    pair (x, z)), return n + m as a projective pair mod P.
    Note: parameter ``d`` shadows the module-level Edwards constant ``d``;
    harmless here, but easy to misread.
    """
    (xn, zn) = n
    (xm, zm) = m
    (xd, zd) = d
    x = 4 * (xm * xn - zm * zn) ** 2 * zd
    z = 4 * (xm * zn - zm * xn) ** 2 * xd
    return (x % P, z % P)
def double(n):
    """Montgomery doubling of the x-only projective point n = (x, z) mod P."""
    (xn, zn) = n
    x = (xn ** 2 - zn ** 2) ** 2
    z = 4 * xn * zn * (xn ** 2 + A * xn * zn + zn ** 2)
    return (x % P, z % P)
def curve25519(n, base=9):
    """Curve25519 scalar multiplication: x coordinate of n * (base, ...).

    Implements a recursive Montgomery ladder; n must be >= 1 (genkey()
    output always is).
    """
    one = (base,1)
    two = double(one)
    # f(m) evaluates to a tuple
    # containing the mth multiple and the
    # (m+1)th multiple of base.
    def f(m):
        if m == 1: return (one, two)
        (pm, pm1) = f(m // 2)
        if (m & 1):
            return (add(pm, pm1, one), double(pm1))
        return (double(pm), add(pm, pm1, one))
    ((x,z), _) = f(n)
    return (x * inv(z)) % P
import random
def genkey(n=0):
    """Generate (or clamp) a Curve25519 private scalar.

    If n is 0 (the default) a fresh random value is drawn; in either case
    the standard X25519 clamping is applied: the low 3 bits are cleared,
    bit 255 is cleared and bit 254 is set.

    Security fix: keys are drawn from random.SystemRandom (OS entropy)
    instead of the deterministic, predictable Mersenne-Twister
    ``random.randint``, which must never be used for key material.
    """
    n = n or random.SystemRandom().randint(0, P)
    n &= ~7
    n &= ~(128 << 8 * 31)
    n |= 64 << 8 * 31
    return n
#def str2int(s):
# return int(hexlify(s), 16)
# # return sum(ord(s[i]) << (8 * i) for i in range(32))
#
#def int2str(n):
# return unhexlify("%x" % n)
# # return ''.join([chr((n >> (8 * i)) & 255) for i in range(32)])
#################################################
def dsa_test():
    """Self-test: sign a random message with a random key pair and verify it."""
    import os
    msg = str(random.randint(q,q+q)).encode('utf-8')
    sk = os.urandom(32)
    pk = publickey(sk)
    sig = signature(msg, sk, pk)
    return checkvalid(sig, msg, pk)
def dh_test():
    """Self-test: Diffie-Hellman shared secrets computed both ways must agree."""
    sk1 = genkey()
    sk2 = genkey()
    return curve25519(sk1, curve25519(sk2)) == curve25519(sk2, curve25519(sk1))
| mit |
beeftornado/sentry | tests/sentry/receivers/test_signals.py | 1 | 1275 | from __future__ import absolute_import
from django.utils import timezone
from sentry.signals import issue_unignored
from sentry.testutils import SnubaTestCase, TestCase
from sentry.utils.compat.mock import patch
class SignalsTest(TestCase, SnubaTestCase):
    """Checks that firing the ``issue_unignored`` signal records an analytics
    event for both manual and automatic transitions."""

    def setUp(self):
        super(SignalsTest, self).setUp()
        # Minimal org/team/project hierarchy owned by a fresh user.
        self.now = timezone.now()
        self.owner = self.create_user()
        self.organization = self.create_organization(owner=self.owner)
        self.team = self.create_team(organization=self.organization)
        self.project = self.create_project(teams=[self.team])

    @patch("sentry.analytics.record")
    def test_unignored_manually(self, mock_record):
        # NOTE(review): ``self.group`` is not created in setUp -- presumably
        # a lazy fixture provided by the Sentry TestCase base; confirm.
        issue_unignored.send(
            project=self.project,
            group=self.group,
            user=self.owner,
            transition_type="manual",
            sender=type(self.project),
        )
        assert mock_record.called

    @patch("sentry.analytics.record")
    def test_unignored_automatically(self, mock_record):
        # Automatic unignore has no acting user and a string sender.
        issue_unignored.send(
            project=self.project,
            group=self.group,
            user=None,
            transition_type="automatic",
            sender="clear_expired_snoozes",
        )
        assert mock_record.called
| bsd-3-clause |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/psycopg2/tests/testutils.py | 25 | 10393 | # testutils.py - utility module for psycopg2 testing.
#
# Copyright (C) 2010-2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Use unittest2 if available. Otherwise mock a skip facility with warnings.
import os
import platform
import sys
from functools import wraps
from testconfig import dsn
try:
import unittest2
unittest = unittest2
except ImportError:
import unittest
unittest2 = None
# unittest skip support: use the real skip/skipIf when available, otherwise
# (very old Python/unittest) emulate them by turning skipped tests into
# no-ops that emit a warning.
if hasattr(unittest, 'skipIf'):
    skip = unittest.skip
    skipIf = unittest.skipIf

else:
    import warnings

    def skipIf(cond, msg):
        """Decorator factory: warn-and-return instead of running f when cond holds."""
        def skipIf_(f):
            @wraps(f)
            def skipIf__(self):
                if cond:
                    warnings.warn(msg)
                    return
                else:
                    return f(self)
            return skipIf__
        return skipIf_

    def skip(msg):
        return skipIf(True, msg)

    def skipTest(self, msg):
        # Instance-level replacement for TestCase.skipTest: warn, don't fail.
        warnings.warn(msg)
        return

    unittest.TestCase.skipTest = skipTest
# Silence warnings caused by the stubbornness of the Python unittest
# maintainers
# http://bugs.python.org/issue9424
# Re-establish the deprecated assertion aliases (assert_, failUnless, ...)
# so the suite can use one spelling across all unittest versions without
# triggering deprecation warnings.
if not hasattr(unittest.TestCase, 'assert_') \
    or unittest.TestCase.assert_ is not unittest.TestCase.assertTrue:
    # mavaff...
    unittest.TestCase.assert_ = unittest.TestCase.assertTrue
    unittest.TestCase.failUnless = unittest.TestCase.assertTrue
    unittest.TestCase.assertEquals = unittest.TestCase.assertEqual
    unittest.TestCase.failUnlessEqual = unittest.TestCase.assertEqual
class ConnectingTestCase(unittest.TestCase):
    """A test case providing connections for tests.
    A connection for the test is always available as `self.conn`. Others can be
    created with `self.connect()`. All are closed on tearDown.
    Subclasses needing to customize setUp and tearDown should remember to call
    the base class implementations.
    """
    def setUp(self):
        # Track every connection handed out so tearDown can close them all.
        self._conns = []
    def tearDown(self):
        # close the connections used in the test
        for conn in self._conns:
            if not conn.closed:
                conn.close()
    def connect(self, **kwargs):
        """Open a new connection to the test database and register it for cleanup."""
        try:
            self._conns
        except AttributeError, e:
            # Helpful hint: self._conns only exists after setUp() has run.
            raise AttributeError(
                "%s (did you remember calling ConnectingTestCase.setUp()?)"
                % e)
        import psycopg2
        conn = psycopg2.connect(dsn, **kwargs)
        self._conns.append(conn)
        return conn
    def _get_conn(self):
        # Lazily create the default connection on first access.
        if not hasattr(self, '_the_conn'):
            self._the_conn = self.connect()
        return self._the_conn
    def _set_conn(self, conn):
        self._the_conn = conn
    # `self.conn`: the default, lazily-created connection for the test.
    conn = property(_get_conn, _set_conn)
def decorate_all_tests(cls, *decorators):
"""
Apply all the *decorators* to all the tests defined in the TestCase *cls*.
"""
for n in dir(cls):
if n.startswith('test'):
for d in decorators:
setattr(cls, n, d(getattr(cls, n)))
def skip_if_no_uuid(f):
    """Decorator to skip a test if uuid is not supported by Py/PG."""
    @wraps(f)
    def wrapper(self):
        try:
            import uuid
        except ImportError:
            return self.skipTest("uuid not available in this Python version")
        # Probe the server catalog for the uuid type; always roll back so the
        # probe leaves no transaction state behind.
        try:
            cur = self.conn.cursor()
            cur.execute("select typname from pg_type where typname = 'uuid'")
            has = cur.fetchone()
        finally:
            self.conn.rollback()
        if not has:
            return self.skipTest("uuid type not available on the server")
        return f(self)
    return wrapper
def skip_if_tpc_disabled(f):
    """Skip a test if the server has tpc support disabled."""
    @wraps(f)
    def wrapper(self):
        from psycopg2 import ProgrammingError
        cnn = self.connect()
        cur = cnn.cursor()
        try:
            cur.execute("SHOW max_prepared_transactions;")
        except ProgrammingError:
            # Old servers don't know this GUC at all.
            return self.skipTest(
                "server too old: two phase transactions not supported.")
        mtp = int(cur.fetchone()[0])
        cnn.close()
        if mtp:
            return f(self)
        return self.skipTest(
            "server not configured for two phase transactions. "
            "set max_prepared_transactions to > 0 to run the test")
    return wrapper
def skip_if_no_namedtuple(f):
    """Run the test only when collections.namedtuple is importable."""
    @wraps(f)
    def wrapper(self):
        try:
            from collections import namedtuple
        except ImportError:
            return self.skipTest("collections.namedtuple not available")
        return f(self)
    return wrapper
def skip_if_no_iobase(f):
    """Skip a test if io.TextIOBase is not available."""
    @wraps(f)
    def wrapper(self):
        try:
            from io import TextIOBase
        except ImportError:
            return self.skipTest("io.TextIOBase not found.")
        return f(self)
    return wrapper
def skip_before_postgres(*ver):
    """Skip a test on PostgreSQL before a certain version."""
    # Normalize to a (major, minor, patch) triple.
    ver = ver + (0,) * (3 - len(ver))
    def decorator(f):
        @wraps(f)
        def wrapper(self):
            # server_version is encoded as MMmmpp, e.g. 9.6.0 -> 90600.
            if self.conn.server_version >= int("%d%02d%02d" % ver):
                return f(self)
            return self.skipTest("skipped because PostgreSQL %s"
                % self.conn.server_version)
        return wrapper
    return decorator
def skip_after_postgres(*ver):
    """Skip a test on PostgreSQL after (including) a certain version."""
    # Normalize to a (major, minor, patch) triple.
    ver = ver + (0,) * (3 - len(ver))
    def decorator(f):
        @wraps(f)
        def wrapper(self):
            # server_version is encoded as MMmmpp, e.g. 9.6.0 -> 90600.
            if self.conn.server_version < int("%d%02d%02d" % ver):
                return f(self)
            return self.skipTest("skipped because PostgreSQL %s"
                % self.conn.server_version)
        return wrapper
    return decorator
def skip_before_python(*ver):
    """Skip a test on Python before a certain version."""
    def decorator(f):
        @wraps(f)
        def wrapper(self):
            # Compare only as many components as the caller specified.
            if sys.version_info[:len(ver)] >= ver:
                return f(self)
            return self.skipTest("skipped because Python %s"
                % ".".join(map(str, sys.version_info[:len(ver)])))
        return wrapper
    return decorator
def skip_from_python(*ver):
    """Skip a test on Python after (including) a certain version."""
    def decorator(f):
        @wraps(f)
        def wrapper(self):
            # Compare only as many components as the caller specified.
            if sys.version_info[:len(ver)] < ver:
                return f(self)
            return self.skipTest("skipped because Python %s"
                % ".".join(map(str, sys.version_info[:len(ver)])))
        return wrapper
    return decorator
def skip_if_no_superuser(f):
    """Skip a test if the database user running the test is not a superuser"""
    @wraps(f)
    def skip_if_no_superuser_(self):
        from psycopg2 import ProgrammingError
        try:
            return f(self)
        except ProgrammingError, e:
            import psycopg2.errorcodes
            # Only swallow the "insufficient privilege" error; anything else
            # is a genuine test failure and must propagate.
            if e.pgcode == psycopg2.errorcodes.INSUFFICIENT_PRIVILEGE:
                self.skipTest("skipped because not superuser")
            else:
                raise
    return skip_if_no_superuser_
def skip_if_green(reason):
    """Decorator factory: skip a test when the suite runs in green (async) mode."""
    def decorator(f):
        @wraps(f)
        def wrapper(self):
            # Imported lazily: testconfig is only available at test run time.
            from testconfig import green
            if not green:
                return f(self)
            return self.skipTest(reason)
        return wrapper
    return decorator
skip_copy_if_green = skip_if_green("copy in async mode currently not supported")
def skip_if_no_getrefcount(f):
    """Skip when sys.getrefcount is missing (not all implementations have it)."""
    @wraps(f)
    def wrapper(self):
        if hasattr(sys, 'getrefcount'):
            return f(self)
        return self.skipTest('skipped, no sys.getrefcount()')
    return wrapper
def skip_if_windows(f):
    """Skip a test if run on windows"""
    @wraps(f)
    def wrapper(self):
        if platform.system() != 'Windows':
            return f(self)
        return self.skipTest("Not supported on Windows")
    return wrapper
def script_to_py3(script):
    """Convert a script to Python3 syntax if required."""
    if sys.version_info[0] < 3:
        return script
    import tempfile
    # lib2to3's entry point works on filenames, not strings, so write the
    # source to a real file (delete=False because we reopen it below).
    f = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
    f.write(script.encode())
    f.flush()
    filename = f.name
    f.close()
    # 2to3 is way too chatty
    import logging
    logging.basicConfig(filename=os.devnull)
    from lib2to3.main import main
    # '-w -n': rewrite the file in place without keeping a backup copy.
    if main("lib2to3.fixes", ['--no-diffs', '-w', '-n', filename]):
        raise Exception('py3 conversion failed')
    f2 = open(filename)
    try:
        return f2.read()
    finally:
        # Clean up the temporary file even when reading fails.
        f2.close()
        os.remove(filename)
class py3_raises_typeerror(object):
    """Context manager: under Python 3 assert the block raised TypeError;
    under Python 2 suppress whatever was raised."""
    def __enter__(self):
        return None
    def __exit__(self, exc_type, exc_value, traceback):
        if sys.version_info[0] < 3:
            # Python 2: swallow the exception without checking its type.
            return True
        assert exc_type is TypeError
        return True
| gpl-2.0 |
Fl0rianFischer/sme_odoo | addons/hr_equipment/models/hr_equipment.py | 21 | 15888 | # -*- coding: utf-8 -*-
# Fixed: `fields` was imported twice on the first line (harmless but noisy).
from openerp import api, fields, models
from openerp import _, tools
from openerp.exceptions import UserError
class HrEquipmentStage(models.Model):
    """ Model for case stages. This models the main stages of a Maintenance Request management flow. """

    _name = 'hr.equipment.stage'
    _description = 'Maintenance Stage'
    _order = 'sequence, id'

    # Stage label shown as the kanban column header.
    name = fields.Char('Name', required=True, translate=True)
    # Relative position of the stage in the maintenance pipeline.
    sequence = fields.Integer('Sequence', default=20)
    # Whether the kanban column is collapsed by default.
    # Label fixed from 'Folded in Recruitment Pipe': this model belongs to the
    # maintenance flow, not recruitment (copy/paste leftover; the sibling
    # category model already uses the 'Maintenance Pipe' wording).
    fold = fields.Boolean('Folded in Maintenance Pipe')
    # Requests in a stage flagged as done are considered closed.
    done = fields.Boolean('Request Done')
class HrEquipmentCategory(models.Model):
    """Equipment (asset) category: groups equipments and receives incoming
    maintenance requests by email through its mail alias."""
    _name = 'hr.equipment.category'
    _inherits = {"mail.alias": "alias_id"}
    _inherit = ['mail.thread']
    _description = 'Asset Category'
    @api.one
    @api.depends('equipment_ids')
    def _compute_fold(self):
        # Fold categories with no equipment in the kanban view.
        # NOTE(review): depends on equipment_ids but reads the non-stored
        # equipment_count -- presumably recomputed together; verify.
        self.fold = False if self.equipment_count else True
    name = fields.Char('Category Name', required=True, translate=True)
    user_id = fields.Many2one('res.users', 'Responsible', track_visibility='onchange', default=lambda self: self.env.uid)
    color = fields.Integer('Color Index')
    note = fields.Text('Comments', translate=True)
    equipment_ids = fields.One2many('hr.equipment', 'category_id', string='Equipments', copy=False)
    equipment_count = fields.Integer(string="Equipment", compute='_compute_equipment_count')
    maintenance_ids = fields.One2many('hr.equipment.request', 'category_id', copy=False)
    maintenance_count = fields.Integer(string="Maintenance", compute='_compute_maintenance_count')
    alias_id = fields.Many2one(
        'mail.alias', 'Alias', ondelete='cascade', required=True,
        help="Email alias for this equipment category. New emails will automatically "
             "create new maintenance request for this equipment category.")
    fold = fields.Boolean(string='Folded in Maintenance Pipe', compute='_compute_fold', store=True)
    @api.multi
    def _compute_equipment_count(self):
        # One read_group query for all records instead of one count per record.
        equipment_data = self.env['hr.equipment'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id'])
        mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in equipment_data])
        for category in self:
            category.equipment_count = mapped_data.get(category.id, 0)
    @api.multi
    def _compute_maintenance_count(self):
        # Same batched pattern as _compute_equipment_count.
        maintenance_data = self.env['hr.equipment.request'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id'])
        mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in maintenance_data])
        for category in self:
            category.maintenance_count = mapped_data.get(category.id, 0)
    @api.model
    def create(self, vals):
        # Tell the mail.alias machinery which model the alias should create
        # records in, then point the alias back at the new category.
        self = self.with_context(alias_model_name='hr.equipment.request', alias_parent_model_name=self._name)
        category_id = super(HrEquipmentCategory, self).create(vals)
        category_id.alias_id.write({'alias_parent_thread_id': category_id.id, 'alias_defaults': {'category_id': category_id.id}})
        return category_id
    @api.multi
    def unlink(self):
        # Refuse deletion while equipments or requests still reference the
        # category, to avoid orphaning them.
        for category in self:
            if category.equipment_ids or category.maintenance_ids:
                raise UserError(_("You cannot delete an equipment category containing equipments or maintenance requests."))
        res = super(HrEquipmentCategory, self).unlink()
        return res
class HrEquipment(models.Model):
    """A physical asset that can be assigned to an employee or a department
    and tracked through maintenance requests."""
    _name = 'hr.equipment'
    _inherit = ['mail.thread']
    _description = 'Equipment'
    @api.multi
    def _track_subtype(self, init_values):
        # Use the "assigned" mail subtype when the assignee just changed.
        self.ensure_one()
        if ('employee_id' in init_values and self.employee_id) or ('department_id' in init_values and self.department_id):
            return 'hr_equipment.mt_mat_assign'
        return super(HrEquipment, self)._track_subtype(init_values)
    @api.multi
    def name_get(self):
        # Display "name/serial" when a serial number is set, plain name otherwise.
        result = []
        for record in self:
            if record.name and record.serial_no:
                result.append((record.id, record.name + '/' + record.serial_no))
            if record.name and not record.serial_no:
                result.append((record.id, record.name))
        return result
    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        # Prefer an exact-name match; fall back to the requested operator.
        args = args or []
        recs = self.browse()
        if name:
            recs = self.search([('name', '=', name)] + args, limit=limit)
        if not recs:
            recs = self.search([('name', operator, name)] + args, limit=limit)
        return recs.name_get()
    name = fields.Char('Asset Name', required=True, translate=True)
    user_id = fields.Many2one('res.users', string='Technician', track_visibility='onchange')
    employee_id = fields.Many2one('hr.employee', string='Assigned to Employee', track_visibility='onchange')
    department_id = fields.Many2one('hr.department', string='Assigned to Department', track_visibility='onchange')
    category_id = fields.Many2one('hr.equipment.category', string='Asset Category', track_visibility='onchange')
    partner_id = fields.Many2one('res.partner', string='Vendor', domain="[('supplier', '=', 1)]")
    partner_ref = fields.Char('Vendor Reference')
    location = fields.Char('Location')
    model = fields.Char('Model')
    serial_no = fields.Char('Serial Number', copy=False)
    assign_date = fields.Date('Assigned Date', track_visibility='onchange')
    cost = fields.Float('Cost')
    note = fields.Text('Note')
    warranty = fields.Date('Warranty')
    color = fields.Integer('Color Index')
    scrap_date = fields.Date('Scrap Date')
    # Mutually exclusive assignment mode; see _onchange_equipment_assign_to.
    equipment_assign_to = fields.Selection(
        [('department', 'Department'), ('employee', 'Employee')],
        string='Used By',
        required=True,
        default='employee')
    maintenance_ids = fields.One2many('hr.equipment.request', 'equipment_id')
    maintenance_count = fields.Integer(compute='_compute_maintenance_count', string="Maintenance", store=True)
    maintenance_open_count = fields.Integer(compute='_compute_maintenance_count', string="Current Maintenance", store=True)
    @api.one
    @api.depends('maintenance_ids.stage_id.done')
    def _compute_maintenance_count(self):
        # Total requests vs. requests still in a not-done stage.
        self.maintenance_count = len(self.maintenance_ids)
        self.maintenance_open_count = len(self.maintenance_ids.filtered(lambda x: not x.stage_id.done))
    @api.onchange('equipment_assign_to')
    def _onchange_equipment_assign_to(self):
        # Clear the assignee field that does not match the chosen mode and
        # stamp the assignment date.
        if self.equipment_assign_to == 'employee':
            self.department_id = False
        if self.equipment_assign_to == 'department':
            self.employee_id = False
        self.assign_date = fields.Date.context_today(self)
    @api.onchange('category_id')
    def _onchange_category_id(self):
        # Default technician follows the category's responsible user.
        self.user_id = self.category_id.user_id
    _sql_constraints = [
        ('serial_no', 'unique(serial_no)', "Another asset already exists with this serial number!"),
    ]
    @api.model
    def create(self, vals):
        equipment = super(HrEquipment, self).create(vals)
        # subscribe employee or department manager when equipment assign to him.
        user_ids = []
        if equipment.employee_id and equipment.employee_id.user_id:
            user_ids.append(equipment.employee_id.user_id.id)
        if equipment.department_id and equipment.department_id.manager_id and equipment.department_id.manager_id.user_id:
            user_ids.append(equipment.department_id.manager_id.user_id.id)
        if user_ids:
            equipment.message_subscribe_users(user_ids=user_ids)
        return equipment
    @api.multi
    def write(self, vals):
        user_ids = []
        # subscribe employee or department manager when equipment assign to employee or department.
        if vals.get('employee_id'):
            user_id = self.env['hr.employee'].browse(vals['employee_id'])['user_id']
            if user_id:
                user_ids.append(user_id.id)
        if vals.get('department_id'):
            department = self.env['hr.department'].browse(vals['department_id'])
            if department and department.manager_id and department.manager_id.user_id:
                user_ids.append(department.manager_id.user_id.id)
        if user_ids:
            self.message_subscribe_users(user_ids=user_ids)
        return super(HrEquipment, self).write(vals)
    @api.multi
    def _read_group_category_ids(self, domain, read_group_order=None, access_rights_uid=None):
        """ Read group customization in order to display all the category in the
            kanban view, even if they are empty
        """
        category_obj = self.env['hr.equipment.category']
        order = category_obj._order
        access_rights_uid = access_rights_uid or self._uid
        if read_group_order == 'category_id desc':
            order = '%s desc' % order
        category_ids = category_obj._search([], order=order, access_rights_uid=access_rights_uid)
        result = [category.name_get()[0] for category in category_obj.browse(category_ids)]
        # restore order of the search
        result.sort(lambda x, y: cmp(category_ids.index(x[0]), category_ids.index(y[0])))
        # Report each category's fold state so empty columns can stay collapsed.
        fold = {}
        for category in category_obj.browse(category_ids):
            fold[category.id] = category.fold
        return result, fold
    # Hook used by the web client to group kanban columns by category.
    _group_by_full = {
        'category_id': _read_group_category_ids
    }
class HrEquipmentRequest(models.Model):
    """A maintenance request on an equipment, moving through the stages
    defined by hr.equipment.stage; can be created from incoming email."""
    _name = 'hr.equipment.request'
    _inherit = ['mail.thread']
    _description = 'Maintenance Requests'
    @api.returns('self')
    def _default_employee_get(self):
        # Default requester: the employee linked to the current user.
        return self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)
    @api.returns('self')
    def _default_stage(self):
        # First stage in the configured ordering (sequence, id).
        return self.env['hr.equipment.stage'].search([], limit=1)
    @api.multi
    def _track_subtype(self, init_values):
        # "created" subtype while in the first stage, "status" afterwards.
        self.ensure_one()
        if 'stage_id' in init_values and self.stage_id.sequence <= 1:
            return 'hr_equipment.mt_req_created'
        elif 'stage_id' in init_values and self.stage_id.sequence > 1:
            return 'hr_equipment.mt_req_status'
        return super(HrEquipmentRequest, self)._track_subtype(init_values)
    name = fields.Char('Subjects', required=True)
    description = fields.Text('Description')
    request_date = fields.Date('Request Date', track_visibility='onchange', default=fields.Date.context_today)
    employee_id = fields.Many2one('hr.employee', string='Employee', default=_default_employee_get)
    department_id = fields.Many2one('hr.department', string='Department')
    category_id = fields.Many2one('hr.equipment.category', string='Category')
    equipment_id = fields.Many2one('hr.equipment', string='Asset', select=True)
    user_id = fields.Many2one('res.users', string='Assigned to', track_visibility='onchange')
    stage_id = fields.Many2one('hr.equipment.stage', string='Stage', track_visibility='onchange', default=_default_stage)
    priority = fields.Selection([('0', 'Very Low'), ('1', 'Low'), ('2', 'Normal'), ('3', 'High')], string='Priority')
    color = fields.Integer('Color Index')
    close_date = fields.Date('Close Date')
    kanban_state = fields.Selection([('normal', 'In Progress'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')],
                                    string='Kanban State', required=True, default='normal', track_visibility='onchange')
    active = fields.Boolean(default=True, help="Set active to false to hide the maintenance request without deleting it.")
    @api.multi
    def archive_equipment_request(self):
        # Archiving hides the request instead of deleting it.
        self.write({'active': False})
    @api.multi
    def reset_equipment_request(self):
        """ Reinsert the equipment request into the maintenance pipe in the first stage"""
        first_stage_obj = self.env['hr.equipment.stage'].search([], order="sequence asc", limit=1)
        self.write({'active': True, 'stage_id': first_stage_obj.id})
    @api.onchange('employee_id', 'department_id')
    def onchange_department_or_employee_id(self):
        # Build a domain matching equipments of the department and/or of the
        # employee (or unassigned); auto-pick the equipment when unambiguous.
        domain = []
        if self.department_id:
            domain = [('department_id', '=', self.department_id.id)]
        if self.employee_id and self.department_id:
            domain = ['|'] + domain
        if self.employee_id:
            domain = domain + ['|', ('employee_id', '=', self.employee_id.id), ('employee_id', '=', None)]
        equipment = self.env['hr.equipment'].search(domain, limit=2)
        if len(equipment) == 1:
            self.equipment_id = equipment
        return {'domain': {'equipment_id': domain}}
    @api.onchange('equipment_id')
    def onchange_equipment_id(self):
        # Assignee follows the equipment's technician, falling back to the
        # category's responsible user.
        self.user_id = self.equipment_id.user_id if self.equipment_id.user_id else self.equipment_id.category_id.user_id
        self.category_id = self.equipment_id.category_id
    @api.onchange('category_id')
    def onchange_category_id(self):
        # Only override the assignee when the equipment did not already set one.
        if not self.user_id or not self.equipment_id or (self.user_id and not self.equipment_id.user_id):
            self.user_id = self.category_id.user_id
    @api.model
    def create(self, vals):
        # context: no_log, because subtype already handle this
        self = self.with_context(mail_create_nolog=True)
        result = super(HrEquipmentRequest, self).create(vals)
        if result.employee_id.user_id:
            result.message_subscribe_users(user_ids=[result.employee_id.user_id.id])
        return result
    @api.multi
    def write(self, vals):
        # Overridden to reset the kanban_state to normal whenever
        # the stage (stage_id) of the Maintenance Request changes.
        if vals and 'kanban_state' not in vals and 'stage_id' in vals:
            vals['kanban_state'] = 'normal'
        if vals.get('employee_id'):
            employee = self.env['hr.employee'].browse(vals['employee_id'])
            if employee and employee.user_id:
                self.message_subscribe_users(user_ids=[employee.user_id.id])
        return super(HrEquipmentRequest, self).write(vals)
    @api.multi
    def _read_group_stage_ids(self, domain, read_group_order=None, access_rights_uid=None):
        """ Read group customization in order to display all the stages in the
            kanban view, even if they are empty
        """
        stage_obj = self.env['hr.equipment.stage']
        order = stage_obj._order
        access_rights_uid = access_rights_uid or self._uid
        if read_group_order == 'stage_id desc':
            order = '%s desc' % order
        stage_ids = stage_obj._search([], order=order, access_rights_uid=access_rights_uid)
        result = [stage.name_get()[0] for stage in stage_obj.browse(stage_ids)]
        # restore order of the search
        result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
        # Report each stage's fold state for the kanban columns.
        fold = {}
        for stage in stage_obj.browse(stage_ids):
            fold[stage.id] = stage.fold or False
        return result, fold
    # Hook used by the web client to group kanban columns by stage.
    _group_by_full = {
        'stage_id': _read_group_stage_ids
    }
    @api.model
    def message_new(self, msg, custom_values=None):
        """ Overrides mail_thread message_new that is called by the mailgateway
            through message_process.
            This override updates the document according to the email.
        """
        if custom_values is None:
            custom_values = {}
        # Match the sender's email address to a user, then to an employee, so
        # the request is attributed to the right person.
        email = tools.email_split(msg.get('from')) and tools.email_split(msg.get('from'))[0] or False
        user = self.env['res.users'].search([('login', '=', email)], limit=1)
        if user:
            employee = self.env['hr.employee'].search([('user_id', '=', user.id)], limit=1)
            if employee:
                custom_values['employee_id'] = employee and employee[0].id
        return super(HrEquipmentRequest, self).message_new(msg, custom_values=custom_values)
| gpl-3.0 |
knowsis/django | django/conf/locale/sr/formats.py | 235 | 1981 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Locale format definitions for Serbian (sr).
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
    # '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
    '%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
    '%d.%m.%Y.', # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M', # '25.10.06. 14:30'
    '%d.%m.%y.', # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
    '%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
    '%d. %m. %Y.', # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
    '%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
    '%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3  # digits per thousands group, e.g. 1.234.567
| bsd-3-clause |
ngsxfem/ngsxfem | tests/pytests/test_intcurved.py | 1 | 3514 | # integration on lset domains
from math import pi
# ngsolve stuff
from ngsolve import *
# basic xfem functionality
from xfem import *
# For LevelSetAdaptationMachinery
from xfem.lsetcurv import *
from ngsolve.meshes import *
import pytest
@pytest.mark.parametrize("quad", [False,True])
@pytest.mark.parametrize("order", [1,2,3])
def test_intcurved(quad, order):
    """Convergence test for integration on a level-set geometry (circle of
    radius 0.5 inside [-1,1]^2) with isoparametric mesh curving: checks
    absolute errors and estimated orders of convergence (eoc)."""
    levelset = sqrt(x*x+y*y)-0.5
    # Exact values: area outside the disc, area of the disc, circumference.
    referencevals = { POS : 4.0-0.25*pi, NEG : 0.25*pi, IF : pi }
    N=4
    errors_uncurved = dict()
    errors_curved = dict()
    eoc_uncurved = dict()
    eoc_curved = dict()
    for key in [NEG,POS,IF]:
        errors_curved[key] = []
        errors_uncurved[key] = []
        eoc_curved[key] = []
        eoc_uncurved[key] = []
    refinements = 5
    if order == 1:
        # Lowest order converges slowly; take extra refinements so the
        # asymptotic rate is visible.
        refinements += 2
    for reflevel in range(refinements):
        # Structured mesh on [-1,1]^2, halving the mesh size per level.
        mesh = MakeStructured2DMesh(quads = quad, nx=N*2**reflevel, ny=N*2**reflevel,
                                    mapping = lambda x,y : (2*x-1,2*y-1))
        lsetmeshadap = LevelSetMeshAdaptation(mesh, order=order, threshold=0.2, discontinuous_qn=True)
        lsetp1 = lsetmeshadap.lset_p1
        f = CoefficientFunction (1.0)
        for key in [NEG,POS,IF]:
            # Applying the mesh deformation
            deformation = lsetmeshadap.CalcDeformation(levelset)
            integrals_uncurved = Integrate(levelset_domain = { "levelset" : lsetp1, "domain_type" : key},
                                           cf=f, mesh=mesh, order = order)
            mesh.SetDeformation(deformation)
            integrals_curved = Integrate(levelset_domain = { "levelset" : lsetp1, "domain_type" : key},
                                         cf=f, mesh=mesh, order = order)
            # Unapply the mesh deformation (for refinement)
            mesh.UnsetDeformation()
            errors_curved[key].append(abs(integrals_curved - referencevals[key]))
            errors_uncurved[key].append(abs(integrals_uncurved - referencevals[key]))
        # refine cut elements:
        # if not quad:
        #     RefineAtLevelSet(gf=lsetmeshadap.lset_p1)
    # Estimated order of convergence between consecutive refinement levels.
    for key in [NEG,POS,IF]:
        eoc_curved[key] = [log(a/b)/log(2) for (a,b) in zip (errors_curved[key][0:-1],errors_curved[key][1:]) ]
        eoc_uncurved[key] = [log(a/b)/log(2) for (a,b) in zip (errors_uncurved[key][0:-1],errors_uncurved[key][1:]) ]
    # print("errors (uncurved): \n{}\n".format(errors_uncurved))
    # print("   eoc (uncurved): \n{}\n".format(   eoc_uncurved))
    print("errors (  curved): \n{}\n".format(  errors_curved))
    print("   eoc (  curved): \n{}\n".format(     eoc_curved))
    print("avg.eoc(curved, IF): \n{}\n".format( sum(eoc_curved[IF][2:])/len(eoc_curved[IF][2:])))
    print("avg.eoc(curved,NEG): \n{}\n".format( sum(eoc_curved[NEG][2:])/len(eoc_curved[NEG][2:])))
    print("avg.eoc(curved,POS): \n{}\n".format( sum(eoc_curved[POS][2:])/len(eoc_curved[POS][2:])))
    # Final-level absolute error bounds (looser for order 1).
    if (order > 1):
        assert errors_curved[IF][-1] < 1e-5
        assert errors_curved[NEG][-1] < 1e-5
        assert errors_curved[POS][-1] < 1e-5
    else:
        assert errors_curved[IF][-1] < 1e-4
        assert errors_curved[NEG][-1] < 1e-4
        assert errors_curved[POS][-1] < 1e-4
    # Skip pre-asymptotic levels for order 1 before averaging the eoc.
    s = 0
    if order == 1:
        s+=2
    # Expect roughly order+1 convergence; 0.75 slack absorbs noise.
    assert sum(eoc_curved[IF][s:])/len(eoc_curved[IF][s:]) > order + 0.75
    assert sum(eoc_curved[NEG][s:])/len(eoc_curved[NEG][s:]) > order + 0.75
    assert sum(eoc_curved[POS][s:])/len(eoc_curved[POS][s:]) > order + 0.75
| lgpl-3.0 |
mancoast/CPythonPyc_test | cpython/254_test_copy.py | 10 | 17720 | """Unit tests for the copy module."""
import sys
import copy
import copy_reg
import unittest
from test import test_support
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assert_(copy.Error is copy.error)
self.assert_(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.copy(x) is x, repr(x))
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0] is y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C:
__metaclass__ = Meta
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
    # Type-specific _deepcopy_xxx() methods
    def test_deepcopy_atomic(self):
        """Atomic values (numbers, strings, code, classes, builtins) are returned as-is."""
        class Classic:
            pass
        class NewStyle(object):
            pass
        def f():
            pass
        tests = [None, 42, 2L**100, 3.14, True, False, 1j,
                 "hello", u"hello\u1234", f.func_code,
                 NewStyle, xrange(10), Classic, max]
        for x in tests:
            self.assert_(copy.deepcopy(x) is x, repr(x))
    def test_deepcopy_list(self):
        """Nested lists are duplicated recursively, not shared."""
        x = [[1, 2], 3]
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(x is not y)
        self.assert_(x[0] is not y[0])
    def test_deepcopy_reflexive_list(self):
        """A self-referential list deepcopies into a new self-referential list."""
        x = []
        x.append(x)
        y = copy.deepcopy(x)
        # Comparing recursive structures raises RuntimeError in Python 2.
        self.assertRaises(RuntimeError, cmp, y, x)
        self.assert_(y is not x)
        self.assert_(y[0] is y)
        self.assertEqual(len(y), 1)
    def test_deepcopy_tuple(self):
        """Tuples holding mutable items are copied recursively."""
        x = ([1, 2], 3)
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(x is not y)
        self.assert_(x[0] is not y[0])
    def test_deepcopy_reflexive_tuple(self):
        """A tuple participating in a reference cycle is fully duplicated."""
        x = ([],)
        x[0].append(x)
        y = copy.deepcopy(x)
        self.assertRaises(RuntimeError, cmp, y, x)
        self.assert_(y is not x)
        self.assert_(y[0] is not x[0])
        self.assert_(y[0][0] is y)
    def test_deepcopy_dict(self):
        """Dict values are copied recursively."""
        x = {"foo": [1, 2], "bar": 3}
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(x is not y)
        self.assert_(x["foo"] is not y["foo"])
    def test_deepcopy_reflexive_dict(self):
        """A dict containing itself deepcopies into a new cyclic dict."""
        x = {}
        x['foo'] = x
        y = copy.deepcopy(x)
        self.assertRaises(RuntimeError, cmp, y, x)
        self.assert_(y is not x)
        self.assert_(y['foo'] is y)
        self.assertEqual(len(y), 1)
    def test_deepcopy_keepalive(self):
        """The memo dict keeps originals alive, keyed by id()."""
        memo = {}
        x = 42
        y = copy.deepcopy(x, memo)
        self.assert_(memo[id(x)] is x)
    def test_deepcopy_inst_vanilla(self):
        """Classic-class instance with no copy hooks: __dict__ is deep-copied."""
        class C:
            def __init__(self, foo):
                self.foo = foo
            def __cmp__(self, other):
                return cmp(self.foo, other.foo)
        x = C([42])
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y.foo is not x.foo)
    def test_deepcopy_inst_deepcopy(self):
        """A user-defined __deepcopy__ hook is honored."""
        class C:
            def __init__(self, foo):
                self.foo = foo
            def __deepcopy__(self, memo):
                return C(copy.deepcopy(self.foo, memo))
            def __cmp__(self, other):
                return cmp(self.foo, other.foo)
        x = C([42])
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y is not x)
        self.assert_(y.foo is not x.foo)
    def test_deepcopy_inst_getinitargs(self):
        """__getinitargs__ drives reconstruction of classic-class instances."""
        class C:
            def __init__(self, foo):
                self.foo = foo
            def __getinitargs__(self):
                return (self.foo,)
            def __cmp__(self, other):
                return cmp(self.foo, other.foo)
        x = C([42])
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y is not x)
        self.assert_(y.foo is not x.foo)
    def test_deepcopy_inst_getstate(self):
        """__getstate__ output is deep-copied into the new instance."""
        class C:
            def __init__(self, foo):
                self.foo = foo
            def __getstate__(self):
                return {"foo": self.foo}
            def __cmp__(self, other):
                return cmp(self.foo, other.foo)
        x = C([42])
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y is not x)
        self.assert_(y.foo is not x.foo)
    def test_deepcopy_inst_setstate(self):
        """__setstate__ is used to install the deep-copied state."""
        class C:
            def __init__(self, foo):
                self.foo = foo
            def __setstate__(self, state):
                self.foo = state["foo"]
            def __cmp__(self, other):
                return cmp(self.foo, other.foo)
        x = C([42])
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y is not x)
        self.assert_(y.foo is not x.foo)
    def test_deepcopy_inst_getstate_setstate(self):
        """__getstate__/__setstate__ pair may use a non-dict state object."""
        class C:
            def __init__(self, foo):
                self.foo = foo
            def __getstate__(self):
                return self.foo
            def __setstate__(self, state):
                self.foo = state
            def __cmp__(self, other):
                return cmp(self.foo, other.foo)
        x = C([42])
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y is not x)
        self.assert_(y.foo is not x.foo)
    def test_deepcopy_reflexive_inst(self):
        """An instance referencing itself deepcopies into a new self-cycle."""
        class C:
            pass
        x = C()
        x.foo = x
        y = copy.deepcopy(x)
        self.assert_(y is not x)
        self.assert_(y.foo is y)
    # _reconstruct()
    def test_reconstruct_string(self):
        """String __reduce__ result short-circuits both copy and deepcopy."""
        class C(object):
            def __reduce__(self):
                return ""
        x = C()
        y = copy.copy(x)
        self.assert_(y is x)
        y = copy.deepcopy(x)
        self.assert_(y is x)
    def test_reconstruct_nostate(self):
        """2-tuple __reduce__ (callable, args): instance state is NOT copied."""
        class C(object):
            def __reduce__(self):
                return (C, ())
        x = C()
        x.foo = 42
        y = copy.copy(x)
        self.assert_(y.__class__ is x.__class__)
        y = copy.deepcopy(x)
        self.assert_(y.__class__ is x.__class__)
    def test_reconstruct_state(self):
        """3-tuple __reduce__ carries __dict__ state; deepcopy duplicates it."""
        class C(object):
            def __reduce__(self):
                return (C, (), self.__dict__)
            def __cmp__(self, other):
                return cmp(self.__dict__, other.__dict__)
        x = C()
        x.foo = [42]
        y = copy.copy(x)
        self.assertEqual(y, x)
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y.foo is not x.foo)
    def test_reconstruct_state_setstate(self):
        """State from __reduce__ is applied through __setstate__ when defined."""
        class C(object):
            def __reduce__(self):
                return (C, (), self.__dict__)
            def __setstate__(self, state):
                self.__dict__.update(state)
            def __cmp__(self, other):
                return cmp(self.__dict__, other.__dict__)
        x = C()
        x.foo = [42]
        y = copy.copy(x)
        self.assertEqual(y, x)
        y = copy.deepcopy(x)
        self.assertEqual(y, x)
        self.assert_(y.foo is not x.foo)
    def test_reconstruct_reflexive(self):
        """_reconstruct handles reference cycles through the memo."""
        class C(object):
            pass
        x = C()
        x.foo = x
        y = copy.deepcopy(x)
        self.assert_(y is not x)
        self.assert_(y.foo is y)
    # Additions for Python 2.3 and pickle protocol 2
    def test_reduce_4tuple(self):
        """4-tuple __reduce__: slot 4 is an iterator of list items to append."""
        class C(list):
            def __reduce__(self):
                return (C, (), self.__dict__, iter(self))
            def __cmp__(self, other):
                return (cmp(list(self), list(other)) or
                        cmp(self.__dict__, other.__dict__))
        x = C([[1, 2], 3])
        y = copy.copy(x)
        # Shallow copy shares the contained items...
        self.assertEqual(x, y)
        self.assert_(x is not y)
        self.assert_(x[0] is y[0])
        y = copy.deepcopy(x)
        # ...deep copy duplicates them.
        self.assertEqual(x, y)
        self.assert_(x is not y)
        self.assert_(x[0] is not y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.iteritems())
def __cmp__(self, other):
return (cmp(dict(self), list(dict)) or
cmp(self.__dict__, other.__dict__))
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
    def test_copy_slots(self):
        """Shallow copy of a __slots__ class shares the slot values."""
        class C(object):
            __slots__ = ["foo"]
        x = C()
        x.foo = [42]
        y = copy.copy(x)
        self.assert_(x.foo is y.foo)
    def test_deepcopy_slots(self):
        """Deep copy of a __slots__ class duplicates the slot values."""
        class C(object):
            __slots__ = ["foo"]
        x = C()
        x.foo = [42]
        y = copy.deepcopy(x)
        self.assertEqual(x.foo, y.foo)
        self.assert_(x.foo is not y.foo)
    def test_copy_list_subclass(self):
        """copy.copy of a list subclass shares both items and attributes."""
        class C(list):
            pass
        x = C([[1, 2], 3])
        x.foo = [4, 5]
        y = copy.copy(x)
        self.assertEqual(list(x), list(y))
        self.assertEqual(x.foo, y.foo)
        self.assert_(x[0] is y[0])
        self.assert_(x.foo is y.foo)
    def test_deepcopy_list_subclass(self):
        """copy.deepcopy of a list subclass duplicates items and attributes."""
        class C(list):
            pass
        x = C([[1, 2], 3])
        x.foo = [4, 5]
        y = copy.deepcopy(x)
        self.assertEqual(list(x), list(y))
        self.assertEqual(x.foo, y.foo)
        self.assert_(x[0] is not y[0])
        self.assert_(x.foo is not y.foo)
    def test_copy_tuple_subclass(self):
        """Tuple subclasses survive shallow copying with contents intact."""
        class C(tuple):
            pass
        x = C([1, 2, 3])
        self.assertEqual(tuple(x), (1, 2, 3))
        y = copy.copy(x)
        self.assertEqual(tuple(y), (1, 2, 3))
    def test_deepcopy_tuple_subclass(self):
        """Tuple subclasses deep-copy their mutable elements."""
        class C(tuple):
            pass
        x = C([[1, 2], 3])
        self.assertEqual(tuple(x), ([1, 2], 3))
        y = copy.deepcopy(x)
        self.assertEqual(tuple(y), ([1, 2], 3))
        self.assert_(x is not y)
        self.assert_(x[0] is not y[0])
    def test_getstate_exc(self):
        """Exceptions raised by __getstate__ propagate out of copy.copy."""
        class EvilState(object):
            def __getstate__(self):
                raise ValueError, "ain't got no stickin' state"
        self.assertRaises(ValueError, copy.copy, EvilState())
    def test_copy_function(self):
        """Functions (module-level, nested, lambda) are atomic for copy."""
        self.assertEqual(copy.copy(global_foo), global_foo)
        def foo(x, y): return x+y
        self.assertEqual(copy.copy(foo), foo)
        bar = lambda: None
        self.assertEqual(copy.copy(bar), bar)
    def test_deepcopy_function(self):
        """Functions are atomic for deepcopy as well."""
        self.assertEqual(copy.deepcopy(global_foo), global_foo)
        def foo(x, y): return x+y
        self.assertEqual(copy.deepcopy(foo), foo)
        bar = lambda: None
        self.assertEqual(copy.deepcopy(bar), bar)
def global_foo(x, y):
    """Module-level helper used by the function-copy tests: return x + y."""
    return x + y
def test_main():
    """regrtest-compatible entry point: run the TestCopy suite."""
    test_support.run_unittest(TestCopy)
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
mirror/vbox | src/VBox/ValidationKit/testmanager/core/schedulerbeci.py | 4 | 5171 | # -*- coding: utf-8 -*-
# $Id$
"""
Test Manager - Best-Effort-Continuous-Integration (BECI) scheduler.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Validation Kit imports.
from testmanager.core.schedulerbase import SchedulerBase, SchedQueueData;
class SchdulerBeci(SchedulerBase): # pylint: disable=R0903
    """
    The best-effort-continuous-integration scheduler, BECI for short.
    """
    def __init__(self, oDb, oSchedGrpData, iVerbosity, tsSecStart):
        SchedulerBase.__init__(self, oDb, oSchedGrpData, iVerbosity, tsSecStart);
    def _recreateQueueItems(self, oData):
        """
        Rebuilds the scheduling queue: round-robins the argument variations
        of every test case, visiting higher-priority test cases more often,
        until each variation list has wrapped around once (cNotAtEnd == 0)
        or the item cap is reached.  Returns the list of SchedQueueData items.
        """
        #
        # Prepare the input data for the loop below. We compress the priority
        # to reduce the number of loops we need to execute below.
        #
        # Note! For BECI test group priority only applies to the ordering of
        # test groups, which has been resolved by the done sorting in the
        # base class.
        #
        iMinPriority = 0x7fff;
        iMaxPriority = 0;
        for oTestGroup in oData.aoTestGroups:
            for oTestCase in oTestGroup.aoTestCases:
                iPrio = oTestCase.iSchedPriority;
                assert iPrio in range(32);
                # NOTE(review): relies on Python 2 integer '/' being floor
                # division to fold 32 priority levels into 8 buckets.
                iPrio = iPrio / 4;
                assert iPrio in range(8);
                if iPrio > iMaxPriority:
                    iMaxPriority = iPrio;
                if iPrio < iMinPriority:
                    iMinPriority = iPrio;
                # Scratch attributes used by the generation loop below.
                oTestCase.iBeciPrio = iPrio;
                oTestCase.iNextVariation = -1;
        assert iMinPriority in range(8);
        assert iMaxPriority in range(8);
        assert iMinPriority <= iMaxPriority;
        #
        # Generate the queue items.
        #
        # Cap the queue at 64 rounds per variation, but never above 1M items.
        cMaxItems = len(oData.aoArgsVariations) * 64;
        if cMaxItems > 1048576:
            cMaxItems = 1048576;
        aoItems = list();
        cNotAtEnd = len(oData.aoTestCases);
        while len(aoItems) < cMaxItems:
            self.msgDebug('outer loop: %s items' % (len(aoItems),));
            # Highest priority bucket first; a test case with bucket p is
            # visited once per iPrio in [p..iMaxPriority], i.e. more often.
            for iPrio in range(iMaxPriority, iMinPriority - 1, -1):
                #self.msgDebug('prio loop: %s' % (iPrio,));
                for oTestGroup in oData.aoTestGroups:
                    #self.msgDebug('testgroup loop: %s' % (oTestGroup,));
                    for oTestCase in oTestGroup.aoTestCases:
                        self.msgDebug('testcase loop: idTestCase=%s' % (oTestCase.idTestCase,));
                        if iPrio <= oTestCase.iBeciPrio and len(oTestCase.aoArgsVariations) > 0:
                            # Get variation.
                            iNext = oTestCase.iNextVariation;
                            if iNext != 0:
                                if iNext == -1: iNext = 0;
                                cNotAtEnd -= 1;
                            oArgsVariation = oTestCase.aoArgsVariations[iNext];
                            # Update next variation.
                            iNext = (iNext + 1) % len(oTestCase.aoArgsVariations);
                            cNotAtEnd += iNext != 0;
                            oTestCase.iNextVariation = iNext;
                            # Create queue item and append it.
                            oItem = SchedQueueData();
                            oItem.initFromValues(idSchedGroup = self._oSchedGrpData.idSchedGroup,
                                                 idGenTestCaseArgs = oArgsVariation.idGenTestCaseArgs,
                                                 idTestGroup = oTestGroup.idTestGroup,
                                                 aidTestGroupPreReqs = oTestGroup.aidTestGroupPreReqs,
                                                 bmHourlySchedule = oTestGroup.bmHourlySchedule,
                                                 cMissingGangMembers = oArgsVariation.cGangMembers,
                                                 offQueue = len(aoItems));
                            aoItems.append(oItem);
                            # Done?
                            if cNotAtEnd == 0:
                                self.msgDebug('returns %s items' % (len(aoItems),));
                                return aoItems;
        return aoItems;
| gpl-2.0 |
JioCloud/python-ceilometerclient | ceilometerclient/tests/test_utils.py | 1 | 8338 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import mock
import six
import sys
from ceilometerclient.common import utils
from ceilometerclient.tests import utils as test_utils
class UtilsTest(test_utils.BaseTestCase):
    def test_prettytable(self):
        """utils.print_dict renders a left-aligned property/value table."""
        class Struct:
            def __init__(self, **entries):
                self.__dict__.update(entries)
        # test that the prettytable output is wellformatted (left-aligned)
        saved_stdout = sys.stdout
        try:
            sys.stdout = output_dict = six.StringIO()
            utils.print_dict({'K': 'k', 'Key': 'Value'})
        finally:
            sys.stdout = saved_stdout
        self.assertEqual(output_dict.getvalue(), '''\
+----------+-------+
| Property | Value |
+----------+-------+
| K | k |
| Key | Value |
+----------+-------+
''')
    def test_print_list(self):
        """utils.print_list honors custom headers, formatters and sortby."""
        class Foo:
            def __init__(self, one, two, three):
                self.one = one
                self.two = two
                self.three = three
        foo_list = [
            Foo(10, 'a', 'B'),
            Foo(8, 'c', 'c'),
            Foo(12, '0', 'Z')]
        def do_print_list(sortby):
            # Capture stdout produced by print_list for the given sort column.
            saved_stdout = sys.stdout
            try:
                sys.stdout = output = six.StringIO()
                utils.print_list(foo_list,
                                 ['one', 'two', 'three'],
                                 ['1st', '2nd', '3rd'],
                                 {'one': lambda o: o.one * 10},
                                 sortby)
            finally:
                sys.stdout = saved_stdout
            return output.getvalue()
        printed = do_print_list(None)
        self.assertEqual(printed, '''\
+-----+-----+-----+
| 1st | 2nd | 3rd |
+-----+-----+-----+
| 100 | a | B |
| 80 | c | c |
| 120 | 0 | Z |
+-----+-----+-----+
''')
        printed = do_print_list(0)
        self.assertEqual(printed, '''\
+-----+-----+-----+
| 1st | 2nd | 3rd |
+-----+-----+-----+
| 80 | c | c |
| 100 | a | B |
| 120 | 0 | Z |
+-----+-----+-----+
''')
        printed = do_print_list(1)
        self.assertEqual(printed, '''\
+-----+-----+-----+
| 1st | 2nd | 3rd |
+-----+-----+-----+
| 120 | 0 | Z |
| 100 | a | B |
| 80 | c | c |
+-----+-----+-----+
''')
    def test_args_array_to_dict(self):
        """key=value strings under the named key are folded into a sub-dict."""
        my_args = {
            'matching_metadata': ['metadata.key=metadata_value'],
            'other': 'value'
        }
        cleaned_dict = utils.args_array_to_dict(my_args,
                                                "matching_metadata")
        self.assertEqual(cleaned_dict, {
            'matching_metadata': {'metadata.key': 'metadata_value'},
            'other': 'value'
        })
    def test_args_array_to_list_of_dicts(self):
        """Semicolon-separated constraint strings parse with optional quoting."""
        starts = ['0 11 * * *', '"0 11 * * *"', '\'0 11 * * *\'']
        timezones = [None, 'US/Eastern', '"US/Eastern"', '\'US/Eastern\'']
        descs = [None, 'de sc', '"de sc"', '\'de sc\'']
        # Exercise every combination of quoting and optional fields.
        for start, tz, desc in itertools.product(starts, timezones, descs):
            my_args = {
                'time_constraints': ['name=const1;start=%s;duration=1'
                                     % start],
                'other': 'value'
            }
            expected = {
                'time_constraints': [dict(name='const1',
                                          start='0 11 * * *',
                                          duration='1')],
                'other': 'value'
            }
            if tz:
                my_args['time_constraints'][0] += ';timezone=%s' % tz
                expected['time_constraints'][0]['timezone'] = 'US/Eastern'
            if desc:
                my_args['time_constraints'][0] += ';description=%s' % desc
                expected['time_constraints'][0]['description'] = 'de sc'
            cleaned = utils.args_array_to_list_of_dicts(my_args,
                                                        'time_constraints')
            self.assertEqual(expected, cleaned)
    def test_key_with_slash_to_nested_dict(self):
        """'a/b' style keys are expanded into nested dictionaries."""
        my_args = {
            'combination_rule/alarm_ids': ['id1', 'id2'],
            'combination_rule/operator': 'and',
            'threshold_rule/threshold': 400,
            'threshold_rule/statictic': 'avg',
            'threshold_rule/comparison_operator': 'or',
        }
        nested_dict = utils.key_with_slash_to_nested_dict(my_args)
        self.assertEqual(nested_dict, {
            'combination_rule': {'alarm_ids': ['id1', 'id2'],
                                 'operator': 'and'},
            'threshold_rule': {'threshold': 400,
                               'statictic': 'avg',
                               'comparison_operator': 'or'},
        })
    def test_arg(self):
        """@utils.arg appends 'Required.'/'Defaults to X.' to help strings."""
        @utils.arg(help="not_required_no_default.")
        def not_required_no_default():
            pass
        _, args = not_required_no_default.__dict__['arguments'][0]
        self.assertEqual(args['help'], "not_required_no_default.")
        @utils.arg(required=True, help="required_no_default.")
        def required_no_default():
            pass
        _, args = required_no_default.__dict__['arguments'][0]
        self.assertEqual(args['help'], "required_no_default. Required.")
        @utils.arg(default=42, help="not_required_default.")
        def not_required_default():
            pass
        _, args = not_required_default.__dict__['arguments'][0]
        self.assertEqual(args['help'], "not_required_default. Defaults to 42.")
    def test_merge_nested_dict(self):
        """With depth=1 only one level is merged; deeper dicts are replaced."""
        dest = {'key': 'value',
                'nested': {'key2': 'value2',
                           'key3': 'value3',
                           'nested2': {'key': 'value',
                                       'some': 'thing'}}}
        source = {'key': 'modified',
                  'nested': {'key3': 'modified3',
                             'nested2': {'key5': 'value5'}}}
        utils.merge_nested_dict(dest, source, depth=1)
        self.assertEqual(dest, {'key': 'modified',
                                'nested': {'key2': 'value2',
                                           'key3': 'modified3',
                                           'nested2': {'key5': 'value5'}}})
    def test_merge_nested_dict_no_depth(self):
        """Without depth, nested dicts in dest are replaced wholesale."""
        dest = {'key': 'value',
                'nested': {'key2': 'value2',
                           'key3': 'value3',
                           'nested2': {'key': 'value',
                                       'some': 'thing'}}}
        source = {'key': 'modified',
                  'nested': {'key3': 'modified3',
                             'nested2': {'key5': 'value5'}}}
        utils.merge_nested_dict(dest, source)
        self.assertEqual(dest, {'key': 'modified',
                                'nested': {'key3': 'modified3',
                                           'nested2': {'key5': 'value5'}}})
    @mock.patch('prettytable.PrettyTable')
    def test_format_nested_list_of_dict(self, pt_mock):
        """Rows are added per-dict in column order; the table string is returned."""
        actual_rows = []
        def mock_add_row(row):
            actual_rows.append(row)
        table = mock.Mock()
        table.add_row = mock_add_row
        table.get_string.return_value = "the table"
        test_data = [
            {'column_1': 'value_11', 'column_2': 'value_21'},
            {'column_1': 'value_12', 'column_2': 'value_22'}
        ]
        columns = ['column_1', 'column_2']
        pt_mock.return_value = table
        rval = utils.format_nested_list_of_dict(test_data, columns)
        self.assertEqual("the table", rval)
        self.assertEqual([['value_11', 'value_21'], ['value_12', 'value_22']],
                         actual_rows)
| apache-2.0 |
flyfei/python-for-android | python-modules/twisted/twisted/test/test_doc.py | 99 | 3683 | from twisted.trial import unittest
import inspect, glob, os
from os import path
from twisted.python import reflect
import twisted
def errorInFile(f, line=17, name=''):
    """Format a file location so emacs will recognize it as an error point.

    @param line: Line number in file. Defaults to 17 because that's about how
        long the copyright headers are.
    """
    # Alternative (unused) format: 'File "%s", line %d, in %s'
    return '{0}:{1:d}:{2}'.format(f, line, name)
class DocCoverage(unittest.TestCase):
    """Checks that twisted modules and packages carry docstrings."""
    def setUp(self):
        """Collect dotted names of all twisted packages found on disk."""
        # Length of the path prefix to strip when converting paths to
        # dotted package names.
        remove = len(os.path.dirname(os.path.dirname(twisted.__file__)))+1
        def visit(dirlist, directory, files):
            if '__init__.py' in files:
                d = directory[remove:].replace('/','.')
                dirlist.append(d)
        self.packageNames = []
        # os.path.walk is the Python 2 pre-os.walk traversal API.
        os.path.walk(os.path.dirname(twisted.__file__),
                     visit, self.packageNames)
    def testModules(self):
        """Looking for docstrings in all modules."""
        docless = []
        for packageName in self.packageNames:
            if packageName in ('twisted.test',):
                # because some stuff in here behaves oddly when imported
                continue
            try:
                package = reflect.namedModule(packageName)
            except ImportError, e:
                # This is testing doc coverage, not importability.
                # (Really, I don't want to deal with the fact that I don't
                # have pyserial installed.)
                # print e
                pass
            else:
                docless.extend(self.modulesInPackage(packageName, package))
        self.failIf(docless, "No docstrings in module files:\n"
                    "%s" % ('\n'.join(map(errorInFile, docless)),))
    def modulesInPackage(self, packageName, package):
        """Return filenames of modules in *package* that lack a docstring."""
        docless = []
        directory = path.dirname(package.__file__)
        for modfile in glob.glob(path.join(directory, '*.py')):
            moduleName = inspect.getmodulename(modfile)
            if moduleName == '__init__':
                # These are tested by test_packages.
                continue
            elif moduleName in ('spelunk_gnome','gtkmanhole'):
                # argh special case pygtk evil argh. How does epydoc deal
                # with this?
                continue
            try:
                module = reflect.namedModule('.'.join([packageName,
                                                       moduleName]))
            except Exception, e:
                # print moduleName, "misbehaved:", e
                pass
            else:
                if not inspect.getdoc(module):
                    docless.append(modfile)
        return docless
    def testPackages(self):
        """Looking for docstrings in all packages."""
        docless = []
        for packageName in self.packageNames:
            try:
                package = reflect.namedModule(packageName)
            except Exception, e:
                # This is testing doc coverage, not importability.
                # (Really, I don't want to deal with the fact that I don't
                # have pyserial installed.)
                # print e
                pass
            else:
                if not inspect.getdoc(package):
                    docless.append(package.__file__.replace('.pyc','.py'))
        self.failIf(docless, "No docstrings for package files\n"
                    "%s" % ('\n'.join(map(errorInFile, docless),)))
    # This test takes a while and doesn't come close to passing. :(
    testModules.skip = "Activate me when you feel like writing docstrings, and fixing GTK crashing bugs."
| apache-2.0 |
msmolens/ITK | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/container_traits.py | 12 | 19969 | # Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines a few algorithms that deal with different properties of std containers
"""
import string
from . import calldef
from . import cpptypes
from . import namespace
from . import templates
from . import type_traits
from . import class_declaration
from .. import utils
std_namespaces = ('std', 'stdext', '__gnu_cxx')
class defaults_eraser:
    """Strips default template arguments from STD container class names.

    Each ``erase_*`` static method recognizes one container family (by its
    template-argument count and the expected default arguments) and, on
    match, returns the container name with only the explicit arguments
    kept; on mismatch it returns None.
    """
    @staticmethod
    def normalize(type_str):
        """Remove all spaces so differently-spaced spellings compare equal."""
        return type_str.replace(' ', '')
    @staticmethod
    def replace_basic_string(cls_name):
        """Collapse fully-expanded basic_string spellings to std::[w]string."""
        strings = {
            'std::string': (
                ('std::basic_string<char,std::char_traits<char>,' +
                 'std::allocator<char> >'),
                ('std::basic_string<char, std::char_traits<char>, ' +
                 'std::allocator<char> >')),
            'std::wstring': (
                ('std::basic_string<wchar_t,std::char_traits<wchar_t>,' +
                 'std::allocator<wchar_t> >'),
                ('std::basic_string<wchar_t, std::char_traits<wchar_t>, ' +
                 'std::allocator<wchar_t> >'))}
        new_name = cls_name
        for short_name, long_names in strings.items():
            for lname in long_names:
                new_name = new_name.replace(lname, short_name)
        return new_name
    class recursive_impl:
        """Helpers that recurse into nested template arguments."""
        @staticmethod
        def decorated_call_prefix(cls_name, text, doit):
            """Apply doit() to cls_name with a leading *text* stripped and restored."""
            has_text = cls_name.startswith(text)
            if has_text:
                cls_name = cls_name[len(text):]
            answer = doit(cls_name)
            if has_text:
                answer = text + answer
            return answer
        @staticmethod
        def decorated_call_suffix(cls_name, text, doit):
            """Apply doit() to cls_name with a trailing *text* stripped and restored."""
            has_text = cls_name.endswith(text)
            if has_text:
                # BUGFIX: was cls_name[: len(text)], which kept only the
                # FIRST len(text) characters instead of dropping the suffix.
                cls_name = cls_name[: -len(text)]
            answer = doit(cls_name)
            if has_text:
                answer = answer + text
            return answer
        @staticmethod
        def erase_call(cls_name):
            # find_container_traits is defined later in this module.
            global find_container_traits
            c_traits = find_container_traits(cls_name)
            if not c_traits:
                return cls_name
            return c_traits.remove_defaults(cls_name)
        @staticmethod
        def erase_recursive(cls_name):
            # Peel off (and later restore) namespace prefixes and const
            # qualifiers before dispatching to the matching traits object.
            ri = defaults_eraser.recursive_impl
            no_std = lambda cls_name: ri.decorated_call_prefix(
                cls_name, 'std::', ri.erase_call)
            no_stdext = lambda cls_name: ri.decorated_call_prefix(
                cls_name, 'stdext::', no_std)
            no_gnustd = lambda cls_name: ri.decorated_call_prefix(
                cls_name, '__gnu_cxx::', no_stdext)
            no_const = lambda cls_name: ri.decorated_call_prefix(
                cls_name, 'const ', no_gnustd)
            no_end_const = lambda cls_name: ri.decorated_call_suffix(
                cls_name, ' const', no_const)
            return no_end_const(cls_name)
    @staticmethod
    def erase_recursive(cls_name):
        """Public entry point: delegate to the recursive implementation."""
        return defaults_eraser.recursive_impl.erase_recursive(cls_name)
    @staticmethod
    def erase_allocator(cls_name, default_allocator='std::allocator'):
        """container<T, allocator<T> > -> container<T>."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        if 2 != len(c_args):
            return
        value_type = c_args[0]
        tmpl = string.Template(
            "$container< $value_type, $allocator<$value_type> >")
        tmpl = tmpl.substitute(
            container=c_name,
            value_type=value_type,
            allocator=default_allocator)
        if defaults_eraser.normalize(cls_name) == \
                defaults_eraser.normalize(tmpl):
            return templates.join(
                c_name, [defaults_eraser.erase_recursive(value_type)])
    @staticmethod
    def erase_container(cls_name, default_container_name='std::deque'):
        """adapter<T, deque<T> > -> adapter<T> (queue/stack style)."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        if 2 != len(c_args):
            return
        value_type = c_args[0]
        dc_no_defaults = defaults_eraser.erase_recursive(c_args[1])
        if defaults_eraser.normalize(dc_no_defaults) \
                != defaults_eraser.normalize(
                templates.join(default_container_name, [value_type])):
            return
        return templates.join(
            c_name, [defaults_eraser.erase_recursive(value_type)])
    @staticmethod
    def erase_container_compare(
            cls_name,
            default_container_name='std::vector',
            default_compare='std::less'):
        """adapter<T, vector<T>, less<T> > -> adapter<T> (priority_queue)."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        if 3 != len(c_args):
            return
        dc_no_defaults = defaults_eraser.erase_recursive(c_args[1])
        if defaults_eraser.normalize(dc_no_defaults) \
                != defaults_eraser.normalize(
                templates.join(default_container_name, [c_args[0]])):
            return
        dcomp_no_defaults = defaults_eraser.erase_recursive(c_args[2])
        if defaults_eraser.normalize(dcomp_no_defaults) \
                != defaults_eraser.normalize(
                templates.join(default_compare, [c_args[0]])):
            return
        value_type = defaults_eraser.erase_recursive(c_args[0])
        return templates.join(c_name, [value_type])
    @staticmethod
    def erase_compare_allocator(
            cls_name,
            default_compare='std::less',
            default_allocator='std::allocator'):
        """container<T, less<T>, allocator<T> > -> container<T> (set style)."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        if 3 != len(c_args):
            return
        value_type = c_args[0]
        tmpl = string.Template(
            "$container< $value_type, $compare<$value_type>, " +
            "$allocator<$value_type> >")
        tmpl = tmpl.substitute(
            container=c_name,
            value_type=value_type,
            compare=default_compare,
            allocator=default_allocator)
        if defaults_eraser.normalize(cls_name) == \
                defaults_eraser.normalize(tmpl):
            return templates.join(
                c_name, [defaults_eraser.erase_recursive(value_type)])
    @staticmethod
    def erase_map_compare_allocator(
            cls_name,
            default_compare='std::less',
            default_allocator='std::allocator'):
        """map<K, M, less<K>, allocator<pair<const K, M> > > -> map<K, M>."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        if 4 != len(c_args):
            return
        key_type = c_args[0]
        mapped_type = c_args[1]
        # Compilers spell the pair's const key differently; try each form.
        tmpls = [
            string.Template(
                "$container< $key_type, $mapped_type, $compare<$key_type>, " +
                "$allocator< std::pair< const $key_type, $mapped_type> > >"),
            string.Template(
                "$container< $key_type, $mapped_type, $compare<$key_type>, " +
                "$allocator< std::pair< $key_type const, $mapped_type> > >"),
            string.Template(
                "$container< $key_type, $mapped_type, $compare<$key_type>, " +
                "$allocator< std::pair< $key_type, $mapped_type> > >")]
        for tmpl in tmpls:
            tmpl = tmpl.substitute(
                container=c_name,
                key_type=key_type,
                mapped_type=mapped_type,
                compare=default_compare,
                allocator=default_allocator)
            if defaults_eraser.normalize(cls_name) == \
                    defaults_eraser.normalize(tmpl):
                return templates.join(
                    c_name,
                    [defaults_eraser.erase_recursive(key_type),
                     defaults_eraser.erase_recursive(mapped_type)])
    @staticmethod
    def erase_hash_allocator(cls_name):
        """Strip defaults from hash_set-style containers (MSVC and GCC forms)."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        if len(c_args) < 3:
            return
        default_hash = None
        default_less = 'std::less'
        default_equal_to = 'std::equal_to'
        default_allocator = 'std::allocator'
        tmpl = None
        if 3 == len(c_args):
            # MSVC form: hash_compare<T, less<T> >.
            default_hash = 'hash_compare'
            tmpl = (
                "$container< $value_type, $hash<$value_type, " +
                "$less<$value_type> >, $allocator<$value_type> >")
        elif 4 == len(c_args):
            # GCC form: hash<T>, equal_to<T>.
            default_hash = 'hash'
            tmpl = (
                "$container< $value_type, $hash<$value_type >, " +
                "$equal_to<$value_type >, $allocator<$value_type> >")
        else:
            return
        value_type = c_args[0]
        tmpl = string.Template(tmpl)
        # The hash functor may live in any of the known STD namespaces.
        for ns in std_namespaces:
            inst = tmpl.substitute(
                container=c_name,
                value_type=value_type,
                hash=ns + '::' + default_hash,
                less=default_less,
                equal_to=default_equal_to,
                allocator=default_allocator)
            if defaults_eraser.normalize(cls_name) == \
                    defaults_eraser.normalize(inst):
                return templates.join(
                    c_name, [defaults_eraser.erase_recursive(value_type)])
    @staticmethod
    def erase_hashmap_compare_allocator(cls_name):
        """Strip defaults from hash_map-style containers (MSVC and GCC forms)."""
        cls_name = defaults_eraser.replace_basic_string(cls_name)
        c_name, c_args = templates.split(cls_name)
        default_hash = None
        default_less = 'std::less'
        default_allocator = 'std::allocator'
        default_equal_to = 'std::equal_to'
        tmpl = None
        key_type = None
        mapped_type = None
        if 2 < len(c_args):
            key_type = c_args[0]
            mapped_type = c_args[1]
        else:
            return
        if 4 == len(c_args):
            # MSVC form: hash_compare<K, less<K> >.
            default_hash = 'hash_compare'
            tmpl = string.Template(
                "$container< $key_type, $mapped_type, " +
                "$hash<$key_type, $less<$key_type> >, " +
                "$allocator< std::pair< const $key_type, $mapped_type> > >")
            if key_type.startswith('const ') or key_type.endswith(' const'):
                tmpl = string.Template(
                    "$container< $key_type, $mapped_type, $hash<$key_type, " +
                    "$less<$key_type> >, $allocator< std::pair< $key_type, " +
                    "$mapped_type> > >")
        elif 5 == len(c_args):
            # GCC form: hash<K>, equal_to<K>.
            default_hash = 'hash'
            tmpl = string.Template(
                "$container< $key_type, $mapped_type, $hash<$key_type >, " +
                "$equal_to<$key_type>, $allocator< $mapped_type> >")
            if key_type.startswith('const ') or key_type.endswith(' const'):
                tmpl = string.Template(
                    "$container< $key_type, $mapped_type, " +
                    "$hash<$key_type >, $equal_to<$key_type>, " +
                    "$allocator< $mapped_type > >")
        else:
            return
        for ns in std_namespaces:
            inst = tmpl.substitute(
                container=c_name,
                key_type=key_type,
                mapped_type=mapped_type,
                hash=ns + '::' + default_hash,
                less=default_less,
                equal_to=default_equal_to,
                allocator=default_allocator)
            if defaults_eraser.normalize(cls_name) == \
                    defaults_eraser.normalize(inst):
                return templates.join(
                    c_name,
                    [defaults_eraser.erase_recursive(key_type),
                     defaults_eraser.erase_recursive(mapped_type)])
class container_traits_impl_t:
"""
implements the functionality needed for convenient work with STD container
classes
Implemented functionality:
* find out whether a declaration is STD container or not
* find out container value( mapped ) type
This class tries to be useful as much, as possible. For example, for class
declaration( and not definition ) it parsers the class name in order to
extract the information.
"""
def __init__(
self,
container_name,
element_type_index,
element_type_typedef,
defaults_remover,
key_type_index=None,
key_type_typedef=None):
"""
:param container_name: std container name
:param element_type_index: position of value\\mapped type within
template arguments list
:param element_type_typedef: class typedef to the value\\mapped type
:param key_type_index: position of key type within template arguments
list
:param key_type_typedef: class typedef to the key type
"""
self._name = container_name
self.remove_defaults_impl = defaults_remover
self.element_type_index = element_type_index
self.element_type_typedef = element_type_typedef
self.key_type_index = key_type_index
self.key_type_typedef = key_type_typedef
def name(self):
return self._name
def get_container_or_none(self, type_):
"""returns reference to the class declaration or None"""
type_ = type_traits.remove_alias(type_)
type_ = type_traits.remove_cv(type_)
cls = None
if isinstance(type_, cpptypes.declarated_t):
cls = type_traits.remove_alias(type_.declaration)
elif isinstance(type_, class_declaration.class_t):
cls = type_
elif isinstance(type_, class_declaration.class_declaration_t):
cls = type_
else:
return
if not cls.name.startswith(self.name() + '<'):
return
for ns in std_namespaces:
if type_traits.impl_details.is_defined_in_xxx(ns, cls):
return cls
def is_my_case(self, type_):
"""checks, whether type is STD container or not"""
return bool(self.get_container_or_none(type_))
def class_declaration(self, type_):
"""returns reference to the class declaration"""
cls = self.get_container_or_none(type_)
if not cls:
raise TypeError(
'Type "%s" is not instantiation of std::%s' %
(type_.decl_string, self.name()))
return cls
def is_sequence(self, type_):
# raise exception if type is not container
unused = self.class_declaration(type_)
return self.key_type_index is None
def is_mapping(self, type_):
return not self.is_sequence(type_)
def __find_xxx_type(
self,
type_,
xxx_index,
xxx_typedef,
cache_property_name):
cls = self.class_declaration(type_)
result = getattr(cls.cache, cache_property_name)
if not result:
if isinstance(cls, class_declaration.class_t):
xxx_type = cls.typedef(xxx_typedef, recursive=False).type
result = type_traits.remove_declarated(xxx_type)
else:
xxx_type_str = templates.args(cls.name)[xxx_index]
result = type_traits.impl_details.find_value_type(
cls.top_parent,
xxx_type_str)
if None is result:
raise RuntimeError(
"Unable to find out %s '%s' key\\value type." %
(self.name(), cls.decl_string))
setattr(cls.cache, cache_property_name, result)
return result
def element_type(self, type_):
"""returns reference to the class value\\mapped type declaration"""
return self.__find_xxx_type(
type_,
self.element_type_index,
self.element_type_typedef,
'container_element_type')
def key_type(self, type_):
"""returns reference to the class key type declaration"""
if not self.is_mapping(type_):
raise TypeError(
'Type "%s" is not "mapping" container' %
str(type_))
return self.__find_xxx_type(
type_,
self.key_type_index,
self.key_type_typedef,
'container_key_type')
def remove_defaults(self, type_or_string):
    """Strip default template arguments from a class instantiation name.

    For example:

    .. code-block:: c++

       std::vector< int, std::allocator< int > >

    becomes

    .. code-block:: c++

       std::vector< int >

    Accepts either a declaration object or a plain string.
    """
    if utils.is_str(type_or_string):
        name = type_or_string
    else:
        name = self.class_declaration(type_or_string).name
    eraser = self.remove_defaults_impl
    if not eraser:
        return name
    # Fall back to the original name when the eraser produced nothing.
    return eraser(name) or name
# Every concrete traits object below is a plain instance of the
# implementation class; the alias documents the intent.
create_traits = container_traits_impl_t

# Sequence containers: element type at template-argument index 0,
# exposed through the nested "value_type" typedef.
list_traits = create_traits(
    'list',
    0,
    'value_type',
    defaults_eraser.erase_allocator)

deque_traits = create_traits(
    'deque',
    0,
    'value_type',
    defaults_eraser.erase_allocator)

# Container adaptors: their default is the underlying container (and,
# for priority_queue, the comparator).
queue_traits = create_traits(
    'queue',
    0,
    'value_type',
    defaults_eraser.erase_container)

priority_queue_traits = create_traits(
    'priority_queue',
    0,
    'value_type',
    defaults_eraser.erase_container_compare)

vector_traits = create_traits(
    'vector',
    0,
    'value_type',
    defaults_eraser.erase_allocator)

stack_traits = create_traits(
    'stack',
    0,
    'value_type',
    defaults_eraser.erase_container)

# Associative mapping containers: mapped type at argument index 1,
# key type at index 0.
map_traits = create_traits(
    'map',
    1,
    'mapped_type',
    defaults_eraser.erase_map_compare_allocator,
    key_type_index=0,
    key_type_typedef='key_type')

multimap_traits = create_traits(
    'multimap',
    1,
    'mapped_type',
    defaults_eraser.erase_map_compare_allocator,
    key_type_index=0,
    key_type_typedef='key_type')

hash_map_traits = create_traits(
    'hash_map',
    1,
    'mapped_type',
    defaults_eraser.erase_hashmap_compare_allocator,
    key_type_index=0,
    key_type_typedef='key_type')

hash_multimap_traits = create_traits(
    'hash_multimap',
    1,
    'mapped_type',
    defaults_eraser.erase_hashmap_compare_allocator,
    key_type_index=0,
    key_type_typedef='key_type')

# Set-like containers: treated as sequences (no separate key type).
set_traits = create_traits(
    'set',
    0,
    'value_type',
    defaults_eraser.erase_compare_allocator)

multiset_traits = create_traits(
    'multiset',
    0,
    'value_type',
    defaults_eraser.erase_compare_allocator)

hash_set_traits = create_traits(
    'hash_set',
    0,
    'value_type',
    defaults_eraser.erase_hash_allocator)

hash_multiset_traits = create_traits(
    'hash_multiset',
    0,
    'value_type',
    defaults_eraser.erase_hash_allocator)

container_traits = (
    list_traits,
    deque_traits,
    queue_traits,
    priority_queue_traits,
    vector_traits,
    stack_traits,
    map_traits,
    multimap_traits,
    hash_map_traits,
    hash_multimap_traits,
    set_traits,
    hash_set_traits,
    multiset_traits,
    hash_multiset_traits)
"""tuple of all STD container traits classes"""
def find_container_traits(cls_or_string):
    """Return the traits object matching a declaration or a type string.

    Returns None when no known STD container matches.
    """
    if not utils.is_str(cls_or_string):
        # Declaration object: let each traits instance test it.
        for traits in container_traits:
            if traits.is_my_case(cls_or_string):
                return traits
        return None
    # String form: must look like a template instantiation.
    if not templates.is_instantiation(cls_or_string):
        return None
    name = templates.name(cls_or_string)
    prefix = 'std::'
    if name.startswith(prefix):
        name = name[len(prefix):]
    for traits in container_traits:
        if traits.name() == name:
            return traits
    return None
| apache-2.0 |
panfengfeng/spark | examples/src/main/python/ml/chisq_selector_example.py | 121 | 1716 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import ChiSqSelector
from pyspark.ml.linalg import Vectors
# $example off$
if __name__ == "__main__":
    # Bootstrap a Spark session for this standalone example.
    spark = SparkSession\
        .builder\
        .appName("ChiSqSelectorExample")\
        .getOrCreate()

    # $example on$
    # Three labeled rows with four-dimensional feature vectors.
    df = spark.createDataFrame([
        (7, Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0,),
        (8, Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0,),
        (9, Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0,)], ["id", "features", "clicked"])

    # Keep only the single feature with the strongest chi-squared
    # dependence on the "clicked" label.
    selector = ChiSqSelector(numTopFeatures=1, featuresCol="features",
                             outputCol="selectedFeatures", labelCol="clicked")

    result = selector.fit(df).transform(df)

    print("ChiSqSelector output with top %d features selected" % selector.getNumTopFeatures())
    result.show()
    # $example off$

    spark.stop()
| apache-2.0 |
tonybaloney/st2 | st2api/tests/unit/controllers/v1/test_executions_filters.py | 1 | 17542 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
import datetime
import bson
import six
from six.moves import http_client
import st2tests.config as tests_config
tests_config.parse_args()
from tests import FunctionalTest
from st2tests.fixtures.packs import executions as fixture
from st2tests.fixtures import history_views
from st2common.util import isotime
from st2common.util import date as date_utils
from st2api.controllers.v1.actionexecutions import ActionExecutionsController
from st2api.controllers.v1.executionviews import FILTERS_WITH_VALID_NULL_VALUES
from st2common.persistence.execution import ActionExecution
from st2common.models.api.execution import ActionExecutionAPI
class TestActionExecutionFilters(FunctionalTest):
    """Filtering, pagination and sorting tests for the /v1/executions API.

    setUpClass seeds the datastore with ``num_records`` randomized
    executions derived from two artifact templates ("chain" workflows
    and "local" actions, some of which are parented to a chain).
    """

    @classmethod
    def tearDownClass(cls):
        # Intentionally a no-op.  NOTE: this was previously misspelled
        # "testDownClass", so unittest never invoked it at all.
        pass

    @classmethod
    def setUpClass(cls):
        """Seed the datastore with ``num_records`` randomized executions."""
        super(TestActionExecutionFilters, cls).setUpClass()
        cls.dt_base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
        cls.num_records = 100

        cls.refs = {}
        cls.start_timestamps = []
        cls.fake_types = [
            {
                'trigger': copy.deepcopy(fixture.ARTIFACTS['trigger']),
                'trigger_type': copy.deepcopy(fixture.ARTIFACTS['trigger_type']),
                'trigger_instance': copy.deepcopy(fixture.ARTIFACTS['trigger_instance']),
                'rule': copy.deepcopy(fixture.ARTIFACTS['rule']),
                'action': copy.deepcopy(fixture.ARTIFACTS['actions']['chain']),
                'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['action-chain']),
                'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['workflow']),
                'context': copy.deepcopy(fixture.ARTIFACTS['context']),
                'children': []
            },
            {
                'action': copy.deepcopy(fixture.ARTIFACTS['actions']['local']),
                'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['run-local']),
                'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['task1'])
            }
        ]

        def assign_parent(child):
            # Pick a random already-stored "chain" execution as the parent
            # and record the relationship on both sides.
            # (Uses six.iteritems for consistency with the rest of the file;
            # the original called dict.iteritems() directly.)
            candidates = [v for k, v in six.iteritems(cls.refs)
                          if v.action['name'] == 'chain']
            if candidates:
                parent = random.choice(candidates)
                child['parent'] = str(parent.id)
                parent.children.append(child['id'])
                cls.refs[str(parent.id)] = ActionExecution.add_or_update(parent)

        for i in range(cls.num_records):
            obj_id = str(bson.ObjectId())
            timestamp = cls.dt_base + datetime.timedelta(seconds=i)
            fake_type = random.choice(cls.fake_types)
            data = copy.deepcopy(fake_type)
            data['id'] = obj_id
            data['start_timestamp'] = isotime.format(timestamp, offset=False)
            data['end_timestamp'] = isotime.format(timestamp, offset=False)
            data['status'] = data['liveaction']['status']
            data['result'] = data['liveaction']['result']
            # Roughly half of the "local" executions get a chain parent.
            if fake_type['action']['name'] == 'local' and random.choice([True, False]):
                assign_parent(data)
            wb_obj = ActionExecutionAPI(**data)
            db_obj = ActionExecutionAPI.to_model(wb_obj)
            cls.refs[obj_id] = ActionExecution.add_or_update(db_obj)
            cls.start_timestamps.append(timestamp)

        cls.start_timestamps = sorted(cls.start_timestamps)

    def test_get_all(self):
        """Every seeded execution is returned with a matching total count."""
        response = self.app.get('/v1/executions')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), self.num_records)
        self.assertEqual(response.headers['X-Total-Count'], str(self.num_records))
        ids = [item['id'] for item in response.json]
        self.assertListEqual(sorted(ids), sorted(self.refs.keys()))

    def test_get_all_exclude_attributes(self):
        """The exclude_attributes query parameter removes the field."""
        # No attributes excluded
        response = self.app.get('/v1/executions?action=executions.local&limit=1')
        self.assertEqual(response.status_int, 200)
        self.assertTrue('result' in response.json[0])

        # Exclude "result" attribute
        path = '/v1/executions?action=executions.local&limit=1&exclude_attributes=result'
        response = self.app.get(path)
        self.assertEqual(response.status_int, 200)
        self.assertFalse('result' in response.json[0])

    def test_get_one(self):
        """A single execution can be retrieved by id."""
        # list() keeps this working on Python 3 where keys() is a view.
        obj_id = random.choice(list(self.refs.keys()))
        response = self.app.get('/v1/executions/%s' % obj_id)
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, dict)
        record = response.json
        fake_record = ActionExecutionAPI.from_model(self.refs[obj_id])
        self.assertEqual(record['id'], obj_id)
        self.assertDictEqual(record['action'], fake_record.action)
        self.assertDictEqual(record['runner'], fake_record.runner)
        self.assertDictEqual(record['liveaction'], fake_record.liveaction)

    def test_get_one_failed(self):
        """An unknown id yields 404."""
        response = self.app.get('/v1/executions/%s' % str(bson.ObjectId()),
                                expect_errors=True)
        self.assertEqual(response.status_int, http_client.NOT_FOUND)

    def test_limit(self):
        """The limit parameter caps the page size but not the total count."""
        limit = 10
        refs = [k for k, v in six.iteritems(self.refs) if v.action['name'] == 'chain']
        response = self.app.get('/v1/executions?action=executions.chain&limit=%s' %
                                limit)
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), limit)
        self.assertEqual(response.headers['X-Limit'], str(limit))
        self.assertEqual(response.headers['X-Total-Count'], str(len(refs)), response.json)
        ids = [item['id'] for item in response.json]
        self.assertListEqual(list(set(ids) - set(refs)), [])

    def test_query(self):
        """Filtering by action ref returns exactly the matching records."""
        refs = [k for k, v in six.iteritems(self.refs) if v.action['name'] == 'chain']
        response = self.app.get('/v1/executions?action=executions.chain')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), len(refs))
        self.assertEqual(response.headers['X-Total-Count'], str(len(refs)))
        ids = [item['id'] for item in response.json]
        self.assertListEqual(sorted(ids), sorted(refs))

    def test_filters(self):
        """Every generically-supported filter returns at least one record."""
        excludes = ['parent', 'timestamp', 'action', 'liveaction', 'timestamp_gt',
                    'timestamp_lt', 'status']
        for param, field in six.iteritems(ActionExecutionsController.supported_filters):
            if param in excludes:
                continue
            # Walk the dotted field path into the first artifact template.
            value = self.fake_types[0]
            for item in field.split('.'):
                value = value[item]
            response = self.app.get('/v1/executions?%s=%s' % (param, value))
            self.assertEqual(response.status_int, 200)
            self.assertIsInstance(response.json, list)
            self.assertGreater(len(response.json), 0)
            self.assertGreater(int(response.headers['X-Total-Count']), 0)

    def test_parent(self):
        """Filtering by parent id returns exactly that parent's children."""
        refs = [v for k, v in six.iteritems(self.refs)
                if v.action['name'] == 'chain' and v.children]
        self.assertTrue(refs)
        ref = random.choice(refs)
        response = self.app.get('/v1/executions?parent=%s' % str(ref.id))
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), len(ref.children))
        self.assertEqual(response.headers['X-Total-Count'], str(len(ref.children)))
        ids = [item['id'] for item in response.json]
        self.assertListEqual(sorted(ids), sorted(ref.children))

    def test_parentless(self):
        """parent=null selects only executions without a parent."""
        refs = {k: v for k, v in six.iteritems(self.refs) if not getattr(v, 'parent', None)}
        self.assertTrue(refs)
        self.assertNotEqual(len(refs), self.num_records)
        response = self.app.get('/v1/executions?parent=null')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), len(refs))
        self.assertEqual(response.headers['X-Total-Count'], str(len(refs)))
        ids = [item['id'] for item in response.json]
        self.assertListEqual(sorted(ids), sorted(refs.keys()))

    def test_pagination(self):
        """Paging through all records yields each record exactly once."""
        retrieved = []
        page_size = 10
        # Floor division for consistency (and Python 3 compatibility).
        page_count = self.num_records // page_size
        for i in range(page_count):
            offset = i * page_size
            response = self.app.get('/v1/executions?offset=%s&limit=%s' % (
                offset, page_size))
            self.assertEqual(response.status_int, 200)
            self.assertIsInstance(response.json, list)
            self.assertEqual(len(response.json), page_size)
            self.assertEqual(response.headers['X-Limit'], str(page_size))
            self.assertEqual(response.headers['X-Total-Count'], str(self.num_records))
            ids = [item['id'] for item in response.json]
            self.assertListEqual(list(set(ids) - set(self.refs.keys())), [])
            self.assertListEqual(sorted(list(set(ids) - set(retrieved))), sorted(ids))
            retrieved += ids
        self.assertListEqual(sorted(retrieved), sorted(self.refs.keys()))

    def test_ui_history_query(self):
        # In this test we only care about making sure this exact query works. This query is used
        # by the webui for the history page so it is special and breaking this is bad.
        limit = 50
        history_query = '/v1/executions?limit={}&parent=null&exclude_attributes=' \
                        'result%2Ctrigger_instance&status=&action=&trigger_type=&rule=&' \
                        'offset=0'.format(limit)
        response = self.app.get(history_query)
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), limit)
        # The header is a string; without int() the comparison was a
        # vacuous str-vs-int comparison that could never fail on Python 2.
        self.assertTrue(int(response.headers['X-Total-Count']) > limit)

    def test_datetime_range(self):
        """A timestamp range filter selects the records in the window;
        reversing the range endpoints reverses the sort order."""
        dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
        response = self.app.get('/v1/executions?timestamp=%s' % dt_range)
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), 10)
        self.assertEqual(response.headers['X-Total-Count'], '10')
        dt1 = response.json[0]['start_timestamp']
        dt2 = response.json[9]['start_timestamp']
        self.assertLess(isotime.parse(dt1), isotime.parse(dt2))

        dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
        response = self.app.get('/v1/executions?timestamp=%s' % dt_range)
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        self.assertEqual(len(response.json), 10)
        self.assertEqual(response.headers['X-Total-Count'], '10')
        dt1 = response.json[0]['start_timestamp']
        dt2 = response.json[9]['start_timestamp']
        self.assertLess(isotime.parse(dt2), isotime.parse(dt1))

    def test_default_sort(self):
        """Default ordering is newest first."""
        response = self.app.get('/v1/executions')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        dt1 = response.json[0]['start_timestamp']
        dt2 = response.json[len(response.json) - 1]['start_timestamp']
        self.assertLess(isotime.parse(dt2), isotime.parse(dt1))

    def test_ascending_sort(self):
        """sort_asc=True orders oldest first."""
        response = self.app.get('/v1/executions?sort_asc=True')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        dt1 = response.json[0]['start_timestamp']
        dt2 = response.json[len(response.json) - 1]['start_timestamp']
        self.assertLess(isotime.parse(dt1), isotime.parse(dt2))

    def test_descending_sort(self):
        """sort_desc=True orders newest first."""
        response = self.app.get('/v1/executions?sort_desc=True')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, list)
        dt1 = response.json[0]['start_timestamp']
        dt2 = response.json[len(response.json) - 1]['start_timestamp']
        self.assertLess(isotime.parse(dt2), isotime.parse(dt1))

    def test_timestamp_lt_and_gt_filter(self):
        """timestamp_gt / timestamp_lt bound the result set exclusively."""
        def isoformat(timestamp):
            return isotime.format(timestamp, offset=False)

        # Last (largest) timestamp, there are no executions with a greater timestamp
        timestamp = self.start_timestamps[-1]
        response = self.app.get('/v1/executions?timestamp_gt=%s' % (isoformat(timestamp)))
        self.assertEqual(len(response.json), 0)

        # First (smallest) timestamp, there are no executions with a smaller timestamp
        timestamp = self.start_timestamps[0]
        response = self.app.get('/v1/executions?timestamp_lt=%s' % (isoformat(timestamp)))
        self.assertEqual(len(response.json), 0)

        # Second last, there should be one timestamp greater than it
        timestamp = self.start_timestamps[-2]
        response = self.app.get('/v1/executions?timestamp_gt=%s' % (isoformat(timestamp)))
        self.assertEqual(len(response.json), 1)
        self.assertTrue(isotime.parse(response.json[0]['start_timestamp']) > timestamp)

        # Second one, there should be one timestamp smaller than it
        timestamp = self.start_timestamps[1]
        response = self.app.get('/v1/executions?timestamp_lt=%s' % (isoformat(timestamp)))
        self.assertEqual(len(response.json), 1)
        self.assertTrue(isotime.parse(response.json[0]['start_timestamp']) < timestamp)

        # Half of the timestamps should be smaller
        index = (len(self.start_timestamps) - 1) // 2
        timestamp = self.start_timestamps[index]
        response = self.app.get('/v1/executions?timestamp_lt=%s' % (isoformat(timestamp)))
        self.assertEqual(len(response.json), index)
        self.assertTrue(isotime.parse(response.json[0]['start_timestamp']) < timestamp)

        # Half of the timestamps should be greater
        index = (len(self.start_timestamps) - 1) // 2
        timestamp = self.start_timestamps[-index]
        response = self.app.get('/v1/executions?timestamp_gt=%s' % (isoformat(timestamp)))
        self.assertEqual(len(response.json), (index - 1))
        self.assertTrue(isotime.parse(response.json[0]['start_timestamp']) > timestamp)

        # Both, lt and gt filters, should return exactly two results
        timestamp_gt = self.start_timestamps[10]
        timestamp_lt = self.start_timestamps[13]
        response = self.app.get('/v1/executions?timestamp_gt=%s&timestamp_lt=%s' %
                                (isoformat(timestamp_gt), isoformat(timestamp_lt)))
        self.assertEqual(len(response.json), 2)
        self.assertTrue(isotime.parse(response.json[0]['start_timestamp']) > timestamp_gt)
        self.assertTrue(isotime.parse(response.json[1]['start_timestamp']) > timestamp_gt)
        self.assertTrue(isotime.parse(response.json[0]['start_timestamp']) < timestamp_lt)
        self.assertTrue(isotime.parse(response.json[1]['start_timestamp']) < timestamp_lt)

    def test_filters_view(self):
        """The default filters view matches the recorded artifacts."""
        response = self.app.get('/v1/executions/views/filters')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, dict)
        self.assertEqual(len(response.json), len(history_views.ARTIFACTS['filters']['default']))
        for key, value in six.iteritems(history_views.ARTIFACTS['filters']['default']):
            filter_values = response.json[key]

            # Verify empty (None / null) filters are excluded
            if key not in FILTERS_WITH_VALID_NULL_VALUES:
                self.assertTrue(None not in filter_values)

            if None in value or None in filter_values:
                filter_values = [item for item in filter_values if item is not None]
                value = [item for item in value if item is not None]

            self.assertEqual(set(filter_values), set(value))

    def test_filters_view_specific_types(self):
        """Unknown filter types are ignored; known ones are returned."""
        response = self.app.get('/v1/executions/views/filters?types=action,user,nonexistent')
        self.assertEqual(response.status_int, 200)
        self.assertIsInstance(response.json, dict)
        self.assertEqual(len(response.json), len(history_views.ARTIFACTS['filters']['specific']))
        for key, value in six.iteritems(history_views.ARTIFACTS['filters']['specific']):
            self.assertEqual(set(response.json[key]), set(value))
| apache-2.0 |
jainaman224/Algo_Ds_Notes | Connected_Components_Undirected_Graph/Connected_Components_Undirected_Graph.py | 1 | 1725 |
class Graph:
    """Undirected graph on vertices 0..nodes-1 stored as adjacency lists."""

    def __init__(self, nodes):
        self.Nodes = nodes
        self.adj = [[] for _ in range(nodes)]

    def addEdge(self, u, v):
        """Insert the undirected edge (u, v)."""
        # Record the edge in both endpoints' adjacency lists.
        self.adj[u].append(v)
        self.adj[v].append(u)

    def DFS(self, vertex, visited):
        """Depth-first traversal printing each vertex as it is reached."""
        visited[vertex] = True
        print(vertex, end=" ")
        for neighbour in self.adj[vertex]:
            if not visited[neighbour]:
                self.DFS(neighbour, visited)

    def connectedComponents(self):
        """Print every connected component, one per line."""
        component = 1
        visited = [False] * self.Nodes
        for vertex in range(self.Nodes):
            # An unvisited vertex starts a component not seen before.
            if not visited[vertex]:
                print("Connected Commponent:", component)
                self.DFS(vertex, visited)
                print()
                component += 1
def main():
    """Build the sample six-vertex graph and report its components.

    Layout:
        0---3   1--4
        |       |
        2       5
    """
    vertex_count = 6
    sample = Graph(vertex_count)
    # Same edges, in the same insertion order, as the original demo.
    for u, v in ((0, 2), (3, 0), (1, 4), (5, 1)):
        sample.addEdge(u, v)
    sample.connectedComponents()


if __name__ == "__main__":
    main()
'''
Output:
Connected Commponent: 1
0 2 3
Connected Commponent: 2
1 4 5
'''
| gpl-3.0 |
liqin75/vse-vpnaas-plugin | quantum/tests/unit/test_quantum_manager.py | 5 | 3062 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import types
import fixtures
from oslo.config import cfg
from quantum.common import config
from quantum.common.test_lib import test_config
from quantum.manager import QuantumManager
from quantum.openstack.common import log as logging
from quantum.plugins.common import constants
from quantum.tests import base
from quantum.tests.unit import dummy_plugin
LOG = logging.getLogger(__name__)
DB_PLUGIN_KLASS = 'quantum.db.db_base_plugin_v2.QuantumDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
    """Return a path built from the test etc/ directory plus *p."""
    parts = (ETCDIR,) + p
    return os.path.join(*parts)
class QuantumManagerTestCase(base.BaseTestCase):
    """Tests for service-plugin loading in QuantumManager."""

    def setUp(self):
        super(QuantumManagerTestCase, self).setUp()
        args = ['--config-file', etcdir('quantum.conf.test')]
        # If test_config specifies some config-file, use it, as well
        config.parse(args=args)
        self.addCleanup(cfg.CONF.reset)
        # Reset the singleton between tests.
        self.useFixture(
            fixtures.MonkeyPatch('quantum.manager.QuantumManager._instance'))

    def test_service_plugin_is_loaded(self):
        """A configured service plugin is instantiated and retrievable."""
        cfg.CONF.set_override("core_plugin",
                              test_config.get('plugin_name_v2',
                                              DB_PLUGIN_KLASS))
        cfg.CONF.set_override("service_plugins",
                              ["quantum.tests.unit.dummy_plugin."
                               "DummyServicePlugin"])
        mgr = QuantumManager.get_instance()
        plugin = mgr.get_service_plugins()[constants.DUMMY]
        self.assertTrue(
            isinstance(plugin,
                       (dummy_plugin.DummyServicePlugin, types.ClassType)),
            "loaded plugin should be of type QuantumDummyPlugin")

    def test_multiple_plugins_specified_for_service_type(self):
        """Configuring two plugins for one service type must raise."""
        cfg.CONF.set_override("service_plugins",
                              ["quantum.tests.unit.dummy_plugin."
                               "QuantumDummyPlugin",
                               "quantum.tests.unit.dummy_plugin."
                               "QuantumDummyPlugin"])
        # The original try/except pattern caught Exception, which also
        # swallowed the AssertionError raised by assertTrue(False) - so
        # this test could never fail.  assertRaises states the intent
        # directly and actually enforces it.
        with self.assertRaises(Exception):
            QuantumManager.get_instance().get_service_plugins()
jkorell/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/cp856.py | 93 | 12986 | """ Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp856 codec backed by the module's charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp856 encoder; charmap codecs carry no state, so each
    chunk is encoded independently and `final` is irrelevant."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp856 decoder; single-byte charmaps need no buffering."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits everything from Codec; charmap codecs need no stream state.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits everything from Codec; charmap codecs need no stream state.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this module as 'cp856'."""
    return codecs.CodecInfo(
        name='cp856',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u05d0' # 0x80 -> HEBREW LETTER ALEF
u'\u05d1' # 0x81 -> HEBREW LETTER BET
u'\u05d2' # 0x82 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x83 -> HEBREW LETTER DALET
u'\u05d4' # 0x84 -> HEBREW LETTER HE
u'\u05d5' # 0x85 -> HEBREW LETTER VAV
u'\u05d6' # 0x86 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x87 -> HEBREW LETTER HET
u'\u05d8' # 0x88 -> HEBREW LETTER TET
u'\u05d9' # 0x89 -> HEBREW LETTER YOD
u'\u05da' # 0x8A -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x8B -> HEBREW LETTER KAF
u'\u05dc' # 0x8C -> HEBREW LETTER LAMED
u'\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x8E -> HEBREW LETTER MEM
u'\u05df' # 0x8F -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x90 -> HEBREW LETTER NUN
u'\u05e1' # 0x91 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0x92 -> HEBREW LETTER AYIN
u'\u05e3' # 0x93 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x94 -> HEBREW LETTER PE
u'\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x96 -> HEBREW LETTER TSADI
u'\u05e7' # 0x97 -> HEBREW LETTER QOF
u'\u05e8' # 0x98 -> HEBREW LETTER RESH
u'\u05e9' # 0x99 -> HEBREW LETTER SHIN
u'\u05ea' # 0x9A -> HEBREW LETTER TAV
u'\ufffe' # 0x9B -> UNDEFINED
u'\xa3' # 0x9C -> POUND SIGN
u'\ufffe' # 0x9D -> UNDEFINED
u'\xd7' # 0x9E -> MULTIPLICATION SIGN
u'\ufffe' # 0x9F -> UNDEFINED
u'\ufffe' # 0xA0 -> UNDEFINED
u'\ufffe' # 0xA1 -> UNDEFINED
u'\ufffe' # 0xA2 -> UNDEFINED
u'\ufffe' # 0xA3 -> UNDEFINED
u'\ufffe' # 0xA4 -> UNDEFINED
u'\ufffe' # 0xA5 -> UNDEFINED
u'\ufffe' # 0xA6 -> UNDEFINED
u'\ufffe' # 0xA7 -> UNDEFINED
u'\ufffe' # 0xA8 -> UNDEFINED
u'\xae' # 0xA9 -> REGISTERED SIGN
u'\xac' # 0xAA -> NOT SIGN
u'\xbd' # 0xAB -> VULGAR FRACTION ONE HALF
u'\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER
u'\ufffe' # 0xAD -> UNDEFINED
u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0xB0 -> LIGHT SHADE
u'\u2592' # 0xB1 -> MEDIUM SHADE
u'\u2593' # 0xB2 -> DARK SHADE
u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\ufffe' # 0xB5 -> UNDEFINED
u'\ufffe' # 0xB6 -> UNDEFINED
u'\ufffe' # 0xB7 -> UNDEFINED
u'\xa9' # 0xB8 -> COPYRIGHT SIGN
u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0xBD -> CENT SIGN
u'\xa5' # 0xBE -> YEN SIGN
u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\ufffe' # 0xC6 -> UNDEFINED
u'\ufffe' # 0xC7 -> UNDEFINED
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0xCF -> CURRENCY SIGN
u'\ufffe' # 0xD0 -> UNDEFINED
u'\ufffe' # 0xD1 -> UNDEFINED
u'\ufffe' # 0xD2 -> UNDEFINED
u'\ufffe' # 0xD3 -> UNDEFINEDS
u'\ufffe' # 0xD4 -> UNDEFINED
u'\ufffe' # 0xD5 -> UNDEFINED
u'\ufffe' # 0xD6 -> UNDEFINEDE
u'\ufffe' # 0xD7 -> UNDEFINED
u'\ufffe' # 0xD8 -> UNDEFINED
u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0xDB -> FULL BLOCK
u'\u2584' # 0xDC -> LOWER HALF BLOCK
u'\xa6' # 0xDD -> BROKEN BAR
u'\ufffe' # 0xDE -> UNDEFINED
u'\u2580' # 0xDF -> UPPER HALF BLOCK
u'\ufffe' # 0xE0 -> UNDEFINED
u'\ufffe' # 0xE1 -> UNDEFINED
u'\ufffe' # 0xE2 -> UNDEFINED
u'\ufffe' # 0xE3 -> UNDEFINED
u'\ufffe' # 0xE4 -> UNDEFINED
u'\ufffe' # 0xE5 -> UNDEFINED
u'\xb5' # 0xE6 -> MICRO SIGN
u'\ufffe' # 0xE7 -> UNDEFINED
u'\ufffe' # 0xE8 -> UNDEFINED
u'\ufffe' # 0xE9 -> UNDEFINED
u'\ufffe' # 0xEA -> UNDEFINED
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\xaf' # 0xEE -> MACRON
u'\xb4' # 0xEF -> ACUTE ACCENT
u'\xad' # 0xF0 -> SOFT HYPHEN
u'\xb1' # 0xF1 -> PLUS-MINUS SIGN
u'\u2017' # 0xF2 -> DOUBLE LOW LINE
u'\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0xF4 -> PILCROW SIGN
u'\xa7' # 0xF5 -> SECTION SIGN
u'\xf7' # 0xF6 -> DIVISION SIGN
u'\xb8' # 0xF7 -> CEDILLA
u'\xb0' # 0xF8 -> DEGREE SIGN
u'\xa8' # 0xF9 -> DIAERESIS
u'\xb7' # 0xFA -> MIDDLE DOT
u'\xb9' # 0xFB -> SUPERSCRIPT ONE
u'\xb3' # 0xFC -> SUPERSCRIPT THREE
u'\xb2' # 0xFD -> SUPERSCRIPT TWO
u'\u25a0' # 0xFE -> BLACK SQUARE
u'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
dcastro9/video_library_project | Django/django_openid_auth/admin.py | 2 | 3685 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2008-2010 Canonical Ltd.
# Copyright (C) 2010 Dave Walker
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django.conf import settings
from django.contrib import admin
from django_openid_auth import conf
from django_openid_auth.models import Nonce, Association, UserOpenID
from django_openid_auth.store import DjangoOpenIDStore
class NonceAdmin(admin.ModelAdmin):
    """Admin options for stored OpenID nonces."""
    actions = ['cleanup_nonces']
    list_display = ('server_url', 'timestamp')
    def cleanup_nonces(self, request, queryset):
        # The store sweeps every expired nonce; the selected queryset is
        # intentionally not used.
        removed = DjangoOpenIDStore().cleanupNonces()
        self.message_user(request, "%d expired nonces removed" % removed)
    cleanup_nonces.short_description = "Clean up expired nonces"
admin.site.register(Nonce, NonceAdmin)
class AssociationAdmin(admin.ModelAdmin):
    """Admin options for stored OpenID associations."""
    search_fields = ('server_url',)
    list_filter = ('assoc_type',)
    list_display = ('server_url', 'assoc_type')
    actions = ['cleanup_associations']
    def cleanup_associations(self, request, queryset):
        # Sweeps every expired association, not just the selected rows.
        removed = DjangoOpenIDStore().cleanupAssociations()
        self.message_user(request, "%d expired associations removed" % removed)
    cleanup_associations.short_description = "Clean up expired associations"
admin.site.register(Association, AssociationAdmin)
class UserOpenIDAdmin(admin.ModelAdmin):
    """Admin options for the user <-> claimed OpenID mapping."""
    search_fields = ('claimed_id',)
    list_display = ('user', 'claimed_id')
admin.site.register(UserOpenID, UserOpenIDAdmin)
# Support for allowing openid authentication for /admin (django.contrib.admin)
if conf.USE_AS_ADMIN_LOGIN:
    from django.http import HttpResponseRedirect
    from django_openid_auth import views
    def _openid_login(self, request, error_message='', extra_context=None):
        # Replacement for AdminSite.display_login_form: instead of showing
        # Django's username/password form, bounce the user through OpenID.
        # NOTE(review): is_authenticated is invoked as a method here, which
        # matches pre-1.10 Django; on newer Django it is a property -- confirm
        # against the project's Django version.
        if request.user.is_authenticated():
            if not request.user.is_staff:
                return views.render_failure(
                    request, "User %s does not have admin access."
                    % request.user.username)
            # An already-authenticated staff user landing back on the login
            # form is treated as an unexpected error condition.
            return views.render_failure(
                request, "Unknown Error: %s" % error_message)
        else:
            # Redirect to openid login path,
            return HttpResponseRedirect(
                settings.LOGIN_URL + "?next=" + request.get_full_path())
    # Override the standard admin login form.
    admin.sites.AdminSite.display_login_form = _openid_login
| mit |
wesley1001/dokomoforms | dokomoforms/db/question_branch.py | 1 | 3010 | """Allow access to the question_branch table."""
from sqlalchemy.engine import ResultProxy, Connection
from sqlalchemy.sql import Insert, select
from dokomoforms.db import question_branch_table
def get_branches(connection: Connection, question_id: str) -> ResultProxy:
    """
    Return every branch originating from the question with the given id.
    :param connection: a SQLAlchemy Connection
    :param question_id: foreign key
    :return: an iterable of the branches (RowProxy)
    """
    query = select([question_branch_table]).where(
        question_branch_table.c.from_question_id == question_id)
    return connection.execute(query)
def question_branch_insert(*,
                           question_choice_id: str,
                           from_question_id: str,
                           from_type_constraint: str,
                           from_sequence_number: str,
                           from_allow_multiple: str,
                           from_survey_id: str,
                           to_question_id: str,
                           to_type_constraint: str,
                           to_sequence_number: str,
                           to_allow_multiple: str,
                           to_survey_id: str) -> Insert:
    """
    Build an Insert statement for the question_branch table.  A branch ties
    a choice on one question (the "from" question) to another question (the
    "to" question).  Make sure to use a transaction!
    :param question_choice_id: the UUID of the choice
    :param from_question_id: the UUID of the from question
    :param from_type_constraint: the type constraint of the from question
    :param from_sequence_number: the sequence number of the from question
    :param from_allow_multiple: whether the from question allows multiple
    :param from_survey_id: the UUID of the survey of the from question
    :param to_question_id: the UUID of the to question
    :param to_type_constraint: the type constraint of the to question
    :param to_sequence_number: the sequence number of the to question
    :param to_allow_multiple: whether the to question allows multiple
    :param to_survey_id: the UUID of the survey of the to question
    :return: an Insert object. Execute this!
    """
    return question_branch_table.insert().values(
        question_choice_id=question_choice_id,
        from_question_id=from_question_id,
        from_type_constraint=from_type_constraint,
        from_sequence_number=from_sequence_number,
        from_allow_multiple=from_allow_multiple,
        from_survey_id=from_survey_id,
        to_question_id=to_question_id,
        to_type_constraint=to_type_constraint,
        to_sequence_number=to_sequence_number,
        to_allow_multiple=to_allow_multiple,
        to_survey_id=to_survey_id)
class MultipleBranchError(Exception):
    # Presumably raised when a single choice would branch to more than one
    # question -- TODO confirm against the raise sites (not visible here).
    pass
| gpl-3.0 |
JulianEberius/SublimePythonIDE | server/lib/python_all/jedi/parser/pgen2/parse.py | 38 | 8124 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2014 David Halter. Integration into Jedi.
# Modifications are dual-licensed: MIT and PSF.
"""
Parser engine for the grammar tables generated by pgen.
The grammar table must be loaded first.
See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.
"""
# Local imports
from jedi.parser import tokenize
class ParseError(Exception):
    """Exception to signal the parser is stuck."""
    def __init__(self, msg, type, value, start_pos):
        # Render the offending token with its symbolic name for readability.
        message = "%s: type=%r, value=%r, start_pos=%r" % (
            msg, tokenize.tok_name[type], value, start_pos)
        super(ParseError, self).__init__(message)
        self.start_pos = start_pos
        self.value = value
        self.type = type
        self.msg = msg
class PgenParser(object):
    """Parser engine.
    The proper usage sequence is:
    p = Parser(grammar, [converter]) # create instance
    p.setup([start]) # prepare for parsing
    <for each input token>:
        if p.addtoken(...): # parse a token; may raise ParseError
            break
    root = p.rootnode # root of abstract syntax tree
    A Parser instance may be reused by calling setup() repeatedly.
    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.
    See driver.py for how to get input tokens by tokenizing a file or
    string.
    Parsing is complete when addtoken() returns True; the root of the
    abstract syntax tree can then be retrieved from the rootnode
    instance variable. When a syntax error occurs, addtoken() raises
    the ParseError exception. There is no error recovery; the parser
    cannot be used after a syntax error was reported (but it can be
    reinitialized by calling setup()).
    """
    def __init__(self, grammar, convert_node, convert_leaf, error_recovery):
        """Constructor.
        The grammar argument is a grammar.Grammar instance; see the
        grammar module for more information.
        The parser is not ready yet for parsing; you must call the
        setup() method to get it started.
        The optional convert argument is a function mapping concrete
        syntax tree nodes to abstract syntax tree nodes. If not
        given, no conversion is done and the syntax tree produced is
        the concrete syntax tree. If given, it must be a function of
        two arguments, the first being the grammar (a grammar.Grammar
        instance), and the second being the concrete syntax tree node
        to be converted. The syntax tree is converted from the bottom
        up.
        A concrete syntax tree node is a (type, nodes) tuple, where
        type is the node type (a token or symbol number) and nodes
        is a list of children for symbols, and None for tokens.
        An abstract syntax tree node may be anything; this is entirely
        up to the converter function.
        """
        self.grammar = grammar
        self.convert_node = convert_node
        self.convert_leaf = convert_leaf
        # Prepare for parsing.
        start = self.grammar.start
        # Each stack entry is a tuple: (dfa, state, node).
        # A node is a tuple: (type, children),
        # where children is a list of nodes or None
        newnode = (start, [])
        stackentry = (self.grammar.dfas[start], 0, newnode)
        self.stack = [stackentry]
        self.rootnode = None
        # Callback invoked on syntax errors; its call signature can be seen
        # at the call sites in parse() and addtoken() below.
        self.error_recovery = error_recovery
    def parse(self, tokenizer):
        """Feed every (type, value, prefix, start_pos) tuple from *tokenizer*
        to addtoken() and return the root node of the resulting tree.
        """
        for type, value, prefix, start_pos in tokenizer:
            if self.addtoken(type, value, prefix, start_pos):
                break
        else:
            # We never broke out -- EOF is too soon -- Unfinished statement.
            # NOTE(review): if *tokenizer* yields nothing, `type`/`value` are
            # unbound here and this raises NameError -- confirm callers always
            # supply at least one token.
            self.error_recovery(self.grammar, self.stack, type, value,
                                start_pos, prefix, self.addtoken)
            # Add the ENDMARKER again.
            if not self.addtoken(type, value, prefix, start_pos):
                raise ParseError("incomplete input", type, value, start_pos)
        return self.rootnode
    def addtoken(self, type, value, prefix, start_pos):
        """Add a token; return True if this is the end of the program."""
        # Map from token to label
        if type == tokenize.NAME:
            # Check for reserved words (keywords)
            try:
                ilabel = self.grammar.keywords[value]
            except KeyError:
                ilabel = self.grammar.tokens[type]
        else:
            ilabel = self.grammar.tokens[type]
        # Loop until the token is shifted; may raise exceptions
        while True:
            dfa, state, node = self.stack[-1]
            states, first = dfa
            arcs = states[state]
            # Look for a state with this label
            for i, newstate in arcs:
                t, v = self.grammar.labels[i]
                if ilabel == i:
                    # Look it up in the list of labels
                    assert t < 256
                    # Shift a token; we're done with it
                    self.shift(type, value, newstate, prefix, start_pos)
                    # Pop while we are in an accept-only state
                    state = newstate
                    while states[state] == [(0, state)]:
                        self.pop()
                        if not self.stack:
                            # Done parsing!
                            return True
                        dfa, state, node = self.stack[-1]
                        states, first = dfa
                    # Done with this token
                    return False
                elif t >= 256:
                    # See if it's a symbol and if we're in its first set
                    itsdfa = self.grammar.dfas[t]
                    itsstates, itsfirst = itsdfa
                    if ilabel in itsfirst:
                        # Push a symbol
                        self.push(t, itsdfa, newstate)
                        break # To continue the outer while loop
            else:
                if (0, state) in arcs:
                    # An accepting state, pop it and try something else
                    self.pop()
                    if not self.stack:
                        # Done parsing, but another token is input
                        raise ParseError("too much input", type, value, start_pos)
                else:
                    self.error_recovery(self.grammar, self.stack, type,
                                        value, start_pos, prefix, self.addtoken)
                    break
    def shift(self, type, value, newstate, prefix, start_pos):
        """Shift a token. (Internal)"""
        dfa, state, node = self.stack[-1]
        newnode = self.convert_leaf(self.grammar, type, value, prefix, start_pos)
        node[-1].append(newnode)
        self.stack[-1] = (dfa, newstate, node)
    def push(self, type, newdfa, newstate):
        """Push a nonterminal. (Internal)"""
        dfa, state, node = self.stack[-1]
        newnode = (type, [])
        self.stack[-1] = (dfa, newstate, node)
        self.stack.append((newdfa, 0, newnode))
    def pop(self):
        """Pop a nonterminal. (Internal)"""
        popdfa, popstate, (type, children) = self.stack.pop()
        # If there's exactly one child, return that child instead of creating a
        # new node. We still create expr_stmt and file_input though, because a
        # lot of Jedi depends on its logic.
        if len(children) == 1:
            newnode = children[0]
        else:
            newnode = self.convert_node(self.grammar, type, children)
        try:
            # Equal to:
            # dfa, state, node = self.stack[-1]
            # symbol, children = node
            self.stack[-1][2][1].append(newnode)
        except IndexError:
            # Stack is empty, set the rootnode.
            self.rootnode = newnode
| gpl-2.0 |
ViDA-NYU/reprozip | reprounzip-qt/reprounzip_qt/gui/common.py | 1 | 2532 | # Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
from __future__ import division, print_function, unicode_literals
from qtpy import QtCore, QtWidgets
import re
def error_msg(parent, message, severity, details=None):
    """Show a modal message box for *message* with the given severity."""
    # Any severity other than 'information'/'warning' is shown as critical.
    icons = {'information': QtWidgets.QMessageBox.Information,
             'warning': QtWidgets.QMessageBox.Warning}
    icon = icons.get(severity, QtWidgets.QMessageBox.Critical)
    box = QtWidgets.QMessageBox(icon, "Error", message,
                                QtWidgets.QMessageBox.Ok, parent,
                                detailedText=details,
                                textFormat=QtCore.Qt.PlainText)
    box.exec_()
def handle_error(parent, result):
    """Pass a boolean *result* through; otherwise show it as an error.
    A non-boolean *result* is an argument tuple for error_msg(), and False
    is returned after the dialog is shown.
    """
    if result in (True, False):
        return result
    error_msg(parent, *result)
    return False
class ResizableStack(QtWidgets.QStackedWidget):
    """QStackedWidget that sizes itself to the currently visible page only.
    See http://stackoverflow.com/a/14485901/711380
    """
    def __init__(self, **kwargs):
        super(ResizableStack, self).__init__(**kwargs)
        self.currentChanged[int].connect(self._current_changed)
    def addWidget(self, widget):
        # Ignored policy keeps hidden pages from inflating the stack's size.
        widget.setSizePolicy(QtWidgets.QSizePolicy.Ignored,
                             QtWidgets.QSizePolicy.Ignored)
        super(ResizableStack, self).addWidget(widget)
    def _current_changed(self, idx):
        # Let the newly shown page drive the stack's size hint again.
        page = self.widget(idx)
        page.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        page.adjustSize()
        self.adjustSize()
class ROOT(object):
    """Maps privilege-elevation options to/from their combo-box indexes."""
    INDEX_TO_OPTION = {0: None, 1: 'sudo', 2: 'su'}
    # Inverse mapping, derived so the two can never drift apart.
    OPTION_TO_INDEX = dict((opt, idx) for idx, opt in INDEX_TO_OPTION.items())
    TEXT = ["no", "with sudo", "with su"]
_port_re = re.compile('^(?:([0-9]+):)?([0-9]+)(?:/([a-z]+))?$')
def parse_ports(string, widget):
    """Parse a whitespace-separated list of ``[host:]port[/proto]`` specs.
    Returns a list of (host_port, experiment_port, proto) tuples; the host
    port defaults to the experiment port and proto defaults to 'tcp'.  On
    an invalid entry a warning dialog is shown and None is returned.
    """
    parsed = []
    for spec in string.split():
        spec = spec.strip()
        if not spec:
            continue
        match = _port_re.match(spec)
        if match is None:
            error_msg(widget, "Invalid port specification: '%s'" % spec,
                      'warning')
            return None
        host, experiment, proto = match.groups()
        parsed.append((int(host or experiment), int(experiment),
                       proto or 'tcp'))
    return parsed
| bsd-3-clause |
devendermishrajio/nova_test_latest | nova/api/openstack/compute/schemas/v3/networks.py | 79 | 2750 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON request-body schema for creating a nova-network (used by the
# os-networks API extension's create action).
create = {
    'type': 'object',
    'properties': {
        'network': {
            'type': 'object',
            'properties': {
                'label': {
                    'type': 'string', 'maxLength': 255
                },
                'ipam': parameter_types.boolean,
                'cidr': parameter_types.cidr,
                'cidr_v6': parameter_types.cidr,
                'project_id': parameter_types.project_id,
                'multi_host': parameter_types.boolean,
                'gateway': parameter_types.ipv4,
                'gateway_v6': parameter_types.ipv6,
                'bridge': {
                    'type': 'string',
                },
                'bridge_interface': {
                    'type': 'string',
                },
                # NOTE: In _extract_subnets(), dns1, dns2 dhcp_server are
                # used only for IPv4, not IPv6.
                'dns1': parameter_types.ipv4,
                'dns2': parameter_types.ipv4,
                'dhcp_server': parameter_types.ipv4,
                'fixed_cidr': parameter_types.cidr,
                'allowed_start': parameter_types.ip_address,
                'allowed_end': parameter_types.ip_address,
                'enable_dhcp': parameter_types.boolean,
                'share_address': parameter_types.boolean,
                'mtu': parameter_types.positive_integer,
                'vlan': parameter_types.positive_integer,
                'vlan_start': parameter_types.positive_integer,
                'vpn_start': {
                    'type': 'string',
                },
            },
            'required': ['label'],
            # Exactly one of 'cidr' (IPv4) or 'cidr_v6' (IPv6) must be given.
            'oneOf': [
                {'required': ['cidr']},
                {'required': ['cidr_v6']}
            ],
            'additionalProperties': False,
        },
    },
    'required': ['network'],
    'additionalProperties': False,
}
# Schema for associating a network with a project; the schema explicitly
# permits a null 'id' (semantics of null are decided by the handler --
# TODO confirm against the controller code).
add_network_to_project = {
    'type': 'object',
    'properties': {
        'id': {'type': ['string', 'null']}
    },
    'required': ['id'],
    'additionalProperties': False
}
| apache-2.0 |
qqzwc/XX-Net | code/default/python27/1.0/lib/noarch/enum/__init__.py | 6 | 30981 | """Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 1, 3
pyver = float('%s.%s' % _sys.version_info[:2])
# Compatibility shims so this module runs unchanged on Python 2.4+ and 3.x.
try:
    any
except NameError:
    # Python < 2.5 has no builtin any(); provide an equivalent fallback.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False
try:
    from collections import OrderedDict
except ImportError:
    # Python < 2.7: left as None; callers check for None and fall back to
    # a plain dict (losing member ordering).
    OrderedDict = None
try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str
try:
    unicode
except NameError:
    # In Python 3 unicode no longer exists (it's just str)
    unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.
    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Names assigned in the class body, in definition order.
        self._member_names = []
    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.
        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).
        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.
        Single underscore (sunder) names are reserved.
        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        if pyver >= 3.0 and key == '__order__':
            return
        # Order of these checks matters: sunder names are rejected outright,
        # dunder names pass through untracked, and only plain, non-descriptor
        # assignments become enum members.
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicity checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
    """Metaclass for Enum"""
    # Give the class body an _EnumDict so definition order is recorded and
    # member names cannot be reused (see _EnumDict.__setitem__).
    @classmethod
    def __prepare__(metacls, cls, bases):
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        if type(classdict) is dict:
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                         first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                try:
                    __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    __order__ = [name for name in sorted(members.keys())]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = set([a for b in bases for a in b.__dict__])
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )     # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail. Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly. We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                           '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
    def __bool__(cls):
        """
        classes/types should always be True.
        """
        return True
    def __call__(cls, value, names=None, module=None, type=None, start=1):
        """Either returns an existing member, or creates a new enum class.
        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).
        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.
        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.
        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type, start=start)
    def __contains__(cls, member):
        return isinstance(member, cls) and member.name in cls._member_map_
    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)
    def __dir__(self):
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)
    @property
    def __members__(cls):
        """Returns a mapping of member name->value.
        This mapping lists all enum members, including aliases. Note that this
        is a copy of the internal mapping.
        """
        return cls._member_map_.copy()
    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)
    def __getitem__(cls, name):
        return cls._member_map_[name]
    def __iter__(cls):
        return (cls._member_map_[name] for name in cls._member_names_)
    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __len__(cls):
        return len(cls._member_names_)
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
    def __repr__(cls):
        return "<enum %r>" % cls.__name__
    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.
        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None, start=1):
        """Convenience method to create a new Enum class.
        `names` can be:
        * A string containing member names, separated either with spaces or
          commas. Values are auto-numbered from 1.
        * An iterable of member names. Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        if pyver < 3.0:
            # if class_name is unicode, attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            names = [(e, i+start) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        item = None  # in case names is empty
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.
        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if  (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]     # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    # Python 2 and 3 resolve a custom __new__ differently, so _find_new_ is
    # chosen at import time based on the running interpreter version.
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.
            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.
            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
# all enum instances are actually created during class construction
# without calling this method; this method is called by the metaclass'
# __call__ (i.e. Color(3) ), and by pickle
if type(value) is cls:
# For lookups like Color(Color.red)
value = value.value
#return value
# by-value search for a matching enum member
# see if it's in the reverse mapping (for hashable values)
try:
if value in cls._value2member_map_:
return cls._value2member_map_[value]
except TypeError:
# not there, now do long search -- O(n) behavior
for member in cls._member_map_.values():
if member.value == value:
return member
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
return "<%s.%s: %r>" % (
self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
    # __dir__ is only installed on Python 3 (py2's dir() ignores it anyway).
    if pyver >= 3.0:
        def __dir__(self):
            # Expose added behaviour (public names from the enum class and its
            # mix-ins) while hiding member names, which are attributes of the
            # class rather than of individual members.
            added_behavior = [
                    m
                    for cls in self.__class__.mro()
                    for m in cls.__dict__
                    if m[0] != '_' and m not in self._member_map_
                    ]
            return (['__class__', '__doc__', '__module__', ] + added_behavior)
        temp_enum_dict['__dir__'] = __dir__
        del __dir__
def __format__(self, format_spec):
# mixed-in Enums should use the mixed-in type's __format__, otherwise
# we can get strange results with the Enum name showing up instead of
# the value
# pure Enum branch
if self._member_type_ is object:
cls = str
val = str(self)
# mix-in branch
else:
cls = self._member_type_
val = self.value
return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
def __cmp__(self, other):
if type(other) is self.__class__:
if self is other:
return 0
return -1
return NotImplemented
raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__cmp__'] = __cmp__
del __cmp__
else:
def __le__(self, other):
raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__le__'] = __le__
del __le__
def __lt__(self, other):
raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__lt__'] = __lt__
del __lt__
def __ge__(self, other):
raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__ge__'] = __ge__
del __ge__
def __gt__(self, other):
raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__gt__'] = __gt__
del __gt__
def __eq__(self, other):
if type(other) is self.__class__:
return self is other
return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
if type(other) is self.__class__:
return self is not other
return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
    def __hash__(self):
        # Hash by member name: names are always hashable strings, whereas a
        # member's value may be unhashable.
        return hash(self._name_)
    temp_enum_dict['__hash__'] = __hash__
    del __hash__
    def __reduce_ex__(self, proto):
        # Pickle support: unpickling re-runs the by-value lookup
        # ``EnumClass(value)`` and therefore yields the existing singleton.
        return self.__class__, (self._value_, )
    temp_enum_dict['__reduce_ex__'] = __reduce_ex__
    del __reduce_ex__
    # _RouteClassAttributeToGetattr is used to provide access to the `name`
    # and `value` properties of enum members while keeping some measure of
    # protection from modification, while still allowing for an enumeration
    # to have members named `name` and `value`. This works because enumeration
    # members are not set directly on the enum class -- __getattr__ is
    # used to look them up.
    @_RouteClassAttributeToGetattr
    def name(self):
        """The member's name (read-only)."""
        return self._name_
    temp_enum_dict['name'] = name
    del name
    @_RouteClassAttributeToGetattr
    def value(self):
        """The member's value (read-only)."""
        return self._value_
    temp_enum_dict['value'] = value
    del value
    @classmethod
    def _convert(cls, name, module, filter, source=None):
        """
        Create a new Enum subclass that replaces a collection of global constants
        """
        # convert all constants from source (or module) that pass filter() to
        # a new Enum called name, and export the enum and its members back to
        # module;
        # also, replace the __reduce_ex__ method so unpickling works in
        # previous Python versions
        # NOTE(review): the ``filter`` parameter shadows the builtin of the
        # same name; renaming it would break keyword callers, so it stays.
        module_globals = vars(_sys.modules[module])
        if source:
            source = vars(source)
        else:
            source = module_globals
        members = dict((name, value) for name, value in source.items() if filter(name))
        # ``cls`` is rebound: calling the class (via its metaclass) with a
        # name and a member mapping builds the new Enum subclass.
        cls = cls(name, members, module=module)
        # pickle converted constants by name, not by value (see
        # _reduce_ex_by_name), so older pickles still resolve
        cls.__reduce_ex__ = _reduce_ex_by_name
        module_globals.update(cls.__members__)
        module_globals[name] = cls
        return cls
    temp_enum_dict['_convert'] = _convert
    del _convert
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
    class IntEnum(int, Enum):
        """Enum where members are also (and must be) ints.

        As an ``int`` subclass, members take part in arithmetic and ordered
        comparisons like plain integers.
        """
    def _reduce_ex_by_name(self, proto):
        # Pickle replacement installed by Enum._convert: converted constants
        # pickle as their (module-level) name instead of by value.
        return self.name
def unique(enumeration):
"""Class decorator that ensures only unique members exist in an enumeration."""
duplicates = []
for name, member in enumeration.__members__.items():
if name != member.name:
duplicates.append((name, member.name))
if duplicates:
duplicate_names = ', '.join(
["%s -> %s" % (alias, name) for (alias, name) in duplicates]
)
raise ValueError('duplicate names found in %r: %s' %
(enumeration, duplicate_names)
)
return enumeration
| bsd-2-clause |
nilmtk/nilmtk | nilmtk/stats/dropoutrateresults.py | 8 | 1783 | import matplotlib.pyplot as plt
from ..results import Results
from ..consts import SECS_PER_DAY
class DropoutRateResults(Results):
    """Per-chunk dropout-rate statistics.

    Attributes
    ----------
    _data : pd.DataFrame
        index is start date for the whole chunk
        `end` is end date for the whole chunk
        `dropout_rate` is float [0,1]
        `n_samples` is int, used for calculating weighted mean
    """
    name = "dropout_rate"

    def combined(self):
        """Calculates weighted average.

        Returns
        -------
        dropout_rate : float, [0,1]
        """
        tot_samples = self._data['n_samples'].sum()
        proportion = self._data['n_samples'] / tot_samples
        dropout_rate = (self._data['dropout_rate'] * proportion).sum()
        return dropout_rate

    def unify(self, other):
        """Merge ``other``'s per-chunk stats into this object, row by row."""
        super(DropoutRateResults, self).unify(other)
        for i in self._data.index:
            # BUG FIX: write through .loc[row, col]. The original used
            # chained indexing (self._data['col'].loc[i] = ...), which pandas
            # may apply to a temporary copy and silently discard
            # (SettingWithCopy).
            self._data.loc[i, 'dropout_rate'] = (
                self._data.loc[i, 'dropout_rate'] +
                other._data.loc[i, 'dropout_rate']) / 2
            self._data.loc[i, 'n_samples'] += other._data.loc[i, 'n_samples']

    def to_dict(self):
        """Export the combined statistic as a plain dict."""
        return {'statistics': {'dropout_rate': self.combined()}}

    def plot(self, ax=None):
        """Draw one rectangle per chunk: width is the chunk duration in
        days, height is that chunk's dropout rate."""
        if ax is None:
            ax = plt.gca()
        ax.xaxis.axis_date()
        for index, row in self._data.iterrows():
            length = (row['end'] - index).total_seconds() / SECS_PER_DAY
            rect = plt.Rectangle((index, 0),           # bottom left corner
                                 length,               # width (days)
                                 row['dropout_rate'],  # height
                                 color='b')
            ax.add_patch(rect)
        ax.autoscale_view()
| apache-2.0 |
SEL-Columbia/commcare-hq | corehq/apps/reports/urls.py | 1 | 4594 | import logging
from django.conf.urls.defaults import *
from django.core.exceptions import ImproperlyConfigured
from corehq.apps.reports.util import get_installed_custom_modules
from corehq.apps.reports.dispatcher import (ProjectReportDispatcher,
CustomProjectReportDispatcher, BasicReportDispatcher)
# from .filters.urls import urlpatterns as filter_urls
from .filters import urls as filter_urls
# Urlconf for custom (per-domain) project reports; extended below with the
# urls of any installed custom report modules.
custom_report_urls = patterns('',
    CustomProjectReportDispatcher.url_pattern(),
)
# Main urlconf for the reports app; string view names resolve against the
# 'corehq.apps.reports.views' prefix (old-style Django 1.x patterns()).
urlpatterns = patterns('corehq.apps.reports.views',
    url(r'^$', "default", name="reports_home"),
    url(r'^saved/', "saved_reports", name="saved_reports"),
    url(r'^saved_reports', 'old_saved_reports'),
    # Case data views
    url(r'^case_data/(?P<case_id>[\w\-]+)/$', 'case_details', name="case_details"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/view/xml/$', 'case_xml', name="single_case_xml"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/rebuild/$', 'rebuild_case_view', name="rebuild_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/close/$', 'close_case_view', name="close_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/undo-close/$', 'undo_close_case_view', name="undo_close_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/(?P<xform_id>[\w\-:]+)/$', 'case_form_data', name="case_form_data"),
    # Download and view form data
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/$', 'form_data', name='render_form_data'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/download/$', 'download_form', name='download_form'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/download-attachment/$',
        'download_attachment', name='download_attachment'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/archive/$', 'archive_form', name='archive_form'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/unarchive/$', 'unarchive_form', name='unarchive_form'),
    # export API
    url(r"^export/$", 'export_data'),
    # Download Exports
    # todo should eventually be moved to corehq.apps.export
    # Custom
    url(r"^export/custom/(?P<export_id>[\w\-]+)/download/$", 'export_default_or_custom_data', name="export_custom_data"),
    # Default
    url(r"^export/default/download/$", "export_default_or_custom_data", name="export_default_data"),
    # Bulk
    url(r"^export/bulk/download/$", "export_default_or_custom_data", name="export_bulk_download", kwargs=dict(bulk_export=True)),
    # saved
    url(r"^export/saved/download/(?P<export_id>[\w\-]+)/$", "hq_download_saved_export", name="hq_download_saved_export"),
    url(r"^export/saved/update/$", "hq_update_saved_export", name="hq_update_saved_export"),
    # Full Excel export
    url(r'^full_excel_export/(?P<export_hash>[\w\-]+)/(?P<format>[\w\-]+)$', "export_report", name="export_report"),
    # once off email
    url(r"^email_onceoff/(?P<report_slug>[\w_]+)/$", 'email_report'),
    url(r"^custom/email_onceoff/(?P<report_slug>[\w_]+)/$", 'email_report',
        kwargs=dict(report_type=CustomProjectReportDispatcher.prefix)),
    # Saved reports
    url(r"^configs$", 'add_config', name='add_report_config'),
    url(r"^configs/(?P<config_id>[\w-]+)$", 'delete_config',
        name='delete_report_config'),
    # Scheduled reports
    url(r'^scheduled_reports/(?P<scheduled_report_id>[\w-]+)?$',
        'edit_scheduled_report', name="edit_scheduled_report"),
    url(r'^scheduled_report/(?P<scheduled_report_id>[\w-]+)/delete$',
        'delete_scheduled_report', name='delete_scheduled_report'),
    url(r'^send_test_scheduled_report/(?P<scheduled_report_id>[\w-]+)/$',
        'send_test_scheduled_report', name='send_test_scheduled_report'),
    url(r'^view_scheduled_report/(?P<scheduled_report_id>[\w_]+)/$',
        'view_scheduled_report', name='view_scheduled_report'),
    # Internal Use
    url(r"^export/forms/all/$", 'export_all_form_metadata', name="export_all_form_metadata"),
    url(r"^export/forms/all/async/$", 'export_all_form_metadata_async', name="export_all_form_metadata_async"),
    url(r'^download/cases/$', 'download_cases', name='download_cases'),
    url(r'^custom/', include(custom_report_urls)),
    url(r'^filters/', include(filter_urls)),
    ProjectReportDispatcher.url_pattern(),
)
# Bare report urlconf dispatched through the basic (non-project) dispatcher.
report_urls = patterns('',
    BasicReportDispatcher.url_pattern(),
)

# Auto-register each installed custom report module's urlconf under a prefix
# matching its module name; modules without a ``urls`` submodule make
# include() raise ImproperlyConfigured and are simply skipped.
for module in get_installed_custom_modules():
    module_name = module.__name__.split('.')[-1]
    try:
        custom_report_urls += patterns('',
            (r"^%s/" % module_name, include('{0}.urls'.format(module.__name__))),
        )
    except ImproperlyConfigured:
        # Pass the argument lazily instead of pre-formatting with ``%`` so
        # logging only builds the string when INFO is actually emitted.
        logging.info("Module %s does not provide urls", module_name)
| bsd-3-clause |
indico/indico | indico/migrations/env.py | 4 | 3063 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import logging.config
from alembic import context
from flask import current_app
from sqlalchemy import engine_from_config, pool
from indico.core.db.sqlalchemy.util.models import import_all_models
# Ensure all our models are imported
import_all_models()
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
logging.config.fileConfig(config.config_file_name)
# Point Alembic at the Flask app's database URL and SQLAlchemy metadata so
# autogenerate can diff against the live model definitions.
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
def _include_object(object_, name, type_, reflected, compare_to):
    """Autogenerate filter: skip plugin schemas and Alembic's own
    bookkeeping tables; everything else is considered."""
    if type_ != 'table':
        return True
    schema = object_.schema
    if schema and schema.startswith('plugin_'):
        return False
    if name == 'alembic_version' or name.startswith('alembic_version_'):
        return False
    return True
def _render_item(type_, obj, autogen_context):
    """Autogenerate renderer hook.

    Returns None to suppress rendering when the object opted out via its
    ``info`` dict, False to fall back to Alembic's default rendering, or
    the result of the object's own ``alembic_render_<type_>`` method.
    """
    if hasattr(obj, 'info') and obj.info.get('alembic_dont_render'):
        return None
    renderer = getattr(obj, 'alembic_render_' + type_, None)
    if renderer is None:
        return False
    return renderer(autogen_context, autogen_context.opts['template_args']['toplevel_code'])
def run_migrations_offline():
    """Run migrations in 'offline' mode.
    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.
    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option('sqlalchemy.url')
    # include_object/render_item filter out plugin schemas and Alembic
    # bookkeeping tables and honour per-object rendering hooks (see above).
    context.configure(url=url, target_metadata=target_metadata, include_schemas=True,
                      version_table_schema='public', include_object=_include_object, render_item=_render_item,
                      template_args={'toplevel_code': set()})
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)
    connection = engine.connect()
    try:
        # configure() is inside the try block so the connection is closed
        # even when configuration itself raises (the original leaked the
        # connection in that case because configure ran before the try).
        context.configure(connection=connection, target_metadata=target_metadata, include_schemas=True,
                          version_table_schema='public', include_object=_include_object, render_item=_render_item,
                          template_args={'toplevel_code': set()})
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
# Module entry point: Alembic executes env.py directly and this dispatches
# to the SQL-script (offline) or live-connection (online) path.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| mit |
kalahbrown/HueBigSQL | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/namedrange.py | 116 | 2415 | # file openpyxl/namedrange.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Track named groups of cells in a worksheet"""
# Python stdlib imports
import re
# package imports
from .shared.exc import NamedRangeException
# constants
# Raw string literal: ``\$`` inside a non-raw string is an invalid escape
# sequence (DeprecationWarning since Python 3.6, SyntaxWarning later); the
# compiled pattern is byte-for-byte identical.
NAMED_RANGE_RE = re.compile(r"'?([^']*)'?!((\$([A-Za-z]+))?\$([0-9]+)(:(\$([A-Za-z]+))?(\$([0-9]+)))?)$")
class NamedRange(object):
    """A named group of cells: a list of (sheet, cell-range) destinations
    addressable by a single name."""
    __slots__ = ('name', 'destinations', 'local_only')

    def __init__(self, name, destinations):
        self.name = name
        self.destinations = destinations
        # scoped to a single worksheet only when explicitly flagged
        self.local_only = False

    def __str__(self):
        # "Sheet1!$A$1,Sheet2!$B$2" -- one entry per destination
        return ','.join('%s!%s' % dest for dest in self.destinations)

    def __repr__(self):
        return '<%s "%s">' % (self.__class__.__name__, str(self))
def split_named_range(range_string):
    """Separate a named range into its component parts.

    Returns a list of (sheet_name, cell_range) tuples, one per
    comma-separated destination; raises NamedRangeException for any
    destination that does not match NAMED_RANGE_RE.
    """
    destinations = []
    # Iterate with a distinct name so the parameter is not clobbered (the
    # original reused ``range_string`` as its own loop variable).
    for part in range_string.split(','):
        match = NAMED_RANGE_RE.match(part)
        if not match:
            raise NamedRangeException('Invalid named range string: "%s"' % part)
        sheet_name, xlrange = match.groups()[:2]
        destinations.append((sheet_name, xlrange))
    return destinations
| apache-2.0 |
chenzeyuczy/keba | src/lesson/views.py | 1 | 2227 | from django.shortcuts import render
from django.http import Http404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.decorators.http import require_http_methods, require_GET, require_POST
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from .models import Lesson
from comment.models import Comment
from meterial.models import Meterial
from api.myLesson import MyLesson
from api.sysu import Sysuer
from api.parser import parseResultOfCourseSelection, parseScore
# Create your views here.
@require_GET
@login_required
def lessons_list(request):
    """List lessons 25 per page, optionally filtered by the ``p`` query
    parameter (substring match on title or teacher)."""
    search = request.GET.get('p', None)
    if search is not None:
        all_lessons = Lesson.objects.filter(Q(title__contains=search) | Q(teacher__contains=search))
    else:
        all_lessons = Lesson.objects.all()
    paginator = Paginator(all_lessons, 25)
    page = request.GET.get('page')
    try:
        lessons = paginator.page(page)
    except PageNotAnInteger:
        # missing or non-numeric page: fall back to the first page
        lessons = paginator.page(1)
    except EmptyPage:
        # page number past the end: clamp to the last page
        lessons = paginator.page(paginator.num_pages)
    return render(request, 'lesson/lessons_list.html', {
        'lessons': lessons,
        'search': search,
    })
@login_required
@require_GET
def lesson_detail(request, lesson_id):
    """Show one lesson with the current user's comments and its materials."""
    try:
        lesson = Lesson.objects.get(pk=lesson_id)
    except Lesson.DoesNotExist:
        raise Http404('Lesson does not exist...')
    user = request.user
    # only this user's comments on this particular lesson
    comments = Comment.objects.filter(user=user).filter(lesson=lesson)
    meterials = Meterial.objects.filter(lesson=lesson)
    return render(request, 'lesson/lesson_detail.html', {
        'lesson': lesson,
        'comments': comments,
        'meterials': meterials,
    })
@login_required
@require_GET
def select_result(request):
    """Render the user's course-selection results scraped from the portal.

    Each parsed row is annotated with the local Lesson pk under ``id`` when
    a matching Lesson exists, so the template can link to its detail page.
    """
    user = Sysuer(username=request.user.username,
                  cookie=request.session['cookie'])
    lessons = parseResultOfCourseSelection(user)
    for x in lessons:
        try:
            lesson = Lesson.objects.get(lessonId=x['kch'], teacher=x['xm'])
        except (Lesson.DoesNotExist, Lesson.MultipleObjectsReturned):
            # courses with no (single) local Lesson simply get no link
            continue
        # BUG FIX: dicts have no ``set`` method -- the original
        # ``x.set('id', lesson.id)`` raised AttributeError, which the bare
        # ``except: pass`` silently swallowed, so ``id`` was never attached.
        # (Assumes ``x`` is dict-like, as ``x['kch']`` above implies --
        # TODO confirm against parseResultOfCourseSelection.)
        x['id'] = lesson.id
    return render(request, 'lesson/select_result.html', {
        'lessons': lessons,
    })
| gpl-2.0 |
Xeralux/tensorflow | tensorflow/contrib/gan/python/namedtuples.py | 17 | 6624 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Named tuples for TFGAN.
TFGAN training occurs in four steps, and each step communicates with the next
step via one of these named tuples. At each step, you can either use a TFGAN
helper function in `train.py`, or you can manually construct a tuple.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
__all__ = [
'GANModel',
'InfoGANModel',
'ACGANModel',
'CycleGANModel',
'GANLoss',
'CycleGANLoss',
'GANTrainOps',
'GANTrainSteps',
]
class GANModel(
    collections.namedtuple('GANModel', [
        'generator_inputs', 'generated_data', 'generator_variables',
        'generator_scope', 'generator_fn',
        'real_data',
        'discriminator_real_outputs', 'discriminator_gen_outputs',
        'discriminator_variables', 'discriminator_scope',
        'discriminator_fn',
    ])):
  """A GANModel contains all the pieces needed for GAN training.

  Generative Adversarial Networks (https://arxiv.org/abs/1406.2661) attempt
  to create an implicit generative model of data by solving a two agent game:
  the generator produces candidate examples meant to match the data
  distribution, while the discriminator tries to tell real examples apart
  from generated samples.

  Args:
    generator_inputs: The random noise source that acts as input to the
      generator.
    generated_data: The generated output data of the GAN.
    generator_variables: A list of all generator variables.
    generator_scope: Variable scope all generator variables live in.
    generator_fn: The generator function.
    real_data: A tensor or real data.
    discriminator_real_outputs: The discriminator's output on real data.
    discriminator_gen_outputs: The discriminator's output on generated data.
    discriminator_variables: A list of all discriminator variables.
    discriminator_scope: Variable scope all discriminator variables live in.
    discriminator_fn: The discriminator function.
  """
# TODO(joelshor): Have this class inherit from `GANModel`.
# Extends GANModel's field list with the InfoGAN-specific pieces.
class InfoGANModel(
    collections.namedtuple('InfoGANModel', GANModel._fields + (
        'structured_generator_inputs',
        'predicted_distributions',
        'discriminator_and_aux_fn',
    ))):
  """An InfoGANModel contains all the pieces needed for InfoGAN training.
  See https://arxiv.org/abs/1606.03657 for more details.
  Args:
    structured_generator_inputs: A list of Tensors representing the random noise
      that must have high mutual information with the generator output. List
      length should match `predicted_distributions`.
    predicted_distributions: A list of tf.Distributions. Predicted by the
      recognizer, and used to evaluate the likelihood of the structured noise.
      List length should match `structured_generator_inputs`.
    discriminator_and_aux_fn: The original discriminator function that returns
      a tuple of (logits, `predicted_distributions`).
  """
# Extends GANModel's field list with the AC-GAN classification outputs.
class ACGANModel(
    collections.namedtuple('ACGANModel', GANModel._fields +
                           ('one_hot_labels',
                            'discriminator_real_classification_logits',
                            'discriminator_gen_classification_logits',))):
  """An ACGANModel contains all the pieces needed for ACGAN training.
  See https://arxiv.org/abs/1610.09585 for more details.
  Args:
    one_hot_labels: A Tensor holding one-hot-labels for the batch.
    discriminator_real_classification_logits: Classification logits for real
      data.
    discriminator_gen_classification_logits: Classification logits for generated
      data.
  """
class CycleGANModel(
    collections.namedtuple('CycleGANModel', [
        'model_x2y', 'model_y2x', 'reconstructed_x', 'reconstructed_y',
    ])):
  """A CycleGANModel contains all the pieces needed for CycleGAN training.

  The `model_x2y` generator F maps data set X to Y, while `model_y2x`
  generator G maps data set Y to X.
  See https://arxiv.org/abs/1703.10593 for more details.

  Args:
    model_x2y: A `GANModel` namedtuple whose generator maps data set X to Y.
    model_y2x: A `GANModel` namedtuple whose generator maps data set Y to X.
    reconstructed_x: A `Tensor` of reconstructed data X which is G(F(X)).
    reconstructed_y: A `Tensor` of reconstructed data Y which is F(G(Y)).
  """
class GANLoss(
    collections.namedtuple('GANLoss', [
        'generator_loss',
        'discriminator_loss',
    ])):
  """Pairs the two adversarial losses produced for one GAN training step.

  Args:
    generator_loss: A tensor for the generator loss.
    discriminator_loss: A tensor for the discriminator loss.
  """
class CycleGANLoss(
    collections.namedtuple('CycleGANLoss', [
        'loss_x2y',
        'loss_y2x',
    ])):
  """CycleGANLoss contains the losses for a `CycleGANModel`.

  See https://arxiv.org/abs/1703.10593 for more details.

  Args:
    loss_x2y: A `GANLoss` namedtuple representing the loss of `model_x2y`.
    loss_y2x: A `GANLoss` namedtuple representing the loss of `model_y2x`.
  """
class GANTrainOps(
    collections.namedtuple('GANTrainOps', [
        'generator_train_op',
        'discriminator_train_op',
        'global_step_inc_op',
    ])):
  """Bundles the ops that advance GAN training.

  Args:
    generator_train_op: Op that performs a generator update step.
    discriminator_train_op: Op that performs a discriminator update step.
    global_step_inc_op: Op that increments the shared global step.
  """
class GANTrainSteps(
    collections.namedtuple('GANTrainSteps', [
        'generator_train_steps',
        'discriminator_train_steps',
    ])):
  """Per-GAN-step training configuration.

  Args:
    generator_train_steps: Number of generator steps to take in each GAN step.
    discriminator_train_steps: Number of discriminator steps to take in each
      GAN step.
  """
| apache-2.0 |
azurestandard/django | tests/regressiontests/cache/tests.py | 8 | 76440 | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import absolute_import, unicode_literals
import hashlib
import os
import re
import tempfile
import time
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import get_cache, DEFAULT_CACHE_ALIAS
from django.core.cache.backends.base import (CacheKeyWarning,
InvalidCacheBackendError)
from django.db import router
from django.http import HttpResponse, HttpRequest, QueryDict
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory
from django.test.utils import (get_warnings_state, restore_warnings_state,
override_settings)
from django.utils import timezone, translation, unittest
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_unicode
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
# Module-level so they are picklable when stored as cache values.
def f():
    return 42
class C:
    # ``n`` plays the role of the usual ``self`` argument here
    def m(n):
        return 24
class DummyCacheTests(unittest.TestCase):
    """The dummy backend discards all writes, so every read is a miss."""
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has different test requirements.
    backend_name = 'django.core.cache.backends.dummy.DummyCache'
    def setUp(self):
        self.cache = get_cache(self.backend_name)
    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), None)
    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, True)
        self.assertEqual(self.cache.get("addkey1"), None)
    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {})
    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), None)
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), False)
        self.assertEqual(self.cache.has_key("goodbye1"), False)
    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, False)
        self.assertEqual("goodbye2" in self.cache, False)
    def test_incr(self):
        "Dummy cache values can't be incremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.incr, 'answer')
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
    def test_decr(self):
        "Dummy cache values can't be decremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.decr, 'answer')
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string' : 'this is a string',
            'int' : 42,
            'list' : [1, 2, 3, 4],
            'tuple' : (1, 2, 3, 4),
            'dict' : {'A': 1, 'B' : 2},
            'function' : f,
            'class' : C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), None)
    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), None)
        self.assertEqual(self.cache.has_key("expire3"), False)
    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x' : 1 }
            }
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), None)
    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        self.cache.set_many({'a': 1, 'b': 2})
        self.cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        self.cache.delete_many(['a', 'b'])
    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        self.cache.clear()
    def test_incr_version(self):
        "Dummy cache versions can't be incremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.incr_version, 'answer')
        self.assertRaises(ValueError, self.cache.incr_version, 'does_not_exist')
    def test_decr_version(self):
        "Dummy cache versions can't be decremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.decr_version, 'answer')
        self.assertRaises(ValueError, self.cache.decr_version, 'does_not_exist')
class BaseCacheTests(object):
    """
    Backend-agnostic cache test mixin.

    Concrete TestCases mix this in and provide, in setUp():
    ``self.cache`` plus the ``prefix_cache``, ``v2_cache``,
    ``custom_key_cache`` and ``custom_key_cache2`` variants, all sharing
    one underlying store so the prefix/version/key-function isolation
    tests below are meaningful.
    """
    # A common set of tests to apply to all cache backends
    def _get_request_cache(self, path):
        """Build a minimal GET request flagged for cache-middleware updating."""
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = path
        # Mark the request so UpdateCacheMiddleware caches the response.
        request._cache_update_cache = True
        request.method = 'GET'
        return request
    def test_simple(self):
        # Simple cache set/get works
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), "value")
    def test_add(self):
        # A key can be added to a cache
        self.cache.add("addkey1", "value")
        # A second add() must fail and leave the first value in place.
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, False)
        self.assertEqual(self.cache.get("addkey1"), "value")
    def test_prefix(self):
        # Test for same cache key conflicts between shared backend
        self.cache.set('somekey', 'value')
        # should not be set in the prefixed cache
        self.assertFalse(self.prefix_cache.has_key('somekey'))
        self.prefix_cache.set('somekey', 'value2')
        self.assertEqual(self.cache.get('somekey'), 'value')
        self.assertEqual(self.prefix_cache.get('somekey'), 'value2')
    def test_non_existent(self):
        # Non-existent cache keys return as None/default
        # get with non-existent keys
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
    def test_get_many(self):
        # Multiple cache keys can be returned using get_many
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        # Missing keys ('e') are silently omitted from the result dict.
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a' : 'a', 'c' : 'c', 'd' : 'd'})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a' : 'a', 'b' : 'b'})
    def test_delete(self):
        # Cache keys can be deleted
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), "spam")
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), "eggs")
    def test_has_key(self):
        # The cache can be inspected for cache keys
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), True)
        self.assertEqual(self.cache.has_key("goodbye1"), False)
    def test_in(self):
        # The in operator can be used to inspect cache contents
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, True)
        self.assertEqual("goodbye2" in self.cache, False)
    def test_incr(self):
        # Cache values can be incremented
        self.cache.set('answer', 41)
        self.assertEqual(self.cache.incr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.incr('answer', 10), 52)
        self.assertEqual(self.cache.get('answer'), 52)
        # incr on a missing key raises rather than initializing it.
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
    def test_decr(self):
        # Cache values can be decremented
        self.cache.set('answer', 43)
        self.assertEqual(self.cache.decr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.decr('answer', 10), 32)
        self.assertEqual(self.cache.get('answer'), 32)
        # decr on a missing key raises rather than initializing it.
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
    def test_data_types(self):
        # Many different data types can be cached
        stuff = {
            'string' : 'this is a string',
            'int' : 42,
            'list' : [1, 2, 3, 4],
            'tuple' : (1, 2, 3, 4),
            'dict' : {'A': 1, 'B' : 2},
            'function' : f,
            'class' : C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), stuff)
    def test_cache_read_for_model_instance(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="Well?")
        self.assertEqual(Poll.objects.count(), 1)
        pub_date = my_poll.pub_date
        self.cache.set('question', my_poll)
        cached_poll = self.cache.get('question')
        self.assertEqual(cached_poll.pub_date, pub_date)
        # We only want the default expensive calculation run once
        self.assertEqual(expensive_calculation.num_runs, 1)
    def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache write
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.assertEqual(expensive_calculation.num_runs, 1)
        self.cache.set('deferred_queryset', defer_qs)
        # cache set should not re-evaluate default functions
        self.assertEqual(expensive_calculation.num_runs, 1)
    def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.cache.set('deferred_queryset', defer_qs)
        self.assertEqual(expensive_calculation.num_runs, 1)
        runs_before_cache_read = expensive_calculation.num_runs
        cached_polls = self.cache.get('deferred_queryset')
        # We only want the default expensive calculation run on creation and set
        self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
    def test_expiration(self):
        # Cache values can be set to expire
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        # add() succeeds on an expired key, unlike on a live one.
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), "newvalue")
        self.assertEqual(self.cache.has_key("expire3"), False)
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x' : 1 }
        }
        # Test `set`
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), value)
        # Test `add`
        for (key, value) in stuff.items():
            self.cache.delete(key)
            self.cache.add(key, value)
            self.assertEqual(self.cache.get(key), value)
        # Test `set_many`
        for (key, value) in stuff.items():
            self.cache.delete(key)
        self.cache.set_many(stuff)
        for (key, value) in stuff.items():
            self.assertEqual(self.cache.get(key), value)
    def test_binary_string(self):
        # Binary strings should be cacheable
        from zlib import compress, decompress
        value = 'value_to_be_compressed'
        compressed_value = compress(value)
        # Test set
        self.cache.set('binary1', compressed_value)
        compressed_result = self.cache.get('binary1')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result))
        # Test add
        self.cache.add('binary1-add', compressed_value)
        compressed_result = self.cache.get('binary1-add')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result))
        # Test set_many
        self.cache.set_many({'binary1-set_many': compressed_value})
        compressed_result = self.cache.get('binary1-set_many')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result))
    def test_set_many(self):
        # Multiple keys can be set using set_many
        self.cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(self.cache.get("key1"), "spam")
        self.assertEqual(self.cache.get("key2"), "eggs")
    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
    def test_delete_many(self):
        # Multiple keys can be deleted using delete_many
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.set("key3", "ham")
        self.cache.delete_many(["key1", "key2"])
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
        self.assertEqual(self.cache.get("key3"), "ham")
    def test_clear(self):
        # The cache can be emptied using clear
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.clear()
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
    def test_long_timeout(self):
        '''
        Using a timeout greater than 30 days makes memcached think
        it is an absolute expiration timestamp instead of a relative
        offset. Test that we honour this convention. Refs #12399.
        '''
        self.cache.set('key1', 'eggs', 60*60*24*30 + 1) #30 days + 1 second
        self.assertEqual(self.cache.get('key1'), 'eggs')
        self.cache.add('key2', 'ham', 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key2'), 'ham')
        self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key3'), 'sausage')
        self.assertEqual(self.cache.get('key4'), 'lobster bisque')
    def test_float_timeout(self):
        # Make sure a timeout given as a float doesn't crash anything.
        self.cache.set("key1", "spam", 100.2)
        self.assertEqual(self.cache.get("key1"), "spam")
    def perform_cull_test(self, initial_count, final_count):
        """This is implemented as a utility method, because only some of the backends
        implement culling. The culling algorithm also varies slightly, so the final
        number of entries will vary between backends"""
        # Create initial cache key entries. This will overflow the cache, causing a cull
        for i in range(1, initial_count):
            self.cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count):
            if self.cache.has_key('cull%d' % i):
                count = count + 1
        self.assertEqual(count, final_count)
    def test_invalid_keys(self):
        """
        All the builtin backends (except memcached, see below) should warn on
        keys that would be refused by memcached. This encourages portable
        caching code without making it too difficult to use production backends
        with more liberal key rules. Refs #6447.
        """
        # mimic custom ``make_key`` method being defined since the default will
        # never show the below warnings
        def func(key, *args):
            return key
        old_func = self.cache.key_func
        self.cache.key_func = func
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                # memcached does not allow whitespace or control characters in keys
                self.cache.set('key with spaces', 'value')
                self.assertEqual(len(w), 2)
                self.assertTrue(isinstance(w[0].message, CacheKeyWarning))
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                # memcached limits key length to 250
                self.cache.set('a' * 251, 'value')
                self.assertEqual(len(w), 1)
                self.assertTrue(isinstance(w[0].message, CacheKeyWarning))
        finally:
            # Always restore the original key function for later tests.
            self.cache.key_func = old_func
    def test_cache_versioning_get_set(self):
        # set, using default version = 1
        self.cache.set('answer1', 42)
        self.assertEqual(self.cache.get('answer1'), 42)
        self.assertEqual(self.cache.get('answer1', version=1), 42)
        self.assertEqual(self.cache.get('answer1', version=2), None)
        self.assertEqual(self.v2_cache.get('answer1'), None)
        self.assertEqual(self.v2_cache.get('answer1', version=1), 42)
        self.assertEqual(self.v2_cache.get('answer1', version=2), None)
        # set, default version = 1, but manually override version = 2
        self.cache.set('answer2', 42, version=2)
        self.assertEqual(self.cache.get('answer2'), None)
        self.assertEqual(self.cache.get('answer2', version=1), None)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.assertEqual(self.v2_cache.get('answer2'), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
        # v2 set, using default version = 2
        self.v2_cache.set('answer3', 42)
        self.assertEqual(self.cache.get('answer3'), None)
        self.assertEqual(self.cache.get('answer3', version=1), None)
        self.assertEqual(self.cache.get('answer3', version=2), 42)
        self.assertEqual(self.v2_cache.get('answer3'), 42)
        self.assertEqual(self.v2_cache.get('answer3', version=1), None)
        self.assertEqual(self.v2_cache.get('answer3', version=2), 42)
        # v2 set, default version = 2, but manually override version = 1
        self.v2_cache.set('answer4', 42, version=1)
        self.assertEqual(self.cache.get('answer4'), 42)
        self.assertEqual(self.cache.get('answer4', version=1), 42)
        self.assertEqual(self.cache.get('answer4', version=2), None)
        self.assertEqual(self.v2_cache.get('answer4'), None)
        self.assertEqual(self.v2_cache.get('answer4', version=1), 42)
        self.assertEqual(self.v2_cache.get('answer4', version=2), None)
    def test_cache_versioning_add(self):
        # add, default version = 1, but manually override version = 2
        self.cache.add('answer1', 42, version=2)
        self.assertEqual(self.cache.get('answer1', version=1), None)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.add('answer1', 37, version=2)
        self.assertEqual(self.cache.get('answer1', version=1), None)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.add('answer1', 37, version=1)
        self.assertEqual(self.cache.get('answer1', version=1), 37)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        # v2 add, using default version = 2
        self.v2_cache.add('answer2', 42)
        self.assertEqual(self.cache.get('answer2', version=1), None)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.v2_cache.add('answer2', 37)
        self.assertEqual(self.cache.get('answer2', version=1), None)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.v2_cache.add('answer2', 37, version=1)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        # v2 add, default version = 2, but manually override version = 1
        self.v2_cache.add('answer3', 42, version=1)
        self.assertEqual(self.cache.get('answer3', version=1), 42)
        self.assertEqual(self.cache.get('answer3', version=2), None)
        self.v2_cache.add('answer3', 37, version=1)
        self.assertEqual(self.cache.get('answer3', version=1), 42)
        self.assertEqual(self.cache.get('answer3', version=2), None)
        self.v2_cache.add('answer3', 37)
        self.assertEqual(self.cache.get('answer3', version=1), 42)
        self.assertEqual(self.cache.get('answer3', version=2), 37)
    def test_cache_versioning_has_key(self):
        self.cache.set('answer1', 42)
        # has_key
        self.assertTrue(self.cache.has_key('answer1'))
        self.assertTrue(self.cache.has_key('answer1', version=1))
        self.assertFalse(self.cache.has_key('answer1', version=2))
        self.assertFalse(self.v2_cache.has_key('answer1'))
        self.assertTrue(self.v2_cache.has_key('answer1', version=1))
        self.assertFalse(self.v2_cache.has_key('answer1', version=2))
    def test_cache_versioning_delete(self):
        self.cache.set('answer1', 37, version=1)
        self.cache.set('answer1', 42, version=2)
        self.cache.delete('answer1')
        self.assertEqual(self.cache.get('answer1', version=1), None)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.set('answer2', 37, version=1)
        self.cache.set('answer2', 42, version=2)
        self.cache.delete('answer2', version=2)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), None)
        self.cache.set('answer3', 37, version=1)
        self.cache.set('answer3', 42, version=2)
        self.v2_cache.delete('answer3')
        self.assertEqual(self.cache.get('answer3', version=1), 37)
        self.assertEqual(self.cache.get('answer3', version=2), None)
        self.cache.set('answer4', 37, version=1)
        self.cache.set('answer4', 42, version=2)
        self.v2_cache.delete('answer4', version=1)
        self.assertEqual(self.cache.get('answer4', version=1), None)
        self.assertEqual(self.cache.get('answer4', version=2), 42)
    def test_cache_versioning_incr_decr(self):
        self.cache.set('answer1', 37, version=1)
        self.cache.set('answer1', 42, version=2)
        self.cache.incr('answer1')
        self.assertEqual(self.cache.get('answer1', version=1), 38)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.decr('answer1')
        self.assertEqual(self.cache.get('answer1', version=1), 37)
        self.assertEqual(self.cache.get('answer1', version=2), 42)
        self.cache.set('answer2', 37, version=1)
        self.cache.set('answer2', 42, version=2)
        self.cache.incr('answer2', version=2)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), 43)
        self.cache.decr('answer2', version=2)
        self.assertEqual(self.cache.get('answer2', version=1), 37)
        self.assertEqual(self.cache.get('answer2', version=2), 42)
        self.cache.set('answer3', 37, version=1)
        self.cache.set('answer3', 42, version=2)
        self.v2_cache.incr('answer3')
        self.assertEqual(self.cache.get('answer3', version=1), 37)
        self.assertEqual(self.cache.get('answer3', version=2), 43)
        self.v2_cache.decr('answer3')
        self.assertEqual(self.cache.get('answer3', version=1), 37)
        self.assertEqual(self.cache.get('answer3', version=2), 42)
        self.cache.set('answer4', 37, version=1)
        self.cache.set('answer4', 42, version=2)
        self.v2_cache.incr('answer4', version=1)
        self.assertEqual(self.cache.get('answer4', version=1), 38)
        self.assertEqual(self.cache.get('answer4', version=2), 42)
        self.v2_cache.decr('answer4', version=1)
        self.assertEqual(self.cache.get('answer4', version=1), 37)
        self.assertEqual(self.cache.get('answer4', version=2), 42)
    def test_cache_versioning_get_set_many(self):
        # set, using default version = 1
        self.cache.set_many({'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.cache.get_many(['ford1','arthur1']),
            {'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.cache.get_many(['ford1','arthur1'], version=1),
            {'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.cache.get_many(['ford1','arthur1'], version=2), {})
        self.assertEqual(self.v2_cache.get_many(['ford1','arthur1']), {})
        self.assertEqual(self.v2_cache.get_many(['ford1','arthur1'], version=1),
            {'ford1': 37, 'arthur1': 42})
        self.assertEqual(self.v2_cache.get_many(['ford1','arthur1'], version=2), {})
        # set, default version = 1, but manually override version = 2
        self.cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
        self.assertEqual(self.cache.get_many(['ford2','arthur2']), {})
        self.assertEqual(self.cache.get_many(['ford2','arthur2'], version=1), {})
        self.assertEqual(self.cache.get_many(['ford2','arthur2'], version=2),
            {'ford2': 37, 'arthur2': 42})
        self.assertEqual(self.v2_cache.get_many(['ford2','arthur2']),
            {'ford2': 37, 'arthur2': 42})
        self.assertEqual(self.v2_cache.get_many(['ford2','arthur2'], version=1), {})
        self.assertEqual(self.v2_cache.get_many(['ford2','arthur2'], version=2),
            {'ford2': 37, 'arthur2': 42})
        # v2 set, using default version = 2
        self.v2_cache.set_many({'ford3': 37, 'arthur3': 42})
        self.assertEqual(self.cache.get_many(['ford3','arthur3']), {})
        self.assertEqual(self.cache.get_many(['ford3','arthur3'], version=1), {})
        self.assertEqual(self.cache.get_many(['ford3','arthur3'], version=2),
            {'ford3': 37, 'arthur3': 42})
        self.assertEqual(self.v2_cache.get_many(['ford3','arthur3']),
            {'ford3': 37, 'arthur3': 42})
        self.assertEqual(self.v2_cache.get_many(['ford3','arthur3'], version=1), {})
        self.assertEqual(self.v2_cache.get_many(['ford3','arthur3'], version=2),
            {'ford3': 37, 'arthur3': 42})
        # v2 set, default version = 2, but manually override version = 1
        self.v2_cache.set_many({'ford4': 37, 'arthur4': 42}, version=1)
        self.assertEqual(self.cache.get_many(['ford4','arthur4']),
            {'ford4': 37, 'arthur4': 42})
        self.assertEqual(self.cache.get_many(['ford4','arthur4'], version=1),
            {'ford4': 37, 'arthur4': 42})
        self.assertEqual(self.cache.get_many(['ford4','arthur4'], version=2), {})
        self.assertEqual(self.v2_cache.get_many(['ford4','arthur4']), {})
        self.assertEqual(self.v2_cache.get_many(['ford4','arthur4'], version=1),
            {'ford4': 37, 'arthur4': 42})
        self.assertEqual(self.v2_cache.get_many(['ford4','arthur4'], version=2), {})
    def test_incr_version(self):
        self.cache.set('answer', 42, version=2)
        self.assertEqual(self.cache.get('answer'), None)
        self.assertEqual(self.cache.get('answer', version=1), None)
        self.assertEqual(self.cache.get('answer', version=2), 42)
        self.assertEqual(self.cache.get('answer', version=3), None)
        # incr_version moves the value from version 2 to version 3.
        self.assertEqual(self.cache.incr_version('answer', version=2), 3)
        self.assertEqual(self.cache.get('answer'), None)
        self.assertEqual(self.cache.get('answer', version=1), None)
        self.assertEqual(self.cache.get('answer', version=2), None)
        self.assertEqual(self.cache.get('answer', version=3), 42)
        self.v2_cache.set('answer2', 42)
        self.assertEqual(self.v2_cache.get('answer2'), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=3), None)
        self.assertEqual(self.v2_cache.incr_version('answer2'), 3)
        self.assertEqual(self.v2_cache.get('answer2'), None)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), None)
        self.assertEqual(self.v2_cache.get('answer2', version=3), 42)
        self.assertRaises(ValueError, self.cache.incr_version, 'does_not_exist')
    def test_decr_version(self):
        self.cache.set('answer', 42, version=2)
        self.assertEqual(self.cache.get('answer'), None)
        self.assertEqual(self.cache.get('answer', version=1), None)
        self.assertEqual(self.cache.get('answer', version=2), 42)
        # decr_version moves the value from version 2 to version 1.
        self.assertEqual(self.cache.decr_version('answer', version=2), 1)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.get('answer', version=1), 42)
        self.assertEqual(self.cache.get('answer', version=2), None)
        self.v2_cache.set('answer2', 42)
        self.assertEqual(self.v2_cache.get('answer2'), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=1), None)
        self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
        self.assertEqual(self.v2_cache.decr_version('answer2'), 1)
        self.assertEqual(self.v2_cache.get('answer2'), None)
        self.assertEqual(self.v2_cache.get('answer2', version=1), 42)
        self.assertEqual(self.v2_cache.get('answer2', version=2), None)
        self.assertRaises(ValueError, self.cache.decr_version, 'does_not_exist', version=2)
    def test_custom_key_func(self):
        # Two caches with different key functions aren't visible to each other
        self.cache.set('answer1', 42)
        self.assertEqual(self.cache.get('answer1'), 42)
        self.assertEqual(self.custom_key_cache.get('answer1'), None)
        self.assertEqual(self.custom_key_cache2.get('answer1'), None)
        # The two custom-key caches share key construction, so they see
        # each other's entries (but not the default cache's).
        self.custom_key_cache.set('answer2', 42)
        self.assertEqual(self.cache.get('answer2'), None)
        self.assertEqual(self.custom_key_cache.get('answer2'), 42)
        self.assertEqual(self.custom_key_cache2.get('answer2'), 42)
    def test_cache_write_unpickable_object(self):
        # Responses with cookies (a SimpleCookie is not trivially picklable)
        # must survive a round trip through the cache middleware.
        update_middleware = UpdateCacheMiddleware()
        update_middleware.cache = self.cache
        fetch_middleware = FetchFromCacheMiddleware()
        fetch_middleware.cache = self.cache
        request = self._get_request_cache('/cache/test')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data, None)
        response = HttpResponse()
        content = 'Testing cookie serialization.'
        response.content = content
        response.set_cookie('foo', 'bar')
        update_middleware.process_response(request, response)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content)
        self.assertEqual(get_cache_data.cookies, response.cookies)
        # Re-caching the fetched response must also preserve the cookies.
        update_middleware.process_response(request, get_cache_data)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content)
        self.assertEqual(get_cache_data.cookies, response.cookies)
def custom_key_func(key, key_prefix, version):
    """A customized cache key function: 'CUSTOM-<prefix>-<version>-<key>'."""
    parts = [key_prefix, str(version), key]
    return 'CUSTOM-' + '-'.join(parts)
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    """Run the common cache tests against the database backend."""
    backend_name = 'django.core.cache.backends.db.DatabaseCache'
    def setUp(self):
        # Spaces are used in the table name to ensure quoting/escaping is working
        self._table_name = 'test cache table'
        management.call_command('createcachetable', self._table_name, verbosity=0, interactive=False)
        # All variants point at the same table so they share storage.
        self.cache = get_cache(self.backend_name, LOCATION=self._table_name, OPTIONS={'MAX_ENTRIES': 30})
        self.prefix_cache = get_cache(self.backend_name, LOCATION=self._table_name, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, LOCATION=self._table_name, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, LOCATION=self._table_name, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=self._table_name, KEY_FUNCTION='regressiontests.cache.tests.custom_key_func')
    def tearDown(self):
        # Drop the cache table created in setUp so each test starts clean.
        from django.db import connection
        cursor = connection.cursor()
        cursor.execute('DROP TABLE %s' % connection.ops.quote_name(self._table_name))
        connection.commit()
    def test_cull(self):
        self.perform_cull_test(50, 29)
    def test_zero_cull(self):
        # CULL_FREQUENCY=0 means "clear everything" when the cache overflows.
        self.cache = get_cache(self.backend_name, LOCATION=self._table_name, OPTIONS={'MAX_ENTRIES': 30, 'CULL_FREQUENCY': 0})
        self.perform_cull_test(50, 18)
    def test_old_initialization(self):
        # Legacy URI-style backend configuration should still work.
        self.cache = get_cache('db://%s?max_entries=30&cull_frequency=0' % self._table_name)
        self.perform_cull_test(50, 18)
    def test_second_call_doesnt_crash(self):
        # Running createcachetable twice must fail loudly, not crash.
        with self.assertRaisesRegexp(management.CommandError,
                "Cache table 'test cache table' could not be created"):
            management.call_command(
                'createcachetable',
                self._table_name,
                verbosity=0,
                interactive=False
            )
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    """Re-run all DatabaseCache tests with USE_TZ=True (aware datetimes)."""
    pass
class DBCacheRouter(object):
    """A router that puts the cache table on the 'other' database."""

    @staticmethod
    def _is_cache_model(model):
        # The database-cache model is registered under the 'django_cache' app.
        return model._meta.app_label == 'django_cache'

    def db_for_read(self, model, **hints):
        # Route cache-table reads to 'other'; express no opinion otherwise.
        return 'other' if self._is_cache_model(model) else None

    def db_for_write(self, model, **hints):
        # Route cache-table writes to 'other'; express no opinion otherwise.
        return 'other' if self._is_cache_model(model) else None

    def allow_syncdb(self, db, model):
        # Only allow the cache table to be created on the 'other' database.
        if self._is_cache_model(model):
            return db == 'other'
        return None
class CreateCacheTableForDBCacheTests(TestCase):
    """createcachetable must respect database-router placement decisions."""
    multi_db = True
    def test_createcachetable_observes_database_router(self):
        # Temporarily install a router that pins the cache table to 'other'.
        old_routers = router.routers
        try:
            router.routers = [DBCacheRouter()]
            # cache table should not be created on 'default'
            with self.assertNumQueries(0, using='default'):
                management.call_command('createcachetable', 'cache_table',
                                        database='default',
                                        verbosity=0, interactive=False)
            # cache table should be created on 'other'
            # one query is used to create the table and another one the index
            with self.assertNumQueries(2, using='other'):
                management.call_command('createcachetable', 'cache_table',
                                        database='other',
                                        verbosity=0, interactive=False)
        finally:
            # Restore the original routers even if an assertion failed.
            router.routers = old_routers
class LocMemCacheTests(unittest.TestCase, BaseCacheTests):
    """Run the common cache tests against the local-memory backend."""
    backend_name = 'django.core.cache.backends.locmem.LocMemCache'
    def setUp(self):
        self.cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30})
        self.prefix_cache = get_cache(self.backend_name, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30}, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30}, KEY_FUNCTION='regressiontests.cache.tests.custom_key_func')
        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache.
        # (Each LocMemCache instance otherwise gets its own private dicts.)
        self.prefix_cache._cache = self.cache._cache
        self.prefix_cache._expire_info = self.cache._expire_info
        self.v2_cache._cache = self.cache._cache
        self.v2_cache._expire_info = self.cache._expire_info
        self.custom_key_cache._cache = self.cache._cache
        self.custom_key_cache._expire_info = self.cache._expire_info
        self.custom_key_cache2._cache = self.cache._cache
        self.custom_key_cache2._expire_info = self.cache._expire_info
    def tearDown(self):
        self.cache.clear()
    def test_cull(self):
        self.perform_cull_test(50, 29)
    def test_zero_cull(self):
        # CULL_FREQUENCY=0 means "clear everything" when the cache overflows.
        self.cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30, 'CULL_FREQUENCY': 0})
        self.perform_cull_test(50, 19)
    def test_old_initialization(self):
        # Legacy URI-style backend configuration should still work.
        self.cache = get_cache('locmem://?max_entries=30&cull_frequency=0')
        self.perform_cull_test(50, 19)
    def test_multiple_caches(self):
        "Check that multiple locmem caches are isolated"
        # Same LOCATION -> shared store; different LOCATION -> isolated.
        mirror_cache = get_cache(self.backend_name)
        other_cache = get_cache(self.backend_name, LOCATION='other')
        self.cache.set('value1', 42)
        self.assertEqual(mirror_cache.get('value1'), 42)
        self.assertEqual(other_cache.get('value1'), None)
    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        _key = self.cache.make_key(key)
        self.cache.set(key, 1, timeout=self.cache.default_timeout*10)
        expire = self.cache._expire_info[_key]
        self.cache.incr(key)
        self.assertEqual(expire, self.cache._expire_info[_key])
        self.cache.decr(key)
        self.assertEqual(expire, self.cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain a cache backend setting that points at
# your memcache server.
@unittest.skipUnless(
    settings.CACHES[DEFAULT_CACHE_ALIAS]['BACKEND'].startswith('django.core.cache.backends.memcached.'),
    "memcached not available")
class MemcachedCacheTests(unittest.TestCase, BaseCacheTests):
    """Run the common cache tests against a live memcached server.

    Only executed when the test settings point the default cache at a
    memcached backend; otherwise the whole class is skipped.
    """
    backend_name = 'django.core.cache.backends.memcached.MemcachedCache'
    def setUp(self):
        # Reuse the memcached server address from the test settings.
        name = settings.CACHES[DEFAULT_CACHE_ALIAS]['LOCATION']
        self.cache = get_cache(self.backend_name, LOCATION=name)
        self.prefix_cache = get_cache(self.backend_name, LOCATION=name, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, LOCATION=name, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, LOCATION=name, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=name, KEY_FUNCTION='regressiontests.cache.tests.custom_key_func')
    def tearDown(self):
        self.cache.clear()
    def test_invalid_keys(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.
        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        self.assertRaises(Exception, self.cache.set, 'key with spaces', 'value')
        # memcached limits key length to 250
        self.assertRaises(Exception, self.cache.set, 'a' * 251, 'value')
class FileBasedCacheTests(unittest.TestCase, BaseCacheTests):
    """
    Specific test cases for the file-based cache.
    """
    backend_name = 'django.core.cache.backends.filebased.FileBasedCache'
    def setUp(self):
        # Each test run gets a fresh temporary directory as the cache root.
        self.dirname = tempfile.mkdtemp()
        self.cache = get_cache(self.backend_name, LOCATION=self.dirname, OPTIONS={'MAX_ENTRIES': 30})
        self.prefix_cache = get_cache(self.backend_name, LOCATION=self.dirname, KEY_PREFIX='cacheprefix')
        self.v2_cache = get_cache(self.backend_name, LOCATION=self.dirname, VERSION=2)
        self.custom_key_cache = get_cache(self.backend_name, LOCATION=self.dirname, KEY_FUNCTION=custom_key_func)
        self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=self.dirname, KEY_FUNCTION='regressiontests.cache.tests.custom_key_func')
    def tearDown(self):
        self.cache.clear()
    def test_hashing(self):
        """Test that keys are hashed into subdirectories correctly"""
        self.cache.set("foo", "bar")
        # Layout is <md5[:2]>/<md5[2:4]>/<md5[4:]> under the cache root.
        key = self.cache.make_key("foo")
        keyhash = hashlib.md5(key).hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assertTrue(os.path.exists(keypath))
    def test_subdirectory_removal(self):
        """
        Make sure that the created subdirectories are correctly removed when empty.
        """
        self.cache.set("foo", "bar")
        key = self.cache.make_key("foo")
        keyhash = hashlib.md5(key).hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assertTrue(os.path.exists(keypath))
        # Deleting the entry should prune both now-empty hash directories.
        self.cache.delete("foo")
        self.assertTrue(not os.path.exists(keypath))
        self.assertTrue(not os.path.exists(os.path.dirname(keypath)))
        self.assertTrue(not os.path.exists(os.path.dirname(os.path.dirname(keypath))))
    def test_cull(self):
        self.perform_cull_test(50, 29)
    def test_old_initialization(self):
        # Legacy URI-style backend configuration should still work.
        self.cache = get_cache('file://%s?max_entries=30' % self.dirname)
        self.perform_cull_test(50, 29)
class CustomCacheKeyValidationTests(unittest.TestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        # The liberal_backend test helper accepts keys the builtin
        # validation would warn about.
        cache = get_cache('regressiontests.cache.liberal_backend://')
        # this key is both longer than 250 characters, and has spaces
        key = 'some key with spaces' * 15
        val = 'a value'
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)
class GetCacheTests(unittest.TestCase):
    """Tests for the get_cache() factory function itself."""
    def test_simple(self):
        # A backend URI resolves to the matching backend class.
        cache = get_cache('locmem://')
        from django.core.cache.backends.locmem import LocMemCache
        self.assertTrue(isinstance(cache, LocMemCache))
        # The module-level `cache` object is of the 'default' alias's class.
        from django.core.cache import cache
        self.assertTrue(isinstance(cache, get_cache('default').__class__))
        # Extra keyword arguments override the backend's defaults.
        cache = get_cache(
            'django.core.cache.backends.dummy.DummyCache', **{'TIMEOUT': 120})
        self.assertEqual(cache.default_timeout, 120)
        # Unknown backend names raise rather than failing silently.
        self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
    def test_close(self):
        # The request_finished signal closes caches that implement close().
        from django.core import signals
        cache = get_cache('regressiontests.cache.closeable_cache.CacheClass')
        self.assertFalse(cache.closed)
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class CacheUtils(TestCase):
    """TestCase for django.utils.cache functions."""
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, path, method='GET'):
        # Build a minimal HttpRequest by hand, bypassing the test client.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = "/cache/%s" % path
        return request
    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)
    def test_get_cache_key(self):
        request = self._get_request(self.path)
        response = HttpResponse()
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The literal keys below are tied to the key format and md5 digests.
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
    def test_get_cache_key_with_query(self):
        request = self._get_request(self.path + '?test=1')
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.bd889c5a59603af44333ed21504db3cd.d41d8cd98f00b204e9800998ecf8427e')
    def test_learn_cache_key(self):
        request = self._get_request(self.path, 'HEAD')
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
    def test_patch_cache_control(self):
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private' : True}, set(['private'])),
            # Test whether private/public attributes are mutually exclusive
            ('private', {'private' : True}, set(['private'])),
            ('private', {'public' : True}, set(['public'])),
            ('public', {'public' : True}, set(['public'])),
            ('public', {'private' : True}, set(['private'])),
            ('must-revalidate,max-age=60,private', {'public' : True}, set(['must-revalidate', 'max-age=60', 'public'])),
            ('must-revalidate,max-age=60,public', {'private' : True}, set(['must-revalidate', 'max-age=60', 'private'])),
            ('must-revalidate,max-age=60', {'public' : True}, set(['must-revalidate', 'max-age=60', 'public'])),
        )
        cc_delim_re = re.compile(r'\s*,\s*')
        for initial_cc, newheaders, expected_cc in tests:
            response = HttpResponse()
            if initial_cc is not None:
                response['Cache-Control'] = initial_cc
            patch_cache_control(response, **newheaders)
            # Compare as sets: directive ordering is not significant.
            parts = set(cc_delim_re.split(response['Cache-Control']))
            self.assertEqual(parts, expected_cc)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix',
        },
    },
)
class PrefixedCacheUtils(CacheUtils):
    # Re-run every CacheUtils test with a KEY_PREFIX configured on the backend.
    pass
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=60,
    CACHE_MIDDLEWARE_KEY_PREFIX='test',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
)
class CacheHEADTest(TestCase):
    """Tests for how HEAD requests interact with the cache middleware."""
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, method):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = self.path
        return request
    def _get_request_cache(self, method):
        # Same request, but flagged so UpdateCacheMiddleware will store it.
        request = self._get_request(method)
        request._cache_update_cache = True
        return request
    def _set_cache(self, request, msg):
        # Push a response with body *msg* through the update middleware.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)
    def test_head_caches_correctly(self):
        test_content = 'test content'
        request = self._get_request_cache('HEAD')
        self._set_cache(request, test_content)
        request = self._get_request('HEAD')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(test_content, get_cache_data.content)
    def test_head_with_cached_get(self):
        # A HEAD request can be served from a previously cached GET response.
        test_content = 'test content'
        request = self._get_request_cache('GET')
        self._set_cache(request, test_content)
        request = self._get_request('HEAD')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(test_content, get_cache_data.content)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    LANGUAGES=(
        ('en', 'English'),
        ('es', 'Spanish'),
    ),
)
class CacheI18nTest(TestCase):
    """Tests that cache keys vary on language / locale / time zone settings."""
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, method='GET'):
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = self.path
        return request
    def _get_request_cache(self, query_string=None):
        # GET request flagged for cache update, with an optional query string.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        if query_string:
            request.META['QUERY_STRING'] = query_string
            request.GET = QueryDict(query_string)
        request.path = request.path_info = self.path
        request._cache_update_cache = True
        request.method = 'GET'
        request.session = {}
        return request
    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation(self):
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        # learn_cache_key and get_cache_key must agree on the key.
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
    def test_cache_key_i18n_formatting(self):
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_i18n_timezone(self):
        request = self._get_request()
        # This is tightly coupled to the implementation,
        # but it's the most straightforward way to test the key.
        tz = force_unicode(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)
    @override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n (self):
        request = self._get_request()
        lang = translation.get_language()
        tz = force_unicode(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
        self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_with_non_ascii_tzname(self):
        # Regression test for #17476
        class CustomTzName(timezone.UTC):
            name = ''
            def tzname(self, dt):
                return self.name
        request = self._get_request()
        response = HttpResponse()
        with timezone.override(CustomTzName()):
            CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(sanitized_name, learn_cache_key(request, response),
                    "Cache keys should include the time zone name when time zones are active")
            CustomTzName.name = 'Hora estándar de Argentina' # unicode
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(sanitized_name, learn_cache_key(request, response),
                    "Cache keys should include the time zone name when time zones are active")
    @override_settings(
            CACHE_MIDDLEWARE_KEY_PREFIX="test",
            CACHE_MIDDLEWARE_SECONDS=60,
            USE_ETAGS=True,
            USE_I18N=True,
    )
    def test_middleware(self):
        def set_cache(request, lang, msg):
            # Activate *lang* and cache a response with body *msg*.
            translation.activate(lang)
            response = HttpResponse()
            response.content = msg
            return UpdateCacheMiddleware().process_response(request, response)
        # cache with non empty request.GET
        request = self._get_request_cache(query_string='foo=bar&other=true')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # first access, cache must return None
        self.assertEqual(get_cache_data, None)
        response = HttpResponse()
        content = 'Check for cache with QUERY_STRING'
        response.content = content
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # cache must return content
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content)
        # different QUERY_STRING, cache must be empty
        request = self._get_request_cache(query_string='foo=bar&somethingelse=true')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data, None)
        # i18n tests
        en_message ="Hello world!"
        es_message ="Hola mundo!"
        request = self._get_request_cache()
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # Check that we can recover the cache
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, en_message)
        # Check that we use etags
        self.assertTrue(get_cache_data.has_header('ETag'))
        # Check that we can disable etags
        with self.settings(USE_ETAGS=False):
            request._cache_update_cache = True
            set_cache(request, 'en', en_message)
            get_cache_data = FetchFromCacheMiddleware().process_request(request)
            self.assertFalse(get_cache_data.has_header('ETag'))
        # change the session language and set content
        request = self._get_request_cache()
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message)
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message)
        # reset the language
        translation.deactivate()
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix'
        },
    },
)
class PrefixedCacheI18nTest(CacheI18nTest):
    # Re-run every CacheI18nTest test with a KEY_PREFIX configured.
    pass
def hello_world_view(request, value):
    """Trivial view used as a caching target by the middleware tests below."""
    body = 'Hello World %s' % value
    return HttpResponse(body)
@override_settings(
    CACHE_MIDDLEWARE_ALIAS='other',
    CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
    CACHE_MIDDLEWARE_SECONDS=30,
    CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other',
            'TIMEOUT': '1',
        },
    },
)
class CacheMiddlewareTest(TestCase):
    """Tests for CacheMiddleware used both as middleware and as a decorator."""
    # The following tests will need to be modified in Django 1.6 to not use
    # deprecated ways of using the cache_page decorator that will be removed in
    # such version
    def setUp(self):
        self.factory = RequestFactory()
        self.default_cache = get_cache('default')
        self.other_cache = get_cache('other')
        # Silence the cache_page deprecation warnings triggered on purpose.
        self.save_warnings_state()
        warnings.filterwarnings('ignore', category=DeprecationWarning,
            module='django.views.decorators.cache')
    def tearDown(self):
        self.restore_warnings_state()
        self.default_cache.clear()
        self.other_cache.clear()
    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
        Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
        appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()
        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')
        self.assertEqual(middleware.cache_anonymous_only, False)
        # If arguments are being passed in construction, it's being used as a decorator.
        # First, test with "defaults":
        as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
        self.assertEqual(as_view_decorator.cache_timeout, 300) # Timeout value for 'default' cache, i.e. 300
        self.assertEqual(as_view_decorator.key_prefix, '')
        self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
        self.assertEqual(as_view_decorator.cache_anonymous_only, False)
        # Next, test with custom values:
        as_view_decorator_with_custom = CacheMiddleware(cache_anonymous_only=True, cache_timeout=60, cache_alias='other', key_prefix='foo')
        self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
        self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
        self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
        self.assertEqual(as_view_decorator_with_custom.cache_anonymous_only, True)
    def test_middleware(self):
        middleware = CacheMiddleware()
        prefix_middleware = CacheMiddleware(key_prefix='prefix1')
        timeout_middleware = CacheMiddleware(cache_timeout=1)
        request = self.factory.get('/view/')
        # Put the request through the request middleware
        result = middleware.process_request(request)
        self.assertEqual(result, None)
        response = hello_world_view(request, '1')
        # Now put the response through the response middleware
        response = middleware.process_response(request, response)
        # Repeating the request should result in a cache hit
        # (assertNotEqual: the deprecated assertNotEquals alias was replaced
        # for consistency with the rest of this file).
        result = middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, 'Hello World 1')
        # The same request through a different middleware won't hit
        result = prefix_middleware.process_request(request)
        self.assertEqual(result, None)
        # The same request with a timeout _will_ hit
        result = timeout_middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, 'Hello World 1')
    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_wont_cause_session_access(self):
        """ The cache middleware shouldn't cause a session access due to
        CACHE_MIDDLEWARE_ANONYMOUS_ONLY if nothing else has accessed the
        session. Refs 13283 """
        from django.contrib.sessions.middleware import SessionMiddleware
        from django.contrib.auth.middleware import AuthenticationMiddleware
        middleware = CacheMiddleware()
        session_middleware = SessionMiddleware()
        auth_middleware = AuthenticationMiddleware()
        request = self.factory.get('/view_anon/')
        # Put the request through the request middleware
        session_middleware.process_request(request)
        auth_middleware.process_request(request)
        result = middleware.process_request(request)
        self.assertEqual(result, None)
        response = hello_world_view(request, '1')
        # Now put the response through the response middleware
        session_middleware.process_response(request, response)
        response = middleware.process_response(request, response)
        self.assertEqual(request.session.accessed, False)
    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_with_cache_page(self):
        """CACHE_MIDDLEWARE_ANONYMOUS_ONLY should still be effective when used
        with the cache_page decorator: the response to a request from an
        authenticated user should not be cached."""
        request = self.factory.get('/view_anon/')
        class MockAuthenticatedUser(object):
            def is_authenticated(self):
                return True
        class MockAccessedSession(object):
            accessed = True
        request.user = MockAuthenticatedUser()
        request.session = MockAccessedSession()
        response = cache_page(hello_world_view)(request, '1')
        self.assertFalse("Cache-Control" in response)
    def test_view_decorator(self):
        # decorate the same view with different cache decorators
        default_view = cache_page(hello_world_view)
        default_with_prefix_view = cache_page(key_prefix='prefix1')(hello_world_view)
        explicit_default_view = cache_page(cache='default')(hello_world_view)
        explicit_default_with_prefix_view = cache_page(cache='default', key_prefix='prefix1')(hello_world_view)
        other_view = cache_page(cache='other')(hello_world_view)
        other_with_prefix_view = cache_page(cache='other', key_prefix='prefix2')(hello_world_view)
        other_with_timeout_view = cache_page(3, cache='other', key_prefix='prefix3')(hello_world_view)
        request = self.factory.get('/view/')
        # Request the view once
        response = default_view(request, '1')
        self.assertEqual(response.content, 'Hello World 1')
        # Request again -- hit the cache
        response = default_view(request, '2')
        self.assertEqual(response.content, 'Hello World 1')
        # Requesting the same view with the explicit cache should yield the same result
        response = explicit_default_view(request, '3')
        self.assertEqual(response.content, 'Hello World 1')
        # Requesting with a prefix will hit a different cache key
        response = explicit_default_with_prefix_view(request, '4')
        self.assertEqual(response.content, 'Hello World 4')
        # Hitting the same view again gives a cache hit
        response = explicit_default_with_prefix_view(request, '5')
        self.assertEqual(response.content, 'Hello World 4')
        # And going back to the implicit cache will hit the same cache
        response = default_with_prefix_view(request, '6')
        self.assertEqual(response.content, 'Hello World 4')
        # Requesting from an alternate cache won't hit cache
        response = other_view(request, '7')
        self.assertEqual(response.content, 'Hello World 7')
        # But a repeated hit will hit cache
        response = other_view(request, '8')
        self.assertEqual(response.content, 'Hello World 7')
        # And prefixing the alternate cache yields yet another cache entry
        response = other_with_prefix_view(request, '9')
        self.assertEqual(response.content, 'Hello World 9')
        # Request from the alternate cache with a new prefix and a custom timeout
        response = other_with_timeout_view(request, '10')
        self.assertEqual(response.content, 'Hello World 10')
        # But if we wait a couple of seconds...
        time.sleep(2)
        # ... the default cache will still hit
        cache = get_cache('default')
        response = default_view(request, '11')
        self.assertEqual(response.content, 'Hello World 1')
        # ... the default cache with a prefix will still hit
        response = default_with_prefix_view(request, '12')
        self.assertEqual(response.content, 'Hello World 4')
        # ... the explicit default cache will still hit
        response = explicit_default_view(request, '13')
        self.assertEqual(response.content, 'Hello World 1')
        # ... the explicit default cache with a prefix will still hit
        response = explicit_default_with_prefix_view(request, '14')
        self.assertEqual(response.content, 'Hello World 4')
        # .. but a rapidly expiring cache won't hit
        response = other_view(request, '15')
        self.assertEqual(response.content, 'Hello World 15')
        # .. even if it has a prefix
        response = other_with_prefix_view(request, '16')
        self.assertEqual(response.content, 'Hello World 16')
        # ... but a view with a custom timeout will still hit
        response = other_with_timeout_view(request, '17')
        self.assertEqual(response.content, 'Hello World 10')
        # And if we wait a few more seconds
        time.sleep(2)
        # the custom timeout cache will miss
        response = other_with_timeout_view(request, '18')
        self.assertEqual(response.content, 'Hello World 18')
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
    """
    Tests various headers w/ TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway but the Etag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    def setUp(self):
        self.path = '/cache/test/'
        self.cache = get_cache('default')
    def tearDown(self):
        self.cache.clear()
    def _get_request(self, path, method='GET'):
        # Build a minimal HttpRequest by hand, bypassing the test client.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.method = method
        request.path = request.path_info = "/cache/%s" % path
        return request
    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = TemplateResponse(HttpResponse(), Template("This is a test"))
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)
    def test_get_cache_key(self):
        request = self._get_request(self.path)
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The literal keys below are tied to the key format and md5 digests.
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
    def test_get_cache_key_with_query(self):
        request = self._get_request(self.path + '?test=1')
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.bd889c5a59603af44333ed21504db3cd.d41d8cd98f00b204e9800998ecf8427e')
    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))
    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        # The ETag cannot be set until the template has been rendered.
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
    """The admin must render correctly with and without ETag generation."""
    # See https://code.djangoproject.com/ticket/16003
    urls = "regressiontests.admin_views.urls"
    def test_admin(self):
        with self.settings(USE_ETAGS=False):
            response = self.client.get('/test_admin/admin/')
            self.assertEqual(response.status_code, 200)
            self.assertFalse(response.has_header('ETag'))
        with self.settings(USE_ETAGS=True):
            response = self.client.get('/test_admin/admin/')
            self.assertEqual(response.status_code, 200)
            self.assertTrue(response.has_header('ETag'))
| bsd-3-clause |
r4mp/mete_python_cherrypy_jinja2 | mete/controllers/drinks.py | 1 | 1135 | from controllers.basehandler import BaseHandler
from models.drink import Drink
import json
import cherrypy
class Drinks(BaseHandler):
    """CherryPy controller for listing and creating Drink records.

    NOTE(review): the @cherrypy.expose decorators are commented out —
    presumably exposure is configured elsewhere (e.g. via a dispatcher);
    confirm, otherwise these handlers are unreachable.
    """
    #@cherrypy.expose
    def index(self):
        # Render the full drink list from the request-scoped DB session.
        drinks = Drink.list(cherrypy.request.db)
        template = self.templateEnv.get_template('drinks/index.html')
        return template.render(drinks=drinks)
    #@cherrypy.expose(alias="new.json")
    #@cherrypy.tools.json_out()
    #def new_json(self):
    #    return [ drink.to_JSON() for drink in Drink.list(cherrypy.request.db) ]
    #@cherrypy.expose
    def new(self, name = "", price = 0, bottle_size = 0, caffeine = 0, logo_url = ""):
        # GET renders the creation form; POST persists a new Drink.
        if cherrypy.request.method == 'GET':
            template = self.templateEnv.get_template('drinks/new.html')
            return template.render(drinks=[])
        elif cherrypy.request.method == 'POST':
            # NOTE(review): form values arrive as strings; presumably the
            # Drink model / DB layer coerces price, bottle_size and caffeine
            # to numbers — confirm.
            drink = Drink()
            drink.name = name
            drink.price = price
            drink.bottle_size = bottle_size
            drink.caffeine = caffeine
            drink.logo_url = logo_url
            # NOTE(review): the session is only add()ed to, never committed
            # here, and no response body/redirect is returned on POST —
            # presumably a per-request SQLAlchemy plugin commits on request
            # end; confirm both behaviors are intended.
            cherrypy.request.db.add(drink)
        else:
            # Other HTTP methods are silently ignored.
            pass
| agpl-3.0 |
YongseopKim/crosswalk-test-suite | webapi/webapi-resourcetiming-w3c-tests/inst.xpk.py | 357 | 6759 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output live.

    Returns a tuple (exit_code, list_of_output_lines)."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    # stderr is merged into stdout so everything is captured in one stream.
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() is None while the child runs; keep draining stdout until
        # the process has exited and the pipe yields an empty line.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Rewrite pkgcmd invocations to run as the configured user with the
    session D-Bus environment exported; other commands pass through as-is."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Look up the numeric uid of PARAMETERS.user on the target device.

    Returns doCMD's (exit_code, output_lines) tuple."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Return the installed package id for *pkg_name*, or None if not found."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        # Match the listing line containing "[<pkg_name>]", then take the
        # bracketed token immediately after the "pkgid" column label.
        if line.find("[" + pkg_name + "]") != -1:
            pkgidIndex = line.split().index("pkgid")
            test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device (sdb shell or ssh, depending on mode)
    and return doCMD's (exit_code, output_lines) tuple."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device (sdb push or scp).

    Returns True on success, False on failure."""
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush device filesystem buffers after the copy.
    doRemoteCMD("sync")
    # Bug fix: the original returned True when return_code != 0 (i.e. on
    # FAILURE) and False on success, which made callers such as instPKGs()
    # -- which do `if not doRemoteCopy(...)` -- flag successful copies as
    # errors. A zero exit status means success, so report that as True.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .xpk bundled under SCRIPT_DIR and remove PKG_SRC_DIR.

    Returns True only if every step succeeded."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Skip bundled media assets; they are not packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t xpk -q -n %s" % pkg_id)
                # NOTE(review): failure is detected by scanning the output for
                # "Failure" rather than the exit code — presumably pkgcmd's
                # exit status is unreliable; confirm.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Push every bundled .xpk to the device and install it with pkgcmd.

    Returns True only if every step succeeded."""
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Skip bundled media assets; they are not packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
                # Remove the pushed package file once installation was attempted.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # NOTE(review): failure is detected by scanning the output for
                # "Failure" rather than the exit code — presumably pkgcmd's
                # exit status is unreliable; confirm.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return action_status
def main():
    # Parse command-line options, detect/validate the target device,
    # resolve the on-device user environment, then install (-i) or
    # uninstall (-u) the packages. Exits non-zero on any failure.
    # NOTE: this file is Python 2 (print statements, old except syntax).
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        # PARAMETERS is read by the doRemote* helpers, hence global.
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    # Derive the on-device content and package-source paths for this user.
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # No device given: pick the first attached device reported
            # by "sdb devices" (lines look like "<serial>\tdevice\t<name>").
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        # Any mode other than "SDB" is normalized to SSH.
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the device-side user id so later commands can export the
    # correct DBus session address for that user.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action (no -u) is to install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: failures exit non-zero inside main(); reaching this
# sys.exit(0) means the requested install/uninstall succeeded.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
sergei-maertens/django | django/contrib/gis/gdal/srs.py | 21 | 12049 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
    """
    A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
    the SpatialReference object "provide[s] services to represent coordinate
    systems (projections and datums) and to transform between them."
    """
    def __init__(self, srs_input='', srs_type='user'):
        """
        Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be string of OGC Well Known Text (WKT), an integer
        EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
        string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
        """
        if srs_type == 'wkt':
            # WKT input short-circuits: build an empty SRS and import into it.
            self.ptr = capi.new_srs(c_char_p(b''))
            self.import_wkt(srs_input)
            return
        elif isinstance(srs_input, six.string_types):
            try:
                # If SRID is a string, e.g., '4326', then make acceptable
                # as user input.
                srid = int(srs_input)
                srs_input = 'EPSG:%d' % srid
            except ValueError:
                pass
        elif isinstance(srs_input, six.integer_types):
            # EPSG integer code was input.
            srs_type = 'epsg'
        elif isinstance(srs_input, self.ptr_type):
            srs = srs_input
            srs_type = 'ogr'
        else:
            raise TypeError('Invalid SRS type "%s"' % srs_type)
        if srs_type == 'ogr':
            # Input is already an SRS pointer.
            srs = srs_input
        else:
            # Creating a new SRS pointer, using the string buffer.
            buf = c_char_p(b'')
            srs = capi.new_srs(buf)
        # If the pointer is NULL, throw an exception.
        if not srs:
            raise SRSException('Could not create spatial reference from: %s' % srs_input)
        else:
            self.ptr = srs
        # Importing from either the user input string or an integer SRID.
        if srs_type == 'user':
            self.import_user_input(srs_input)
        elif srs_type == 'epsg':
            self.import_epsg(srs_input)
    def __del__(self):
        "Destroys this spatial reference."
        # Guard the C release call: during interpreter shutdown the capi
        # module or self._ptr may already be gone.
        try:
            capi.release_srs(self._ptr)
        except (AttributeError, TypeError):
            pass  # Some part might already have been garbage collected
    def __getitem__(self, target):
        """
        Returns the value of the given string attribute node, None if the node
        doesn't exist. Can also take a tuple as a parameter, (target, child),
        where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
        >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
        >>> print(srs['GEOGCS'])
        WGS 84
        >>> print(srs['DATUM'])
        WGS_1984
        >>> print(srs['AUTHORITY'])
        EPSG
        >>> print(srs['AUTHORITY', 1]) # The authority value
        4326
        >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
        0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbole.
        EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
        9122
        """
        if isinstance(target, tuple):
            return self.attr_value(*target)
        else:
            return self.attr_value(target)
    def __str__(self):
        "The string representation uses 'pretty' WKT."
        return self.pretty_wkt
    # #### SpatialReference Methods ####
    def attr_value(self, target, index=0):
        """
        The attribute value for the given target node (e.g. 'PROJCS'). The index
        keyword specifies an index of the child node to return.
        """
        if not isinstance(target, six.string_types) or not isinstance(index, int):
            raise TypeError
        return capi.get_attr_value(self.ptr, force_bytes(target), index)
    def auth_name(self, target):
        "Returns the authority name for the given string target node."
        return capi.get_auth_name(self.ptr, force_bytes(target))
    def auth_code(self, target):
        "Returns the authority code for the given string target node."
        return capi.get_auth_code(self.ptr, force_bytes(target))
    def clone(self):
        "Returns a clone of this SpatialReference object."
        return SpatialReference(capi.clone_srs(self.ptr))
    def from_esri(self):
        "Morphs this SpatialReference from ESRI's format to EPSG."
        capi.morph_from_esri(self.ptr)
    def identify_epsg(self):
        """
        This method inspects the WKT of this SpatialReference, and will
        add EPSG authority nodes where an EPSG identifier is applicable.
        """
        capi.identify_epsg(self.ptr)
    def to_esri(self):
        "Morphs this SpatialReference to ESRI's format."
        capi.morph_to_esri(self.ptr)
    def validate(self):
        "Checks to see if the given spatial reference is valid."
        capi.srs_validate(self.ptr)
    # #### Name & SRID properties ####
    @property
    def name(self):
        "Returns the name of this Spatial Reference."
        # The name lives under a different root node depending on the
        # kind of coordinate system.
        if self.projected:
            return self.attr_value('PROJCS')
        elif self.geographic:
            return self.attr_value('GEOGCS')
        elif self.local:
            return self.attr_value('LOCAL_CS')
        else:
            return None
    @property
    def srid(self):
        "Returns the SRID of top-level authority, or None if undefined."
        try:
            return int(self.attr_value('AUTHORITY', 1))
        except (TypeError, ValueError):
            return None
    # #### Unit Properties ####
    @property
    def linear_name(self):
        "Returns the name of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def linear_units(self):
        "Returns the value of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def angular_name(self):
        "Returns the name of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def angular_units(self):
        "Returns the value of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def units(self):
        """
        Returns a 2-tuple of the units value and the units name,
        and will automatically determines whether to return the linear
        or angular units.
        """
        units, name = None, None
        if self.projected or self.local:
            units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        elif self.geographic:
            units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        if name is not None:
            name = force_text(name)
        return (units, name)
    # #### Spheroid/Ellipsoid Properties ####
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening)
        """
        return (self.semi_major, self.semi_minor, self.inverse_flattening)
    @property
    def semi_major(self):
        "Returns the Semi Major Axis for this Spatial Reference."
        # byref(c_int()) receives the OGR error code, which is discarded.
        return capi.semi_major(self.ptr, byref(c_int()))
    @property
    def semi_minor(self):
        "Returns the Semi Minor Axis for this Spatial Reference."
        return capi.semi_minor(self.ptr, byref(c_int()))
    @property
    def inverse_flattening(self):
        "Returns the Inverse Flattening for this Spatial Reference."
        return capi.invflattening(self.ptr, byref(c_int()))
    # #### Boolean Properties ####
    @property
    def geographic(self):
        """
        Returns True if this SpatialReference is geographic
        (root node is GEOGCS).
        """
        return bool(capi.isgeographic(self.ptr))
    @property
    def local(self):
        "Returns True if this SpatialReference is local (root node is LOCAL_CS)."
        return bool(capi.islocal(self.ptr))
    @property
    def projected(self):
        """
        Returns True if this SpatialReference is a projected coordinate system
        (root node is PROJCS).
        """
        return bool(capi.isprojected(self.ptr))
    # #### Import Routines #####
    def import_epsg(self, epsg):
        "Imports the Spatial Reference from the EPSG code (an integer)."
        capi.from_epsg(self.ptr, epsg)
    def import_proj(self, proj):
        "Imports the Spatial Reference from a PROJ.4 string."
        capi.from_proj(self.ptr, proj)
    def import_user_input(self, user_input):
        "Imports the Spatial Reference from the given user input string."
        capi.from_user_input(self.ptr, force_bytes(user_input))
    def import_wkt(self, wkt):
        "Imports the Spatial Reference from OGC WKT (string)"
        capi.from_wkt(self.ptr, byref(c_char_p(force_bytes(wkt))))
    def import_xml(self, xml):
        "Imports the Spatial Reference from an XML string."
        capi.from_xml(self.ptr, xml)
    # #### Export Properties ####
    @property
    def wkt(self):
        "Returns the WKT representation of this Spatial Reference."
        return capi.to_wkt(self.ptr, byref(c_char_p()))
    # NOTE(review): as a @property this can only ever be accessed without
    # arguments, so `simplify` is always 0 here.
    @property
    def pretty_wkt(self, simplify=0):
        "Returns the 'pretty' representation of the WKT."
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
    @property
    def proj(self):
        "Returns the PROJ.4 representation for this Spatial Reference."
        return capi.to_proj(self.ptr, byref(c_char_p()))
    @property
    def proj4(self):
        "Alias for proj()."
        return self.proj
    # NOTE(review): same @property caveat — `dialect` is always '' here.
    @property
    def xml(self, dialect=''):
        "Returns the XML representation of this Spatial Reference."
        return capi.to_xml(self.ptr, byref(c_char_p()), force_bytes(dialect))
class CoordTransform(GDALBase):
    "The coordinate system transformation object."
    def __init__(self, source, target):
        "Initializes on a source and target SpatialReference objects."
        if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
            raise TypeError('source and target must be of type SpatialReference')
        self.ptr = capi.new_ct(source._ptr, target._ptr)
        # Cache the SRS names up front so __str__ works even after the
        # source/target objects have been garbage collected.
        self._srs1_name = source.name
        self._srs2_name = target.name
    def __del__(self):
        "Deletes this Coordinate Transformation object."
        # Guard the C destroy call: during interpreter shutdown the capi
        # module or self._ptr may already be gone.
        try:
            capi.destroy_ct(self._ptr)
        except (AttributeError, TypeError):
            pass
    def __str__(self):
        return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| bsd-3-clause |
javierip/parallel-processing-teaching-toolkit | 04-GPU-accelerators/04-PyOpenCL/08-vector_reduc_min/vector_reduc_min.py | 2 | 2804 | # -*- coding: utf-8 -*-
# Parallel Processing Teaching Toolkit
# PyOpenCL - Example 08
# Vector Reduction : Search Minimum
# https://github.com/javierip/parallel-processing-teaching-toolkit
import pyopencl as cl
import numpy as np
import time # For measure the running times
VECTOR_SIZE = 256 # Elements of vector
# Create a random vector
a_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
# Create a aux vector for GPU operation
b_host = np.zeros(VECTOR_SIZE).astype(np.float32)
# Create a empty vectors for the result
res_host= np.zeros(VECTOR_SIZE).astype(np.float32) # Result in CPU from GPU
result_host= np.zeros(2).astype(np.float32) # Result in CPU
# Create CL context
platform = cl.get_platforms()[0]
device = platform.get_devices()[0] #get first gpu available
print "Running: ", platform
print "In GPU: ", device
ctx = cl.Context([device])
queue = cl.CommandQueue(ctx)
tic=time.time()
#Operation using the CPU
result_host[0]=100
for i in range(0,VECTOR_SIZE):
if(a_host[i]<result_host[0]):
result_host[0]=a_host[i]
result_host[1]=i
time_cpu=time.time()-tic
print
print a_host
# Transfer host (CPU) memory to device (GPU) memory
mf = cl.mem_flags
a_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
b_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)
# Kernel code
prg = cl.Program(ctx, """
__kernel void sum(__global const float *g_idata, __global float *g_odata) {
__local float sdata[1024];
__local int sindice[1024];
int tid = get_local_id(0);
int i = get_global_id(0);
sdata[tid] = g_idata[i];
sindice[tid] = i;
barrier(CLK_LOCAL_MEM_FENCE);
for (unsigned int s = get_local_size(0) / 2; s > 0; s >>= 1) {
if (tid < s ) {
if (sdata[tid] > sdata[tid + s]) {
sdata[tid] = sdata[tid + s];
sindice[tid] = sindice[tid + s];
}//else sindice[tid] = sindice[tid];
barrier(CLK_LOCAL_MEM_FENCE);
}
}
if (tid == 0) {
g_odata[0] = sdata[0];
}
if (tid == 1) {
g_odata[1] = sindice[0];
}
}
""").build()
# Create empty gpu array for the result
res_gpu = cl.Buffer(ctx, mf.WRITE_ONLY, a_host.nbytes)
tic=time.time()
#Operation using the GPU - call the kernel on the card
prg.sum(queue, a_host.shape, None, a_gpu, res_gpu)
time_gpu=time.time()-tic
#Clear GPU resources
res_host = np.empty_like(a_host)
cl.enqueue_copy(queue, res_host, res_gpu)
# Print the results
print "-" * 80
print "Vector Reduction with Vector Size =" , VECTOR_SIZE
print "Min CPU:" , result_host[0]
print "Min GPU:" , res_host[0]
print "Index CPU:" , result_host[1]
print "Index GPU:" , res_host[1]
print "Time CPU:", time_cpu
print "Time GPU:", time_gpu
| apache-2.0 |
ygol/dotfiles | bin/.venv-ansible-venv/lib/python2.6/site-packages/paramiko/dsskey.py | 36 | 6757 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
DSS keys.
"""
import os
from hashlib import sha1
from Crypto.PublicKey import DSA
from paramiko import util
from paramiko.common import zero_byte
from paramiko.py3compat import long
from paramiko.ssh_exception import SSHException
from paramiko.message import Message
from paramiko.ber import BER, BERException
from paramiko.pkey import PKey
class DSSKey (PKey):
    """
    Representation of a DSS key which can be used to sign an verify SSH2
    data.
    """
    def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None):
        # Key material: p, q, g, y are the public parameters; x is the
        # private exponent (None for public-only keys).
        self.p = None
        self.q = None
        self.g = None
        self.y = None
        self.x = None
        # The constructor accepts exactly one source of key material, tried
        # in this order: an open file object, a filename, explicit vals,
        # or an SSH Message (given directly or as raw bytes).
        if file_obj is not None:
            self._from_private_key(file_obj, password)
            return
        if filename is not None:
            self._from_private_key_file(filename, password)
            return
        if (msg is None) and (data is not None):
            msg = Message(data)
        if vals is not None:
            self.p, self.q, self.g, self.y = vals
        else:
            if msg is None:
                raise SSHException('Key object may not be empty')
            if msg.get_text() != 'ssh-dss':
                raise SSHException('Invalid key')
            self.p = msg.get_mpint()
            self.q = msg.get_mpint()
            self.g = msg.get_mpint()
            self.y = msg.get_mpint()
        self.size = util.bit_length(self.p)
    def asbytes(self):
        # Serialize the public key in SSH wire format:
        # string "ssh-dss", then mpints p, q, g, y.
        m = Message()
        m.add_string('ssh-dss')
        m.add_mpint(self.p)
        m.add_mpint(self.q)
        m.add_mpint(self.g)
        m.add_mpint(self.y)
        return m.asbytes()
    def __str__(self):
        return self.asbytes()
    def __hash__(self):
        # Combine the key type and public parameters into one hash.
        h = hash(self.get_name())
        h = h * 37 + hash(self.p)
        h = h * 37 + hash(self.q)
        h = h * 37 + hash(self.g)
        h = h * 37 + hash(self.y)
        # h might be a long by now...
        return hash(h)
    def get_name(self):
        return 'ssh-dss'
    def get_bits(self):
        return self.size
    def can_sign(self):
        # Signing requires the private exponent x.
        return self.x is not None
    def sign_ssh_data(self, data):
        # Sign SHA-1(data) with the private key and wrap (r, s) in an
        # SSH signature Message.
        digest = sha1(data).digest()
        dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q), long(self.x)))
        # generate a suitable k
        qsize = len(util.deflate_long(self.q, 0))
        while True:
            k = util.inflate_long(os.urandom(qsize), 1)
            if (k > 2) and (k < self.q):
                break
        r, s = dss.sign(util.inflate_long(digest, 1), k)
        m = Message()
        m.add_string('ssh-dss')
        # apparently, in rare cases, r or s may be shorter than 20 bytes!
        rstr = util.deflate_long(r, 0)
        sstr = util.deflate_long(s, 0)
        if len(rstr) < 20:
            rstr = zero_byte * (20 - len(rstr)) + rstr
        if len(sstr) < 20:
            sstr = zero_byte * (20 - len(sstr)) + sstr
        m.add_string(rstr + sstr)
        return m
    def verify_ssh_sig(self, data, msg):
        # Verify an SSH DSS signature over *data*; returns truthy on success.
        if len(msg.asbytes()) == 40:
            # spies.com bug: signature has no header
            sig = msg.asbytes()
        else:
            kind = msg.get_text()
            if kind != 'ssh-dss':
                return 0
            sig = msg.get_binary()
        # pull out (r, s) which are NOT encoded as mpints
        sigR = util.inflate_long(sig[:20], 1)
        sigS = util.inflate_long(sig[20:], 1)
        sigM = util.inflate_long(sha1(data).digest(), 1)
        dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q)))
        return dss.verify(sigM, (sigR, sigS))
    def _encode_key(self):
        # BER-encode the private key as DSAPrivateKey
        # { version = 0, p, q, g, y, x } for writing to disk.
        if self.x is None:
            raise SSHException('Not enough key information')
        keylist = [0, self.p, self.q, self.g, self.y, self.x]
        try:
            b = BER()
            b.encode(keylist)
        except BERException:
            raise SSHException('Unable to create ber encoding of key')
        return b.asbytes()
    def write_private_key_file(self, filename, password=None):
        self._write_private_key_file('DSA', filename, self._encode_key(), password)
    def write_private_key(self, file_obj, password=None):
        self._write_private_key('DSA', file_obj, self._encode_key(), password)
    @staticmethod
    def generate(bits=1024, progress_func=None):
        """
        Generate a new private DSS key. This factory function can be used to
        generate a new host key or authentication key.
        :param int bits: number of bits the generated key should be.
        :param function progress_func:
            an optional function to call at key points in key generation (used
            by ``pyCrypto.PublicKey``).
        :return: new `.DSSKey` private key
        """
        dsa = DSA.generate(bits, os.urandom, progress_func)
        key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y))
        key.x = dsa.x
        return key
    ### internals...
    def _from_private_key_file(self, filename, password):
        data = self._read_private_key_file('DSA', filename, password)
        self._decode_key(data)
    def _from_private_key(self, file_obj, password):
        data = self._read_private_key('DSA', file_obj, password)
        self._decode_key(data)
    def _decode_key(self, data):
        # private key file contains:
        # DSAPrivateKey = { version = 0, p, q, g, y, x }
        try:
            keylist = BER(data).decode()
        except BERException as e:
            raise SSHException('Unable to parse key file: ' + str(e))
        if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0):
            raise SSHException('not a valid DSA private key file (bad ber encoding)')
        self.p = keylist[1]
        self.q = keylist[2]
        self.g = keylist[3]
        self.y = keylist[4]
        self.x = keylist[5]
        self.size = util.bit_length(self.p)
| mit |
acsone/odoo | addons/account_anglo_saxon/product.py | 384 | 3035 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
# Anglo-saxon accounting: extend product categories with a price-difference
# account property and re-declare the income/expense account properties so
# their help text matches the anglo-saxon valuation methodology.
class product_category(osv.osv):
    _inherit = "product.category"
    _columns = {
        'property_account_creditor_price_difference_categ': fields.property(
            type='many2one',
            relation='account.account',
            string="Price Difference Account",
            help="This account will be used to value price difference between purchase price and cost price."),
        #Redefine fields to change help text for anglo saxon methodology.
        'property_account_income_categ': fields.property(
            type='many2one',
            relation='account.account',
            string="Income Account",
            help="This account will be used to value outgoing stock using sale price."),
        'property_account_expense_categ': fields.property(
            type='many2one',
            relation='account.account',
            string="Expense Account",
            help="This account will be used to value outgoing stock using cost price."),
    }
# Same anglo-saxon extension as product_category above, but per product
# template (these properties override the category-level defaults).
class product_template(osv.osv):
    _inherit = "product.template"
    _columns = {
        'property_account_creditor_price_difference': fields.property(
            type='many2one',
            relation='account.account',
            string="Price Difference Account",
            help="This account will be used to value price difference between purchase price and cost price."),
        #Redefine fields to change help text for anglo saxon methodology.
        'property_account_income': fields.property(
            type='many2one',
            relation='account.account',
            string="Income Account",
            help="This account will be used to value outgoing stock using sale price."),
        'property_account_expense': fields.property(
            type='many2one',
            relation='account.account',
            string="Expense Account",
            help="This account will be used to value outgoing stock using cost price."),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ravibhure/ansible | lib/ansible/playbook/helpers.py | 16 | 17712 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError
from ansible.module_utils.six import string_types
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of mixed task/block data (parsed from YAML),
    return a list of Block() objects, where implicit blocks
    are created for each bare Task.
    '''
    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole
    if not isinstance(ds, (list, type(None))):
        raise AnsibleAssertionError('%s should be a list or None but is %s' % (ds, type(ds)))
    block_list = []
    if ds:
        for block_ds in ds:
            b = Block.load(
                block_ds,
                play=play,
                parent_block=parent_block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
            # Implicit blocks are created by bare tasks listed in a play without
            # an explicit block statement. If we have two implicit blocks in a row,
            # squash them down to a single block to save processing time later.
            if b._implicit and len(block_list) > 0 and block_list[-1]._implicit:
                # Re-parent each task of the new implicit block onto the
                # previous one. Include tasks keep their own parent chain,
                # so only the grandparent is rewritten for those.
                for t in b.block:
                    if isinstance(t._parent, (TaskInclude, IncludeRole)):
                        t._parent._parent = block_list[-1]
                    else:
                        t._parent = block_list[-1]
                block_list[-1].block.extend(b.block)
            else:
                block_list.append(b)
    return block_list
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''
    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole
    from ansible.playbook.handler_task_include import HandlerTaskInclude
    from ansible.template import Templar
    if not isinstance(ds, list):
        raise AnsibleAssertionError('The ds (%s) should be a list but was a %s' % (ds, type(ds)))
    task_list = []
    for task_ds in ds:
        if not isinstance(task_ds, dict):
            # BUG FIX: the original constructed this exception without
            # raising it, making the per-item type check a no-op (a bad
            # entry would instead crash later with a confusing error).
            raise AnsibleAssertionError('The ds (%s) should be a dict but was a %s' % (ds, type(ds)))
        if 'block' in task_ds:
            t = Block.load(
                task_ds,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
            task_list.append(t)
        else:
            if 'include' in task_ds or 'import_tasks' in task_ds or 'include_tasks' in task_ds:
                if 'include' in task_ds:
                    display.deprecated("The use of 'include' for tasks has been deprecated. "
                                       "Use 'import_tasks' for static inclusions or 'include_tasks' for dynamic inclusions")
                if use_handlers:
                    include_class = HandlerTaskInclude
                else:
                    include_class = TaskInclude
                t = include_class.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader
                )
                all_vars = variable_manager.get_vars(play=play, task=t)
                templar = Templar(loader=loader, variables=all_vars)
                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if 'include_tasks' in task_ds:
                    is_static = False
                elif 'import_tasks' in task_ds:
                    is_static = True
                elif t.static is not None:
                    display.deprecated("The use of 'static' has been deprecated. "
                                       "Use 'import_tasks' for static inclusion, or 'include_tasks' for dynamic inclusion")
                    is_static = t.static
                else:
                    # Legacy 'include': static only when configured so and
                    # the include target has no variables or loops.
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                        (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                        (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)
                if is_static:
                    if t.loop is not None:
                        if 'import_tasks' in task_ds:
                            raise AnsibleParserError("You cannot use loops on 'import_tasks' statements. You should use 'include_tasks' instead.", obj=task_ds)
                        else:
                            raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)
                    # we set a flag to indicate this include was static
                    t.statically_loaded = True
                    # handle relative includes by walking up the list of parent include
                    # tasks and checking the relative result to see if it exists
                    parent_include = block
                    cumulative_path = None
                    found = False
                    subdir = 'tasks'
                    if use_handlers:
                        subdir = 'handlers'
                    while parent_include is not None:
                        if not isinstance(parent_include, TaskInclude):
                            parent_include = parent_include._parent
                            continue
                        parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
                        if cumulative_path is None:
                            cumulative_path = parent_include_dir
                        elif not os.path.isabs(cumulative_path):
                            cumulative_path = os.path.join(parent_include_dir, cumulative_path)
                        include_target = templar.template(t.args['_raw_params'])
                        if t._role:
                            new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
                            include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
                        else:
                            include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
                        if os.path.exists(include_file):
                            found = True
                            break
                        else:
                            parent_include = parent_include._parent
                    if not found:
                        # Not found relative to any parent include: resolve
                        # against the role path or the playbook basedir.
                        try:
                            include_target = templar.template(t.args['_raw_params'])
                        except AnsibleUndefinedVariable as e:
                            raise AnsibleParserError(
                                "Error when evaluating variable in include name: %s.\n\n"
                                "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n"
                                "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n"
                                "sources like group or host vars." % t.args['_raw_params'],
                                obj=task_ds,
                                suppress_extended_error=True,
                                orig_exc=e)
                        if t._role:
                            include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
                        else:
                            include_file = loader.path_dwim(include_target)
                    try:
                        data = loader.load_from_file(include_file)
                        if data is None:
                            display.warning('file %s is empty and had no tasks to include' % include_file)
                            continue
                        elif not isinstance(data, list):
                            raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
                        # since we can't send callbacks here, we display a message directly in
                        # the same fashion used by the on_include callback. We also do it here,
                        # because the recursive nature of helper methods means we may be loading
                        # nested includes, and we want the include order printed correctly
                        display.vv("statically imported: %s" % include_file)
                    except AnsibleFileNotFound:
                        if t.static or \
                                C.DEFAULT_TASK_INCLUDES_STATIC or \
                                C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers:
                            raise
                        display.deprecated(
                            "Included file '%s' not found, however since this include is not "
                            "explicitly marked as 'static: yes', we will try and include it dynamically "
                            "later. In the future, this will be an error unless 'static: no' is used "
                            "on the include task. If you do not want missing includes to be considered "
                            "dynamic, use 'static: yes' on the include or set the global ansible.cfg "
                            "options to make all includes static for tasks and/or handlers" % include_file, version="2.7"
                        )
                        task_list.append(t)
                        continue
                    ti_copy = t.copy(exclude_parent=True)
                    ti_copy._parent = block
                    included_blocks = load_list_of_blocks(
                        data,
                        play=play,
                        parent_block=None,
                        task_include=ti_copy,
                        role=role,
                        use_handlers=use_handlers,
                        loader=loader,
                        variable_manager=variable_manager,
                    )
                    # FIXME: remove once 'include' is removed
                    # pop tags out of the include args, if they were specified there, and assign
                    # them to the include. If the include already had tags specified, we raise an
                    # error so that users know not to specify them both ways
                    tags = ti_copy.vars.pop('tags', [])
                    if isinstance(tags, string_types):
                        tags = tags.split(',')
                    if len(tags) > 0:
                        if 'include_tasks' in task_ds or 'import_tasks' in task_ds:
                            raise AnsibleParserError('You cannot specify "tags" inline to the task, it is a task keyword')
                        if len(ti_copy.tags) > 0:
                            raise AnsibleParserError(
                                "Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement",
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option",
                                           version="2.7")
                    else:
                        tags = ti_copy.tags[:]
                    # now we extend the tags on each of the included blocks
                    for b in included_blocks:
                        b.tags = list(set(b.tags).union(tags))
                    # END FIXME
                    # FIXME: handlers shouldn't need this special handling, but do
                    # right now because they don't iterate blocks correctly
                    if use_handlers:
                        for b in included_blocks:
                            task_list.extend(b.block)
                    else:
                        task_list.extend(included_blocks)
                else:
                    t.is_static = False
                    task_list.append(t)
            elif 'include_role' in task_ds or 'import_role' in task_ds:
                ir = IncludeRole.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader,
                )
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                is_static = False
                if 'import_role' in task_ds:
                    is_static = True
                elif ir.static is not None:
                    display.deprecated("The use of 'static' for 'include_role' has been deprecated. "
                                       "Use 'import_role' for static inclusion, or 'include_role' for dynamic inclusion")
                    is_static = ir.static
                if is_static:
                    if ir.loop is not None:
                        if 'import_tasks' in task_ds:
                            raise AnsibleParserError("You cannot use loops on 'import_role' statements. You should use 'include_role' instead.", obj=task_ds)
                        else:
                            raise AnsibleParserError("You cannot use 'static' on an include_role with a loop", obj=task_ds)
                    # we set a flag to indicate this include was static
                    ir.statically_loaded = True
                    # template the role name now, if needed
                    all_vars = variable_manager.get_vars(play=play, task=ir)
                    templar = Templar(loader=loader, variables=all_vars)
                    if templar._contains_vars(ir._role_name):
                        ir._role_name = templar.template(ir._role_name)
                    # uses compiled list from object
                    blocks, _ = ir.get_block_list(variable_manager=variable_manager, loader=loader)
                    t = task_list.extend(blocks)
                else:
                    # passes task object itself for latter generation of list
                    t = task_list.append(ir)
            else:
                # Plain task (or handler) entry.
                if use_handlers:
                    t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                else:
                    t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                task_list.append(t)
    return task_list
def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None):
    '''
    Build and return a list of RoleInclude objects from a datastructure
    containing role definitions.
    '''
    # Deferred import: pulling RoleInclude in at module load time would
    # create a circular dependency between playbook modules.
    from ansible.playbook.role.include import RoleInclude
    if not isinstance(ds, list):
        raise AnsibleAssertionError('ds (%s) should be a list but was a %s' % (ds, type(ds)))
    return [
        RoleInclude.load(
            role_def,
            play=play,
            current_role_path=current_role_path,
            variable_manager=variable_manager,
            loader=loader,
        )
        for role_def in ds
    ]
| gpl-3.0 |
ayushagrawal288/zamboni | mkt/webapps/query.py | 8 | 5087 | from django.db import models
from django.db.models.sql import compiler
class IndexQuerySet(models.query.QuerySet):
    """QuerySet that lets callers suggest MySQL index hints (USE INDEX) per table."""
    def with_index(self, **kw):
        """
        Suggest indexes that should be used with this query as key-value pairs.
        qs.with_index(t1='xxx') => INNER JOIN t1 USE INDEX (`xxx`)
        """
        # Clone first so the hints don't leak back into this queryset.
        q = self._clone()
        if not isinstance(q.query, IndexQuery):
            # Swap in the IndexQuery subclass so the hint map survives
            # subsequent query clones.
            q.query = self.query.clone(IndexQuery)
        q.query.index_map.update(kw)
        return q
    def fetch_missed(self, pks):
        # Remove the indexes before doing the id query.
        if hasattr(self.query, 'index_map'):
            # Temporarily clear the hints for the pk lookup, then restore
            # them so later evaluations still use the suggested indexes.
            index_map = self.query.index_map
            self.query.index_map = {}
            rv = super(IndexQuerySet, self).fetch_missed(pks)
            self.query.index_map = index_map
            return rv
        else:
            return super(IndexQuerySet, self).fetch_missed(pks)
class IndexQuery(models.query.sql.Query):
    """
    Extends sql.Query to make it possible to specify indexes to use.
    """
    def clone(self, klass=None, **kwargs):
        # Maintain index_map across clones.
        c = super(IndexQuery, self).clone(klass, **kwargs)
        # Copy (don't share) the map so mutating a clone can't affect us.
        c.index_map = dict(self.index_map)
        return c
    def get_compiler(self, using=None, connection=None):
        # Call super to figure out using and connection.
        c = super(IndexQuery, self).get_compiler(using, connection)
        # Replace the stock compiler with one that emits USE INDEX hints.
        return IndexCompiler(self, c.connection, c.using)
    def _setup_query(self):
        # index_map: {table name: index name}, consumed by IndexCompiler.
        if not hasattr(self, 'index_map'):
            self.index_map = {}
    def get_count(self, using):
        # Don't use the index for counts, it's slower.
        index_map = self.index_map
        self.index_map = {}
        count = super(IndexQuery, self).get_count(using)
        # Restore the hints for subsequent (non-count) evaluations.
        self.index_map = index_map
        return count
class IndexCompiler(compiler.SQLCompiler):
    """SQLCompiler that injects MySQL ``USE INDEX`` hints into the FROM clause.

    Hints come from ``self.query.index_map``, a mapping of table name to
    index name populated by ``IndexQuerySet.with_index()``.
    """
    def get_from_clause(self):
        """
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Sub-classes can override this to create a
        from-clause via a "select".
        This should only be called after any SQL construction methods that
        might change the tables we need. This means the select columns and
        ordering must be done first.
        """
        result = []
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        index_map = self.query.index_map
        first = True
        from_params = []
        for alias in self.query.tables:
            if not self.query.alias_refcount[alias]:
                continue
            try:
                name, alias, join_type, lhs, join_cols, _, join_field = (
                    self.query.alias_map[alias])
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            # Emit ' alias' only when the table is actually aliased.
            alias_str = ' %s' % alias if alias != name else ''
            # jbalogh wuz here.
            if name in index_map:
                use_index = 'USE INDEX (%s)' % qn(index_map[name])
            else:
                use_index = ''
            if join_type and not first:
                extra_cond = join_field.get_extra_restriction(
                    self.query.where_class, alias, lhs)
                if extra_cond:
                    extra_sql, extra_params = extra_cond.as_sql(
                        qn, self.connection)
                    extra_sql = 'AND (%s)' % extra_sql
                    from_params.extend(extra_params)
                else:
                    extra_sql = ""
                result.append('%s %s%s %s ON ('
                              % (join_type, qn(name), alias_str, use_index))
                for index, (lhs_col, rhs_col) in enumerate(join_cols):
                    if index != 0:
                        result.append(' AND ')
                    result.append('%s.%s = %s.%s' % (
                        qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
                result.append('%s)' % extra_sql)
            else:
                # FIX: was the duplicated assignment
                # "connector = connector = '' if first else ', '".
                connector = '' if first else ', '
                result.append('%s%s%s %s' %
                              (connector, qn(name), alias_str, use_index))
            # jbalogh out.
            first = False
        for t in self.query.extra_tables:
            alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
            if (alias not in self.query.alias_map or
                    self.query.alias_refcount[alias] == 1):
                connector = '' if first else ', '
                result.append('%s%s' % (connector, qn(alias)))
                first = False
        return result, from_params
| bsd-3-clause |
cyclecomputing/boto | boto/gs/key.py | 4 | 42479 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
self.meta_generation = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
    def handle_version_headers(self, resp, force=False):
        # Record the object's version info from the GCS response headers.
        # ``force`` is unused here; it is accepted to match the signature of
        # the S3 base-class hook.
        self.metageneration = resp.getheader('x-goog-metageneration', None)
        self.generation = resp.getheader('x-goog-generation', None)
    def handle_restore_headers(self, response):
        # Intentional no-op: overrides the S3 base-class hook, since restore
        # status headers are not applicable to GCS keys.
        return
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
# For GCS we need to include the object generation in the query args.
# The rest of the processing is handled in the parent class.
if self.generation:
if query_args:
query_args += '&'
query_args += 'generation=%s' % self.generation
super(Key, self).open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries,
response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to GCS and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_upload_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Less bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               res_upload_handler=None, size=None, rewind=False,
                               if_generation=None):
        """
        Store an object in GS using the name of the Key object as the
        key in GS and the contents of the file pointed to by 'fp' as the
        contents.

        :type fp: file
        :param fp: the file whose contents are to be uploaded

        :type headers: dict
        :param headers: additional HTTP headers to be sent with the PUT
            request.

        :type replace: bool
        :param replace: If this parameter is False, the method will first
            check to see if an object exists in the bucket with the same key.
            If it does, it won't overwrite it. The default value is True
            which will overwrite the object.

        :type cb: function
        :param cb: a callback function that will be called to report progress
            on the upload. The callback should accept two integer parameters,
            the first representing the number of bytes that have been
            successfully transmitted to GS and the second representing the
            total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter, this parameter determines the granularity of the
            callback by defining the maximum number of times the callback
            will be called during the file transfer.

        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new
            key in GS.

        :type md5: tuple
        :param md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the Base64-encoded
            version of the plain checksum as the second element. This is the
            same format returned by the compute_md5 method. If you need to
            compute the MD5 for any reason prior to upload, it's silly to
            have to do it twice so this param, if present, will be used as
            the MD5 values of the file. Otherwise, the checksum will be
            computed.

        :type res_upload_handler: ResumableUploadHandler
        :param res_upload_handler: If provided, this handler will perform the
            upload.

        :type size: int
        :param size: (optional) The Maximum number of bytes to read from the
            file pointer (fp). This is useful when uploading a file in
            multiple parts where you are splitting the file up into different
            ranges to be uploaded. If not specified, the default behaviour is
            to read all bytes from the file pointer. Less bytes may be
            available.
            Notes:
            1. The "size" parameter currently cannot be used when a resumable
               upload handler is given but is still useful for uploading part
               of a file as implemented by the parent class.
            2. At present Google Cloud Storage does not support multipart
               uploads.

        :type rewind: bool
        :param rewind: (optional) If True, the file pointer (fp) will be
            rewound to the start before any bytes are read from it. The
            default behaviour is False which reads from the current position
            of the file pointer (fp).

        :type if_generation: int
        :param if_generation: (optional) If set to a generation number, the
            object will only be written to if its current generation number
            is this value. If set to the value 0, the object will only be
            written if it doesn't already exist.

        :rtype: None
        :return: Nothing. (NOTE: a historical version of this docstring
            claimed the number of bytes written was returned, but no code
            path here returns a value.)

        TODO: At some point we should refactor the Bucket and Key classes,
        to move functionality common to all providers into a parent class,
        and provider-specific functionality into subclasses (rather than
        just overriding/sharing code the way it currently works).
        """
        provider = self.bucket.connection.provider
        if res_upload_handler and size:
            # could use size instead of file_length if provided but...
            raise BotoClientError(
                '"size" param not supported for resumable uploads.')
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if rewind:
            # caller requests reading from beginning of fp.
            fp.seek(0, os.SEEK_SET)
        else:
            # The following seek/tell/seek logic is intended
            # to detect applications using the older interface to
            # set_contents_from_file(), which automatically rewound the
            # file each time the Key was reused. This changed with commit
            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
            # split into multiple parts and uploaded in parallel, and at
            # the time of that commit this check was added because otherwise
            # older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
            # by KeyFile (used, for example, by gsutil when copying between
            # providers). So, we skip the check for the KeyFile case.
            # TODO: At some point consider removing this seek/tell/seek
            # logic, after enough time has passed that it's unlikely any
            # programs remain that assume the older auto-rewind interface.
            if not isinstance(fp, KeyFile):
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                if fp.tell() == spos:
                    fp.seek(0, os.SEEK_SET)
                    if fp.tell() != spos:
                        # Raise an exception as this is likely a programming
                        # error whereby there is data before the fp but nothing
                        # after it.
                        fp.seek(spos)
                        raise AttributeError('fp is at EOF. Use rewind option '
                                             'or seek() to data start.')
                # seek back to the correct position.
                fp.seek(spos)
        if hasattr(fp, 'name'):
            self.path = fp.name
        if self.bucket is not None:
            if isinstance(fp, KeyFile):
                # Avoid EOF seek for KeyFile case as it's very inefficient.
                key = fp.getkey()
                size = key.size - fp.tell()
                self.size = size
                # At present both GCS and S3 use MD5 for the etag for
                # non-multipart-uploaded objects. If the etag is 32 hex
                # chars use it as an MD5, to avoid having to read the file
                # twice while transferring.
                if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
                    etag = key.etag.strip('"')
                    md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
            if size:
                self.size = size
            else:
                # If md5 is provided, still need to size so
                # calculate based on bytes to end of content
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                self.size = fp.tell() - spos
                fp.seek(spos)
                size = self.size
            if md5 is None:
                md5 = self.compute_md5(fp, size)
            self.md5 = md5[0]
            self.base64md5 = md5[1]
            if self.name is None:
                # No explicit key name: fall back to the content's MD5.
                self.name = self.md5
            if not replace:
                # Existing object wins when replace=False; upload is skipped.
                if self.bucket.lookup(self.name):
                    return
            if if_generation is not None:
                headers['x-goog-if-generation-match'] = str(if_generation)
            if res_upload_handler:
                res_upload_handler.send_file(self, fp, headers, cb, num_cb)
            else:
                # Not a resumable transfer so use basic send_file mechanism.
                self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS
:type headers: dict
:param headers: Additional headers to pass along with the request to GS.
:type replace: bool
:param replace: If True, replaces the contents of the file if it
already exists.
:type cb: function
:param cb: (optional) a callback function that will be called to report
progress on the download. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted from GS and the second representing
the total number of bytes that need to be transmitted.
:type cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
size of the to be transmitted object.
:type cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO.StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
            if_generation=None, if_metageneration=None):
    """Sets the ACL for this object.

    :type acl_or_str: string or :class:`boto.gs.acl.ACL`
    :param acl_or_str: A canned ACL string (see
        :data:`~.gs.acl.CannedACLStrings`) or an ACL object.

    :type headers: dict
    :param headers: Additional headers to set during the request.

    :type generation: int
    :param generation: If specified, sets the ACL for a specific generation
        of a versioned object. If not specified, the current version is
        modified.

    :type if_generation: int
    :param if_generation: (optional) If set to a generation number, the acl
        will only be updated if its current generation number is this value.

    :type if_metageneration: int
    :param if_metageneration: (optional) If set to a metageneration number,
        the acl will only be updated if its current metageneration number is
        this value.
    """
    # Delegate to the owning bucket.  Return its result for consistency
    # with set_xml_acl/set_canned_acl, which both propagate the bucket's
    # return value (previously the result was silently discarded; callers
    # that ignored the None return are unaffected).
    if self.bucket is not None:
        return self.bucket.set_acl(acl_or_str, self.name, headers=headers,
                                   generation=generation,
                                   if_generation=if_generation,
                                   if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
    """Return the ACL of this object.

    :param dict headers: Additional headers to set during the request.
    :param int generation: If specified, gets the ACL for a specific
        generation of a versioned object. If not specified, the current
        version is returned.

    :rtype: :class:`.gs.acl.ACL`
    """
    # A key that is not associated with a bucket has no ACL to fetch.
    if self.bucket is None:
        return None
    return self.bucket.get_acl(self.name, headers=headers,
                               generation=generation)
def get_xml_acl(self, headers=None, generation=None):
    """Return the ACL document of this object as an XML string.

    :param dict headers: Additional headers to set during the request.
    :param int generation: If specified, gets the ACL for a specific
        generation of a versioned object. If not specified, the current
        version is returned.

    :rtype: str
    """
    # No owning bucket means there is nothing to query.
    if self.bucket is None:
        return None
    return self.bucket.get_xml_acl(self.name, headers=headers,
                                   generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
                if_generation=None, if_metageneration=None):
    """Set this object's ACL from an XML document string.

    :type acl_str: string
    :param acl_str: A string containing the ACL XML.

    :type headers: dict
    :param headers: Additional headers to set during the request.

    :type generation: int
    :param generation: If specified, sets the ACL for a specific generation
        of a versioned object. If not specified, the current version is
        modified.

    :type if_generation: int
    :param if_generation: (optional) If set to a generation number, the acl
        will only be updated if its current generation number is this value.

    :type if_metageneration: int
    :param if_metageneration: (optional) If set to a metageneration number,
        the acl will only be updated if its current metageneration number is
        this value.
    """
    # Without an owning bucket there is nothing to update.
    if self.bucket is None:
        return None
    return self.bucket.set_xml_acl(
        acl_str, self.name, headers=headers, generation=generation,
        if_generation=if_generation, if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
                   if_generation=None, if_metageneration=None):
    """Apply a predefined (canned) ACL to this object.

    :type acl_str: string
    :param acl_str: A canned ACL string. See
        :data:`~.gs.acl.CannedACLStrings`.

    :type headers: dict
    :param headers: Additional headers to set during the request.

    :type generation: int
    :param generation: If specified, sets the ACL for a specific generation
        of a versioned object. If not specified, the current version is
        modified.

    :type if_generation: int
    :param if_generation: (optional) If set to a generation number, the acl
        will only be updated if its current generation number is this value.

    :type if_metageneration: int
    :param if_metageneration: (optional) If set to a metageneration number,
        the acl will only be updated if its current metageneration number is
        this value.
    """
    # Without an owning bucket there is nothing to update.
    if self.bucket is None:
        return None
    return self.bucket.set_canned_acl(
        acl_str, self.name, headers=headers, generation=generation,
        if_generation=if_generation, if_metageneration=if_metageneration)
def compose(self, components, content_type=None, headers=None):
    """Create a new object from a sequence of existing objects.

    The content of the object representing this Key will be the
    concatenation of the given object sequence. For more detail, visit

        https://developers.google.com/storage/docs/composite-objects

    :type components: list of :class:`Key`
    :param components: List of gs.Keys representing the component objects.
        All components must live in the same bucket as this Key.

    :type content_type: string
    :param content_type: (optional) Content type for the new composite
        object.

    :type headers: dict
    :param headers: (optional) Additional headers to send with the request.

    :rtype: string
    :returns: The generation number of the composed object, so that the
        result URI can be built with it for automatic parallel uploads.

    :raises BotoClientError: If a component lives in a different bucket.
    """
    # Local import keeps the module's import surface unchanged.
    from xml.sax.saxutils import escape

    compose_req = []
    for key in components:
        if key.bucket.name != self.bucket.name:
            raise BotoClientError(
                'GCS does not support inter-bucket composing')

        generation_tag = ''
        if key.generation:
            generation_tag = ('<Generation>%s</Generation>'
                              % str(key.generation))
        # Entity-escape the object name: characters such as '&' and '<'
        # are legal in GCS object names but must be encoded in XML.
        compose_req.append('<Component><Name>%s</Name>%s</Component>' %
                           (escape(key.name), generation_tag))
    compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
                       ''.join(compose_req))

    headers = headers or {}
    if content_type:
        headers['Content-Type'] = content_type
    resp = self.bucket.connection.make_request(
        'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
        headers=headers, query_args='compose',
        data=get_utf8_value(compose_req_xml))
    if resp.status < 200 or resp.status > 299:
        raise self.bucket.connection.provider.storage_response_error(
            resp.status, resp.reason, resp.read())

    # Return the generation so that the result URI can be built with this
    # for automatic parallel uploads.
    return resp.getheader('x-goog-generation')
| mit |
pantsbuild/pants | src/python/pants/option/options_integration_test.py | 3 | 4062 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from textwrap import dedent
from pants.fs.fs import safe_filename_from_path
from pants.testutil.pants_integration_test import (
ensure_daemon,
run_pants,
run_pants_with_workdir,
setup_tmpdir,
)
from pants.util.contextutil import temporary_dir
def test_invalid_options() -> None:
    """Bad CLI flags abort before config validation; with a clean command
    line, every invalid config entry is reported."""
    config = {
        "DEFAULT": {"some_ludicrous_thing": 123},
        "GLOBAL": {"backend_packages": ["pants.backend.python"], "invalid_global": True},
        "invalid_scope": {"foo": "bar"},
        "pytest": {"bad_option": True},
    }
    config_errors = (
        "ERROR] Invalid option 'invalid_global' under [GLOBAL]",
        "ERROR] Invalid scope [invalid_scope]",
        "ERROR] Invalid option 'bad_option' under [pytest]",
    )

    # We error on invalid CLI options before validating the config file.
    cli_result = run_pants(["--pytest-invalid=ALL", "help"], config=config)
    cli_result.assert_failure()
    assert "Unknown flags --invalid on scope pytest" in cli_result.stderr
    assert all(err not in cli_result.stderr for err in config_errors)

    # Once the CLI parses, all config-file errors are surfaced together.
    config_result = run_pants(["help"], config=config)
    config_result.assert_failure()
    assert "Unknown flags" not in config_result.stderr
    assert all(err in config_result.stderr for err in config_errors)
@ensure_daemon
def test_deprecation_and_ignore_warnings(use_pantsd: bool) -> None:
    """End-to-end check that option-deprecation warnings are emitted, and that
    `[GLOBAL].ignore_warnings` suppresses them via both a literal string and a
    `$regex$`-prefixed pattern."""
    # In-repo plugin registering a single option deprecated far in the future,
    # so setting it always triggers a deprecation warning.
    plugin = dedent(
        """\
        from pants.option.subsystem import Subsystem
        from pants.engine.rules import SubsystemRule

        class Options(Subsystem):
            help = "Options just for a test."
            options_scope = "mock-options"

            @classmethod
            def register_options(cls, register):
                super().register_options(register)
                register(
                    "--deprecated",
                    removal_version="999.99.9.dev0",
                    removal_hint="blah",
                )

        def rules():
            return [SubsystemRule(Options)]
        """
    )
    # 'fake' intentionally matches no file, so the target also yields an
    # unmatched-glob warning that we can assert on and later suppress.
    with setup_tmpdir(
        {
            "plugins/mock_options/register.py": plugin,
            "BUILD": "files(name='t', sources=['fake'])",
        }
    ) as tmpdir:
        config = {
            "GLOBAL": {
                "pythonpath": [f"%(buildroot)s/{tmpdir}/plugins"],
                "backend_packages": ["mock_options"],
            },
            # Setting the deprecated option is what provokes the warning.
            "mock-options": {"deprecated": "foo"},
        }
        unmatched_glob_warning = f"Unmatched glob from {tmpdir}:t's `sources` field"

        # First run: both warnings must appear on stderr.
        result = run_pants(["filedeps", f"{tmpdir}:t"], config=config, use_pantsd=use_pantsd)
        result.assert_success()
        assert unmatched_glob_warning in result.stderr
        assert (
            "DEPRECATED: option 'deprecated' in scope 'mock-options' will be removed in version "
            "999.99.9.dev0."
        ) in result.stderr

        # Second run: suppress the glob warning by literal match and the
        # deprecation warning by regex.
        config["GLOBAL"]["ignore_warnings"] = [  # type: ignore[index]
            unmatched_glob_warning,
            "$regex$DEPRECATED: option 'de.+ted'",
        ]
        ignore_result = run_pants(["filedeps", f"{tmpdir}:t"], config=config, use_pantsd=use_pantsd)
        ignore_result.assert_success()
        assert unmatched_glob_warning not in ignore_result.stderr
        # NOTE(review): 'another_deprecated' is never registered by the plugin
        # above — presumably leftover from an earlier revision of this test;
        # the assertion is vacuously true. Confirm intent.
        assert "DEPRECATED: option 'another_deprecated'" not in ignore_result.stderr
def test_pants_symlink_workdirs() -> None:
    """The workdir must be created as a symlink pointing into the physical
    workdir base supplied via --pants-physical-workdir-base."""
    with temporary_dir() as scratch:
        workdir_link = f"{scratch}/.pants.d"
        physical_base = f"{scratch}/workdirs"
        expected_target = f"{physical_base}/{safe_filename_from_path(workdir_link)}"

        run_result = run_pants_with_workdir(
            [f"--pants-physical-workdir-base={physical_base}", "help"],
            workdir=workdir_link,
        )
        run_result.assert_success()

        # The symlinked workdir must resolve to its physical counterpart.
        assert os.readlink(workdir_link) == expected_target
| apache-2.0 |
bjori/grpc | src/python/grpcio/grpc/framework/face/interfaces.py | 38 | 23415 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces for the face layer of RPC Framework."""
import abc
import enum
# cardinality, style, exceptions, abandonment, future, and stream are
# referenced from specification in this module.
from grpc.framework.common import cardinality # pylint: disable=unused-import
from grpc.framework.common import style # pylint: disable=unused-import
from grpc.framework.face import exceptions # pylint: disable=unused-import
from grpc.framework.foundation import abandonment # pylint: disable=unused-import
from grpc.framework.foundation import future # pylint: disable=unused-import
from grpc.framework.foundation import stream # pylint: disable=unused-import
@enum.unique
class Abortion(enum.Enum):
    """Categories of RPC abortion.

    Values of this enum are passed to the abortion callbacks registered via
    RpcContext.add_abortion_callback and the various ``event`` invocation
    methods to describe why an RPC terminated abnormally.
    """

    CANCELLED = 'cancelled'
    EXPIRED = 'expired'
    NETWORK_FAILURE = 'network failure'
    SERVICED_FAILURE = 'serviced failure'
    SERVICER_FAILURE = 'servicer failure'
class CancellableIterator(object):
    """Implements the Iterator protocol and affords a cancel method."""

    # NOTE: Python 2 iterator protocol — the advance method is named "next",
    # not "__next__".
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __iter__(self):
        """Returns the self object in accordance with the Iterator protocol."""
        raise NotImplementedError()

    @abc.abstractmethod
    def next(self):
        """Returns a value or raises StopIteration per the Iterator protocol."""
        raise NotImplementedError()

    @abc.abstractmethod
    def cancel(self):
        """Requests cancellation of whatever computation underlies this iterator."""
        raise NotImplementedError()
class RpcContext(object):
    """Provides RPC-related information and action.

    Implementations expose liveness (is_active), remaining time
    (time_remaining), and abortion notification (add_abortion_callback).
    """

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def is_active(self):
        """Describes whether the RPC is active or has terminated."""
        raise NotImplementedError()

    @abc.abstractmethod
    def time_remaining(self):
        """Describes the length of allowed time remaining for the RPC.

        Returns:
          A nonnegative float indicating the length of allowed time in seconds
          remaining for the RPC to complete before it is considered to have timed
          out.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_abortion_callback(self, abortion_callback):
        """Registers a callback to be called if the RPC is aborted.

        Args:
          abortion_callback: A callable to be called and passed an Abortion value
            in the event of RPC abortion.
        """
        raise NotImplementedError()
class Call(object):
    """Invocation-side representation of an RPC.

    Attributes:
      context: An RpcContext affording information about the RPC.
    """

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def cancel(self):
        """Requests cancellation of the RPC."""
        raise NotImplementedError()
class UnaryUnaryMultiCallable(object):
    """Affords invoking a unary-unary RPC in any call style."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __call__(self, request, timeout):
        """Synchronously invokes the underlying RPC.

        Args:
          request: The request value for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          The response value for the RPC.

        Raises:
          exceptions.RpcError: Indicating that the RPC was aborted.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def future(self, request, timeout):
        """Asynchronously invokes the underlying RPC.

        Args:
          request: The request value for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A future.Future representing the RPC. In the event of RPC completion,
          the returned Future's result value will be the response value of the
          RPC. In the event of RPC abortion, the returned Future's exception
          value will be an exceptions.RpcError.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event(self, request, response_callback, abortion_callback, timeout):
        """Asynchronously invokes the underlying RPC.

        Args:
          request: The request value for the RPC.
          response_callback: A callback to be called to accept the response
            value of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A Call object for the RPC.
        """
        raise NotImplementedError()
class UnaryStreamMultiCallable(object):
    """Affords invoking a unary-stream RPC in any call style."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __call__(self, request, timeout):
        """Synchronously invokes the underlying RPC.

        Args:
          request: The request value for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A CancellableIterator that yields the response values of the RPC and
          affords RPC cancellation. Drawing response values from the returned
          CancellableIterator may raise exceptions.RpcError indicating abortion
          of the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event(self, request, response_consumer, abortion_callback, timeout):
        """Asynchronously invokes the underlying RPC.

        Args:
          request: The request value for the RPC.
          response_consumer: A stream.Consumer to be called to accept the
            response values of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A Call object for the RPC.
        """
        raise NotImplementedError()
class StreamUnaryMultiCallable(object):
    """Affords invoking a stream-unary RPC in any call style."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __call__(self, request_iterator, timeout):
        """Synchronously invokes the underlying RPC.

        Args:
          request_iterator: An iterator that yields request values for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          The response value for the RPC.

        Raises:
          exceptions.RpcError: Indicating that the RPC was aborted.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def future(self, request_iterator, timeout):
        """Asynchronously invokes the underlying RPC.

        Args:
          request_iterator: An iterator that yields request values for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A future.Future representing the RPC. In the event of RPC completion,
          the returned Future's result value will be the response value of the
          RPC. In the event of RPC abortion, the returned Future's exception
          value will be an exceptions.RpcError.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event(self, response_callback, abortion_callback, timeout):
        """Asynchronously invokes the underlying RPC.

        Unlike __call__ and future, this method takes no request value:
        request values are passed to the stream.Consumer returned here.

        Args:
          response_callback: A callback to be called to accept the response
            value of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A pair of a Call object for the RPC and a stream.Consumer to which the
          request values of the RPC should be passed.
        """
        raise NotImplementedError()
class StreamStreamMultiCallable(object):
    """Affords invoking a stream-stream RPC in any call style."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __call__(self, request_iterator, timeout):
        """Synchronously invokes the underlying RPC.

        Args:
          request_iterator: An iterator that yields request values for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A CancellableIterator that yields the response values of the RPC and
          affords RPC cancellation. Drawing response values from the returned
          CancellableIterator may raise exceptions.RpcError indicating abortion
          of the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event(self, response_consumer, abortion_callback, timeout):
        """Asynchronously invokes the underlying RPC.

        Args:
          response_consumer: A stream.Consumer to be called to accept the
            response values of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A pair of a Call object for the RPC and a stream.Consumer to which the
          request values of the RPC should be passed.
        """
        raise NotImplementedError()
class MethodImplementation(object):
    """A sum type that describes an RPC method implementation.

    Exactly one of the implementation attributes is expected to be non-None,
    selected by the cardinality and style attributes.

    Attributes:
      cardinality: A cardinality.Cardinality value.
      style: A style.Service value.
      unary_unary_inline: The implementation of the RPC method as a callable
        value that takes a request value and an RpcContext object and returns a
        response value. Only non-None if cardinality is
        cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE.
      unary_stream_inline: The implementation of the RPC method as a callable
        value that takes a request value and an RpcContext object and returns an
        iterator of response values. Only non-None if cardinality is
        cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE.
      stream_unary_inline: The implementation of the RPC method as a callable
        value that takes an iterator of request values and an RpcContext object
        and returns a response value. Only non-None if cardinality is
        cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE.
      stream_stream_inline: The implementation of the RPC method as a callable
        value that takes an iterator of request values and an RpcContext object
        and returns an iterator of response values. Only non-None if cardinality
        is cardinality.Cardinality.STREAM_STREAM and style is
        style.Service.INLINE.
      unary_unary_event: The implementation of the RPC method as a callable value
        that takes a request value, a response callback to which to pass the
        response value of the RPC, and an RpcContext. Only non-None if
        cardinality is cardinality.Cardinality.UNARY_UNARY and style is
        style.Service.EVENT.
      unary_stream_event: The implementation of the RPC method as a callable
        value that takes a request value, a stream.Consumer to which to pass
        the response values of the RPC, and an RpcContext. Only non-None if
        cardinality is cardinality.Cardinality.UNARY_STREAM and style is
        style.Service.EVENT.
      stream_unary_event: The implementation of the RPC method as a callable
        value that takes a response callback to which to pass the response value
        of the RPC and an RpcContext and returns a stream.Consumer to which the
        request values of the RPC should be passed. Only non-None if cardinality
        is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT.
      stream_stream_event: The implementation of the RPC method as a callable
        value that takes a stream.Consumer to which to pass the response values
        of the RPC and an RpcContext and returns a stream.Consumer to which the
        request values of the RPC should be passed. Only non-None if cardinality
        is cardinality.Cardinality.STREAM_STREAM and style is
        style.Service.EVENT.
    """

    __metaclass__ = abc.ABCMeta
class MultiMethodImplementation(object):
    """A general type able to service many RPC methods."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def service(self, name, response_consumer, context):
        """Services an RPC.

        Args:
          name: The RPC method name.
          response_consumer: A stream.Consumer to be called to accept the
            response values of the RPC.
          context: An RpcContext object.

        Returns:
          A stream.Consumer with which to accept the request values of the RPC.
          The consumer returned from this method may or may not be invoked to
          completion: in the case of RPC abortion, RPC Framework will simply stop
          passing values to this object. Implementations must not assume that
          this object will be called to completion of the request stream or even
          called at all.

        Raises:
          abandonment.Abandoned: May or may not be raised when the RPC has been
            aborted.
          exceptions.NoSuchMethodError: If this MultiMethod does not recognize
            the given RPC method name and is not able to service the RPC.
        """
        raise NotImplementedError()
class GenericStub(object):
    """Affords RPC methods to callers."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def blocking_value_in_value_out(self, name, request, timeout):
        """Invokes a unary-request-unary-response RPC method.

        This method blocks until either returning the response value of the RPC
        (in the event of RPC completion) or raising an exception (in the event
        of RPC abortion).

        Args:
          name: The RPC method name.
          request: The request value for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          The response value for the RPC.

        Raises:
          exceptions.RpcError: Indicating that the RPC was aborted.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def future_value_in_value_out(self, name, request, timeout):
        """Invokes a unary-request-unary-response RPC method.

        Args:
          name: The RPC method name.
          request: The request value for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A future.Future representing the RPC. In the event of RPC completion,
          the returned Future will return an outcome indicating that the RPC
          returned the response value of the RPC. In the event of RPC abortion,
          the returned Future will return an outcome indicating that the RPC
          raised an exceptions.RpcError.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def inline_value_in_stream_out(self, name, request, timeout):
        """Invokes a unary-request-stream-response RPC method.

        Args:
          name: The RPC method name.
          request: The request value for the RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A CancellableIterator that yields the response values of the RPC and
          affords RPC cancellation. Drawing response values from the returned
          CancellableIterator may raise exceptions.RpcError indicating abortion
          of the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def blocking_stream_in_value_out(self, name, request_iterator, timeout):
        """Invokes a stream-request-unary-response RPC method.

        This method blocks until either returning the response value of the RPC
        (in the event of RPC completion) or raising an exception (in the event
        of RPC abortion).

        Args:
          name: The RPC method name.
          request_iterator: An iterator that yields the request values of the
            RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          The response value for the RPC.

        Raises:
          exceptions.RpcError: Indicating that the RPC was aborted.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def future_stream_in_value_out(self, name, request_iterator, timeout):
        """Invokes a stream-request-unary-response RPC method.

        Args:
          name: The RPC method name.
          request_iterator: An iterator that yields the request values of the
            RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A future.Future representing the RPC. In the event of RPC completion,
          the returned Future will return an outcome indicating that the RPC
          returned the response value of the RPC. In the event of RPC abortion,
          the returned Future will return an outcome indicating that the RPC
          raised an exceptions.RpcError.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def inline_stream_in_stream_out(self, name, request_iterator, timeout):
        """Invokes a stream-request-stream-response RPC method.

        Args:
          name: The RPC method name.
          request_iterator: An iterator that yields the request values of the
            RPC.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A CancellableIterator that yields the response values of the RPC and
          affords RPC cancellation. Drawing response values from the returned
          CancellableIterator may raise exceptions.RpcError indicating abortion
          of the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event_value_in_value_out(
        self, name, request, response_callback, abortion_callback, timeout):
        """Event-driven invocation of a unary-request-unary-response RPC method.

        Args:
          name: The RPC method name.
          request: The request value for the RPC.
          response_callback: A callback to be called to accept the response
            value of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A Call object for the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event_value_in_stream_out(
        self, name, request, response_consumer, abortion_callback, timeout):
        """Event-driven invocation of a unary-request-stream-response RPC method.

        Args:
          name: The RPC method name.
          request: The request value for the RPC.
          response_consumer: A stream.Consumer to be called to accept the
            response values of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A Call object for the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event_stream_in_value_out(
        self, name, response_callback, abortion_callback, timeout):
        """Event-driven invocation of a stream-request-unary-response RPC method.

        Args:
          name: The RPC method name.
          response_callback: A callback to be called to accept the response
            value of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A pair of a Call object for the RPC and a stream.Consumer to which the
          request values of the RPC should be passed.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def event_stream_in_stream_out(
        self, name, response_consumer, abortion_callback, timeout):
        """Event-driven invocation of a stream-request-stream-response RPC method.

        Args:
          name: The RPC method name.
          response_consumer: A stream.Consumer to be called to accept the
            response values of the RPC.
          abortion_callback: A callback to be called and passed an Abortion
            value in the event of RPC abortion.
          timeout: A duration of time in seconds to allow for the RPC.

        Returns:
          A pair of a Call object for the RPC and a stream.Consumer to which the
          request values of the RPC should be passed.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def unary_unary_multi_callable(self, name):
        """Creates a UnaryUnaryMultiCallable for a unary-unary RPC method.

        Args:
          name: The RPC method name.

        Returns:
          A UnaryUnaryMultiCallable value for the named unary-unary RPC method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def unary_stream_multi_callable(self, name):
        """Creates a UnaryStreamMultiCallable for a unary-stream RPC method.

        Args:
          name: The RPC method name.

        Returns:
          A UnaryStreamMultiCallable value for the named unary-stream RPC method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stream_unary_multi_callable(self, name):
        """Creates a StreamUnaryMultiCallable for a stream-unary RPC method.

        Args:
          name: The RPC method name.

        Returns:
          A StreamUnaryMultiCallable value for the named stream-unary RPC method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stream_stream_multi_callable(self, name):
        """Creates a StreamStreamMultiCallable for a stream-stream RPC method.

        Args:
          name: The RPC method name.

        Returns:
          A StreamStreamMultiCallable value for the named stream-stream RPC
          method.
        """
        raise NotImplementedError()
class DynamicStub(object):
    """A stub with RPC-method-bound multi-callable attributes.

    Instances of this type respond to attribute access as follows: if the
    requested attribute is the name of a unary-unary RPC method, the value of
    the attribute will be a UnaryUnaryMultiCallable with which to invoke the
    RPC method; if the requested attribute is the name of a unary-stream RPC
    method, the value of the attribute will be a UnaryStreamMultiCallable with
    which to invoke the RPC method; if the requested attribute is the name of
    a stream-unary RPC method, the value of the attribute will be a
    StreamUnaryMultiCallable with which to invoke the RPC method; and if the
    requested attribute is the name of a stream-stream RPC method, the value
    of the attribute will be a StreamStreamMultiCallable with which to invoke
    the RPC method.
    """

    __metaclass__ = abc.ABCMeta
| bsd-3-clause |
bally12345/enigma2 | lib/python/Components/EpgList.py | 1 | 44945 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from Components.config import config
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaBlend, MultiContentEntryPixmapAlphaTest
from Components.Renderer.Picon import getPiconName
from skin import parseColor, parseFont
from enigma import eEPGCache, eListbox, eListboxPythonMultiContent, ePicLoad, gFont, eRect, eSize, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, RT_VALIGN_CENTER, RT_VALIGN_TOP, RT_WRAP
from Tools.Alternatives import CompareWithAlternatives
from Tools.LoadPixmap import LoadPixmap
from time import localtime, time, strftime
from ServiceReference import ServiceReference
from Tools.Directories import pathExists, resolveFilename, SCOPE_CURRENT_SKIN
from os import listdir, path
# EPG display modes; EPGList.__init__ selects a row-build function per type.
EPG_TYPE_SINGLE = 0
EPG_TYPE_MULTI = 1
EPG_TYPE_SIMILAR = 2
EPG_TYPE_ENHANCED = 3
EPG_TYPE_INFOBAR = 4
EPG_TYPE_GRAPH = 5

# Presumably the maximum number of time-line marks drawn in the graphical
# EPG view — used beyond this excerpt; confirm against the drawing code.
MAX_TIMELINES = 6
class Rect:
    """A simple axis-aligned rectangle: position (x, y) plus size (w, h).

    Exposes accessor methods (left/top/width/height) for callers that expect
    an eRect-like interface.
    """

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.w = width
        self.h = height

    # silly, but backward compatible
    def left(self):
        return self.x

    def top(self):
        return self.y

    def height(self):
        return self.h

    def width(self):
        return self.w

    def __repr__(self):
        # Aids debugging of layout computations.
        return "Rect(x=%r, y=%r, w=%r, h=%r)" % (self.x, self.y, self.w, self.h)
class EPGList(HTMLComponent, GUIComponent):
	def __init__(self, type = EPG_TYPE_SINGLE, selChangedCB = None, timer = None, time_epoch = 120, overjump_empty = False):
		"""Create an EPG list component.

		type            -- one of the EPG_TYPE_* constants; selects the row build function.
		selChangedCB    -- optional callback invoked on selection changes.
		timer           -- timer object queried for record/zap markers (isInTimer).
		time_epoch      -- graph mode: visible time span in minutes.
		overjump_empty  -- graph mode: skip rows that have no events.
		"""
		# Graph-mode cursor state: index of the selected event within the
		# selected service row, and the row tuple itself.
		self.cur_event = None
		self.cur_service = None
		# offs is the page offset (in multiples of time_epoch) from time_base.
		self.offs = 0
		self.time_base = None
		self.time_epoch = time_epoch
		# Geometry, computed later by recalcEntrySize()/selEntry().
		self.select_rect = None
		self.event_rect = None
		self.service_rect = None
		self.currentlyPlaying = None
		self.showPicon = False
		self.showServiceTitle = True
		self.picload = ePicLoad()
		self.overjump_empty = overjump_empty
		self.timer = timer
		# Callbacks fired from selectionChanged().
		self.onSelChanged = [ ]
		if selChangedCB is not None:
			self.onSelChanged.append(selChangedCB)
		GUIComponent.__init__(self)
		self.type = type
		self.l = eListboxPythonMultiContent()
		# Pick the row renderer matching the display mode.
		if type == EPG_TYPE_SINGLE or type == EPG_TYPE_ENHANCED or type == EPG_TYPE_INFOBAR:
			self.l.setBuildFunc(self.buildSingleEntry)
		elif type == EPG_TYPE_MULTI:
			self.l.setBuildFunc(self.buildMultiEntry)
		elif type == EPG_TYPE_GRAPH:
			self.l.setBuildFunc(self.buildGraphEntry)
		else:
			assert(type == EPG_TYPE_SIMILAR)
			self.l.setBuildFunc(self.buildSimilarEntry)
		self.epgcache = eEPGCache.getInstance()
		# Clock pixmaps used as timer-state markers on event rows.
		self.clock_pixmap = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png'))
		self.clock_add_pixmap = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png'))
		self.clock_pre_pixmap = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png'))
		self.clock_post_pixmap = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png'))
		self.clock_prepost_pixmap = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png'))
		# Graph-mode background pixmaps; decoded lazily in setItemsPerPage().
		self.nowEvPix = None
		self.nowSelEvPix = None
		self.othEvPix = None
		self.selEvPix = None
		self.nowServPix = None
		self.recEvPix = None
		self.recSelEvPix = None
		self.zapEvPix = None
		self.zapSelEvPix = None
		# Default colors (ARGB); skins may override any of them in applySkin().
		self.borderColor = 0xC0C0C0
		self.borderColorService = 0xC0C0C0
		self.foreColor = 0xffffff
		self.foreColorSelected = 0xffffff
		self.backColor = 0x2D455E
		self.backColorSelected = 0xd69600
		self.foreColorService = 0xffffff
		self.backColorService = 0x2D455E
		self.foreColorNow = 0xffffff
		self.foreColorNowSelected = 0xffffff
		self.backColorNow = 0x00825F
		self.backColorNowSelected = 0xd69600
		self.foreColorServiceNow = 0xffffff
		self.backColorServiceNow = 0x00825F
		self.foreColorRecord = 0xffffff
		self.backColorRecord = 0xd13333
		self.foreColorRecordSelected = 0xffffff
		self.backColorRecordSelected = 0x9e2626
		self.foreColorZap = 0xffffff
		self.backColorZap = 0x669466
		self.foreColorZapSelected = 0xffffff
		self.backColorZapSelected = 0x436143
		# Default fonts per display mode; skins may override in applySkin().
		self.serviceFontNameGraph = "Regular"
		self.serviceFontSizeGraph = 20
		self.eventFontNameGraph = "Regular"
		self.eventFontSizeGraph = 18
		self.eventFontNameSingle = "Regular"
		self.eventFontSizeSingle = 22
		self.eventFontNameMulti = "Regular"
		self.eventFontSizeMulti = 22
		self.eventFontNameInfobar = "Regular"
		self.eventFontSizeInfobar = 22
		# Filled in by applySkin() from the widget size.
		self.listHeight = None
		self.listWidth = None
		self.serviceBorderWidth = 1
		self.serviceNamePadding = 0
		self.eventBorderWidth = 1
		self.eventNamePadding = 3
		self.eventNameAlign = 'left'
def applySkin(self, desktop, screen):
if self.skinAttributes is not None:
attribs = [ ]
for (attrib, value) in self.skinAttributes:
if attrib == "ServiceFont":
font = parseFont(value, ((1,1),(1,1)) )
self.serviceFontNameGraph = font.family
self.serviceFontSizeGraph = font.pointSize
elif attrib == "EntryFontGraphical":
font = parseFont(value, ((1,1),(1,1)) )
self.eventFontNameGraph = font.family
self.eventFontSize = font.pointSize
elif attrib == "EntryFontAlignment":
self.eventNameAlign = value
elif attrib == "EventFontSingle":
font = parseFont(value, ((1,1),(1,1)) )
self.eventFontNameSingle = font.family
self.eventFontSizeSingle = font.pointSize
elif attrib == "EventFontInfobar":
font = parseFont(value, ((1,1),(1,1)) )
self.eventFontNameInfobar = font.family
self.eventFontSizeInfobar = font.pointSize
elif attrib == "ServiceForegroundColor":
self.foreColorService = parseColor(value).argb()
elif attrib == "ServiceForegroundColorNow":
self.foreColorServiceNow = parseColor(value).argb()
elif attrib == "ServiceBackgroundColor":
self.backColorService = parseColor(value).argb()
elif attrib == "ServiceBackgroundColorNow":
self.backColorServiceNow = parseColor(value).argb()
elif attrib == "EntryForegroundColor":
self.foreColor = parseColor(value).argb()
elif attrib == "EntryForegroundColorSelected":
self.foreColorSelected = parseColor(value).argb()
elif attrib == "EntryBackgroundColor":
self.backColor = parseColor(value).argb()
elif attrib == "EntryBackgroundColorSelected":
self.backColorSelected = parseColor(value).argb()
elif attrib == "EntryBackgroundColorNow":
self.backColorNow = parseColor(value).argb()
elif attrib == "EntryBackgroundColorNowSelected":
self.backColorNowSelected = parseColor(value).argb()
elif attrib == "EntryForegroundColorNow":
self.foreColorNow = parseColor(value).argb()
elif attrib == "EntryForegroundColorNowSelected":
self.foreColorNowSelected = parseColor(value).argb()
elif attrib == "ServiceBorderColor":
self.borderColorService = parseColor(value).argb()
elif attrib == "ServiceBorderWidth":
self.serviceBorderWidth = int(value)
elif attrib == "ServiceNamePadding":
self.serviceNamePadding = int(value)
elif attrib == "EntryBorderColor":
self.borderColor = parseColor(value).argb()
elif attrib == "EventBorderWidth":
self.eventBorderWidth = int(value)
elif attrib == "EventNamePadding":
self.eventNamePadding = int(value)
elif attrib == "RecordForegroundColor":
self.foreColorRecord = parseColor(value).argb()
elif attrib == "RecordForegroundColorSelected":
self.foreColorRecordSelected = parseColor(value).argb()
elif attrib == "RecordBackgroundColor":
self.backColorRecord = parseColor(value).argb()
elif attrib == "RecordBackgroundColorSelected":
self.backColorRecordSelected = parseColor(value).argb()
elif attrib == "ZapForegroundColor":
self.foreColorZap = parseColor(value).argb()
elif attrib == "ZapBackgroundColor":
self.backColorZap = parseColor(value).argb()
elif attrib == "ZapForegroundColorSelected":
self.foreColorZapSelected = parseColor(value).argb()
elif attrib == "ZapBackgroundColorSelected":
self.backColorZapSelected = parseColor(value).argb()
else:
attribs.append((attrib,value))
self.skinAttributes = attribs
rc = GUIComponent.applySkin(self, desktop, screen)
self.listHeight = self.instance.size().height()
self.listWidth = self.instance.size().width()
self.setItemsPerPage()
return rc
def getCurrentChangeCount(self):
if self.type == EPG_TYPE_MULTI and self.l.getCurrentSelection() is not None:
return self.l.getCurrentSelection()[0]
return 0
	def moveUp(self):
		# Move the listbox selection one entry up.
		self.instance.moveSelection(self.instance.moveUp)
	def moveDown(self):
		# Move the listbox selection one entry down.
		self.instance.moveSelection(self.instance.moveDown)
def isSelectable(self, service, service_name, events, picon):
return (events and len(events) and True) or False
	def setShowServiceMode(self, value):
		"""Configure the service column from a mode string containing
		"servicename" and/or "picon", then refresh the layout."""
		self.showServiceTitle = "servicename" in value
		self.showPicon = "picon" in value
		self.recalcEntrySize()
		self.selEntry(0) #Select entry again so that the clipping region gets updated if needed
def setOverjump_Empty(self, overjump_empty):
if overjump_empty:
self.l.setSelectableFunc(self.isSelectable)
else:
self.l.setSelectableFunc(None)
	def setEpoch(self, epoch):
		# Change the graph time span (minutes), reset paging and redraw.
		self.offs = 0
		self.time_epoch = epoch
		self.fillGraphEPG(None)
	def setCurrentlyPlaying(self, serviceref):
		# Remember the currently playing service so its row can be highlighted.
		self.currentlyPlaying = serviceref
def getEventFromId(self, service, eventid):
event = None
if self.epgcache is not None and eventid is not None:
event = self.epgcache.lookupEventId(service.ref, eventid)
return event
	def getIndexFromService(self, serviceref):
		"""Return the list index of *serviceref*, or None if not found.

		Graph-EPG rows carry the service reference in column 0,
		multi-EPG rows in column 1; alternatives are matched too.
		"""
		if serviceref is not None:
			for x in range(len(self.list)):
				if str(self.list[x][0]).startswith('1:'): # check for Graphical EPG
					if CompareWithAlternatives(self.list[x][0], serviceref.toString()):
						return x
				elif str(self.list[x][1]).startswith('1:'): # check for Multi EPG
					if CompareWithAlternatives(self.list[x][1], serviceref.toString()):
						return x
				else:
					# Neither column looks like a service reference: this list
					# is not a graph/multi EPG list, so give up immediately.
					return None
		return None
def moveToService(self, serviceref):
newIdx = self.getIndexFromService(serviceref)
if newIdx is None:
newIdx = 0
self.setCurrentIndex(newIdx)
def setCurrentIndex(self, index):
if self.instance is not None:
self.instance.moveSelectionTo(index)
def moveTo(self, dir):
if self.instance is not None:
self.instance.moveSelection(dir)
	def getCurrent(self):
		"""Return (event, service) for the current selection.

		Either element may be None; in graph mode a service row without
		events yields (None, ServiceReference).
		"""
		if self.type == EPG_TYPE_GRAPH:
			if self.cur_service is None:
				return (None, None)
			old_service = self.cur_service #(service, service_name, events, picon)
			events = self.cur_service[2]
			refstr = self.cur_service[0]
			if self.cur_event is None or not events or not len(events):
				return (None, ServiceReference(refstr))
			event = events[self.cur_event] #(event_id, event_title, begin_time, duration)
			eventid = event[0]
			service = ServiceReference(refstr)
			event = self.getEventFromId(service, eventid) # get full event info
			return (event, service)
		else:
			# Multi-EPG rows are shifted by one column (change counter at 0).
			idx = 0
			if self.type == EPG_TYPE_MULTI:
				idx += 1
			tmp = self.l.getCurrentSelection()
			if tmp is None:
				return (None, None)
			eventid = tmp[idx+1]
			service = ServiceReference(tmp[idx])
			event = self.getEventFromId(service, eventid)
			return ( event, service )
def connectSelectionChanged(func):
if not self.onSelChanged.count(func):
self.onSelChanged.append(func)
def disconnectSelectionChanged(func):
self.onSelChanged.remove(func)
def serviceChanged(self):
cur_sel = self.l.getCurrentSelection()
if cur_sel:
self.findBestEvent()
	def findBestEvent(self):
		"""Select the event in the newly selected service row whose start
		time is closest to the previously selected event (or to "now").

		Sets self.cur_service/self.cur_event, then updates the selection
		clip via selEntry(0).
		"""
		old_service = self.cur_service #(service, service_name, events, picon)
		cur_service = self.cur_service = self.l.getCurrentSelection()
		time_base = self.getTimeBase()
		last_time = time()
		if old_service and self.cur_event is not None:
			events = old_service[2]
			cur_event = events[self.cur_event] #(event_id, event_title, begin_time, duration)
			# Prefer the start time of the previously selected event if it
			# lies in the future; otherwise anchor on the current time.
			if cur_event[2] > last_time:
				last_time = cur_event[2]
		if cur_service:
			self.cur_event = 0
			events = cur_service[2]
			best = None
			if events and len(events):
				best_diff = 0
				idx = 0
				for event in events: #iterate all events
					ev_time = event[2]
					# Events starting before the visible window count as
					# starting at the window edge for the distance metric.
					if ev_time < time_base:
						ev_time = time_base
					diff = abs(ev_time - last_time)
					if best is None or (diff < best_diff):
						best = idx
						best_diff = diff
					# Events are time-ordered: once past the anchor, stop.
					if best is not None and ev_time > last_time:
						break
					idx += 1
			self.cur_event = best
		self.selEntry(0)
def selectionChanged(self):
for x in self.onSelChanged:
if x is not None:
x()
GUI_WIDGET = eListbox
	def setItemsPerPage(self):
		"""Compute and apply the row height from the configured
		items-per-page value, resize the widget to a whole number of
		rows, and (graph mode) decode the skin background pixmaps.

		NOTE(review): relies on Python 2 integer division for the
		height arithmetic — confirm before porting to Python 3.
		"""
		if self.type == EPG_TYPE_GRAPH:
			if self.listHeight > 0:
				itemHeight = self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()
			else:
				itemHeight = 54 # some default (270/5)
			if config.epgselection.heightswitch.getValue():
				# Alternative "height switch" mode: shrink (or grow) rows to
				# a third/half (or double/triple) while keeping >=27 / <=45 px.
				if ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) / 3) >= 27:
					tmp_itemHeight = ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) / 3)
				elif ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) / 2) >= 27:
					tmp_itemHeight = ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) / 2)
				else:
					tmp_itemHeight = 27
				if tmp_itemHeight < itemHeight:
					itemHeight = tmp_itemHeight
				else:
					if ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) * 3) <= 45:
						itemHeight = ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) * 3)
					elif ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) * 2) <= 45:
						itemHeight = ((self.listHeight / config.epgselection.itemsperpage_vixepg.getValue()) * 2)
					else:
						itemHeight = 45
			self.l.setItemHeight(itemHeight)
			# Truncate the widget height to a whole multiple of itemHeight.
			self.instance.resize(eSize(self.listWidth, self.listHeight / itemHeight * itemHeight))
			# Decode the graph-mode background pixmaps at the new row height.
			self.picload.setPara((self.listWidth, itemHeight - 2 * self.eventBorderWidth, 0, 0, 1, 1, "#00000000"))
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/CurrentEvent.png'), 0, 0, False)
			self.nowEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/SelectedCurrentEvent.png'), 0, 0, False)
			self.nowSelEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/OtherEvent.png'), 0, 0, False)
			self.othEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/SelectedEvent.png'), 0, 0, False)
			self.selEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/CurrentService.png'), 0, 0, False)
			self.nowServPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/RecordEvent.png'), 0, 0, False)
			self.recEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/SelectedRecordEvent.png'), 0, 0, False)
			self.recSelEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/ZapEvent.png'), 0, 0, False)
			self.zapEvPix = self.picload.getData()
			self.picload.startDecode(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/SelectedZapEvent.png'), 0, 0, False)
			self.zapSelEvPix = self.picload.getData()
		elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_SIMILAR:
			if self.listHeight > 0:
				itemHeight = self.listHeight / config.epgselection.itemsperpage_enhanced.getValue()
			else:
				itemHeight = 32
			self.l.setItemHeight(itemHeight)
			self.instance.resize(eSize(self.listWidth, self.listHeight / itemHeight * itemHeight))
		elif self.type == EPG_TYPE_MULTI:
			if self.listHeight > 0:
				itemHeight = self.listHeight / config.epgselection.itemsperpage_multi.getValue()
			else:
				itemHeight = 32
			self.l.setItemHeight(itemHeight)
			self.instance.resize(eSize(self.listWidth, self.listHeight / itemHeight * itemHeight))
		elif self.type == EPG_TYPE_INFOBAR:
			# Infobar list is not resized to a whole row multiple.
			if self.listHeight > 0:
				itemHeight = float(self.listHeight / config.epgselection.itemsperpage_infobar.getValue())
			else:
				itemHeight = 32
			self.l.setItemHeight(int(itemHeight))
	def setServiceFontsize(self):
		# Apply the graph-mode service-column font (skin size + user offset).
		self.l.setFont(0, gFont(self.serviceFontNameGraph, self.serviceFontSizeGraph + config.epgselection.serv_fontsize_vixepg.getValue()))
	def setEventFontsize(self):
		"""Apply the event font for the current display mode
		(skin size plus the user-configured offset)."""
		if self.type == EPG_TYPE_GRAPH:
			self.l.setFont(1, gFont(self.eventFontNameGraph, self.eventFontSizeGraph + config.epgselection.ev_fontsize_vixepg.getValue()))
		elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_SIMILAR:
			self.l.setFont(0, gFont(self.eventFontNameSingle, self.eventFontSizeSingle + config.epgselection.ev_fontsize_enhanced.getValue()))
		elif self.type == EPG_TYPE_MULTI:
			# Multi-EPG uses a second, slightly smaller font for the time column.
			self.l.setFont(0, gFont(self.eventFontNameMulti, self.eventFontSizeMulti + config.epgselection.ev_fontsize_multi.getValue()))
			self.l.setFont(1, gFont(self.eventFontNameMulti, self.eventFontSizeMulti - 4 + config.epgselection.ev_fontsize_multi.getValue()))
		elif self.type == EPG_TYPE_INFOBAR:
			self.l.setFont(0, gFont(self.eventFontNameInfobar, self.eventFontSizeInfobar + config.epgselection.ev_fontsize_infobar.getValue()))
	def postWidgetCreate(self, instance):
		"""Wire the native listbox after creation: content, wrap mode,
		selection callback and fonts (graph mode needs extra setup)."""
		if self.type == EPG_TYPE_GRAPH:
			self.setOverjump_Empty(self.overjump_empty)
			instance.setWrapAround(True)
			instance.selectionChanged.get().append(self.serviceChanged)
			instance.setContent(self.l)
			# Start with an empty selection clip; selEntry() sets the real one.
			self.l.setSelectionClip(eRect(0,0,0,0), False)
			self.setServiceFontsize()
			self.setEventFontsize()
		else:
			instance.setWrapAround(False)
			instance.selectionChanged.get().append(self.selectionChanged)
			instance.setContent(self.l)
			self.setEventFontsize()
def preWidgetRemove(self, instance):
if self.type == EPG_TYPE_GRAPH:
instance.selectionChanged.get().remove(self.serviceChanged)
instance.setContent(None)
else:
instance.selectionChanged.get().remove(self.selectionChanged)
instance.setContent(None)
	def recalcEntrySize(self):
		"""Recompute the column rectangles for the current item size and
		display mode; must be called whenever layout inputs change."""
		esize = self.l.getItemSize()
		width = esize.width()
		height = esize.height()
		if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_INFOBAR:
			# Column widths grow slightly with the configured font offset.
			if self.type == EPG_TYPE_INFOBAR:
				fontwdith = config.epgselection.ev_fontsize_infobar.getValue()
			else:
				fontwdith = config.epgselection.ev_fontsize_enhanced.getValue()
			self.weekday_rect = Rect(0, 0, float(width / 100) * (10 + (fontwdith / 2)) , height)
			self.datetime_rect = Rect(self.weekday_rect.width(), 0, float(width / 100) * (25 + fontwdith), height)
			self.descr_rect = Rect(self.datetime_rect.left() + self.datetime_rect.width(), 0, float(width / 100) * (70 + fontwdith), height)
		elif self.type == EPG_TYPE_MULTI:
			# Fixed 3/2/5 tenths split: service / time+progress / description.
			xpos = 0;
			w = width / 10 * 3;
			self.service_rect = Rect(xpos, 0, w-10, height)
			xpos += w;
			w = width / 10 * 2;
			self.start_end_rect = Rect(xpos, 0, w-10, height)
			self.progress_rect = Rect(xpos, 4, w-10, height-8)
			xpos += w
			w = width / 10 * 5;
			self.descr_rect = Rect(xpos, 0, width, height)
		elif self.type == EPG_TYPE_GRAPH:
			if self.showServiceTitle:
				w = width / 10 * 2;
			else: # if self.showPicon: # this must be set if showServiceTitle is None
				w = 2 * height - 2 * self.serviceBorderWidth # FIXME: could do better...
			self.service_rect = Rect(0, 0, w, height)
			self.event_rect = Rect(w, 0, width - w, height)
			piconHeight = height - 2 * self.serviceBorderWidth
			piconWidth = 2 * piconHeight # FIXME: could do better...
			if piconWidth > w - 2 * self.serviceBorderWidth:
				piconWidth = w - 2 * self.serviceBorderWidth
			self.picon_size = eSize(piconWidth, piconHeight)
		else: # EPG_TYPE_SIMILAR
			fontwdith = config.epgselection.ev_fontsize_enhanced.getValue()
			self.weekday_rect = Rect(0, 0, float(width / 100) * (10 + (fontwdith / 2)) , height)
			self.datetime_rect = Rect(self.weekday_rect.width(), 0, float(width / 100) * (25 + fontwdith), height)
			self.service_rect = Rect(self.datetime_rect.left() + self.datetime_rect.width(), 0, float(width / 100) * (70 + fontwdith), height)
def calcEntryPosAndWidthHelper(self, stime, duration, start, end, width):
xpos = (stime - start) * width / (end - start)
ewidth = (stime + duration - start) * width / (end - start)
ewidth -= xpos;
if xpos < 0:
ewidth += xpos;
xpos = 0;
if (xpos + ewidth) > width:
ewidth = width - xpos
return xpos, ewidth
def calcEntryPosAndWidth(self, event_rect, time_base, time_epoch, ev_start, ev_duration):
xpos, width = self.calcEntryPosAndWidthHelper(ev_start, ev_duration, time_base, time_base + time_epoch * 60, event_rect.width())
return xpos + event_rect.left(), width
	def buildSingleEntry(self, service, eventId, beginTime, duration, EventName):
		"""Render one row for single/enhanced/infobar EPG:
		weekday | date+time | [clock icon] event title."""
		(clock_pic, rec) = self.getPixmapForEntry(service, eventId, beginTime, duration)
		r1 = self.weekday_rect
		r2 = self.datetime_rect
		r3 = self.descr_rect
		t = localtime(beginTime)
		res = [
			None, # no private data needed
			(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, _(strftime("%a", t))),
			(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, strftime("%e/%m, %-H:%M", t))
		]
		if rec:
			# Timer set for this event: prefix the title with a clock icon.
			res.extend((
				(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x, r3.y, 21, 21, clock_pic),
				(eListboxPythonMultiContent.TYPE_TEXT, r3.x + 25, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName)
			))
		else:
			res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
		return res
	def buildSimilarEntry(self, service, eventId, beginTime, service_name, duration):
		"""Render one row for the similar-broadcasts list:
		weekday | date+time | [clock icon] service name."""
		(clock_pic, rec) = self.getPixmapForEntry(service, eventId, beginTime, duration)
		r1 = self.weekday_rect
		r2 = self.datetime_rect
		r3 = self.service_rect
		t = localtime(beginTime)
		res = [
			None, # no private data needed
			(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, _(strftime("%a", t))),
			(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, strftime("%e/%m, %-H:%M", t))
		]
		if rec:
			# Timer set for this event: prefix the service name with a clock icon.
			res.extend((
				(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r3.x, r3.y, 21, 21, clock_pic),
				(eListboxPythonMultiContent.TYPE_TEXT, r3.x + 25, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name)
			))
		else:
			res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
		return res
	def buildMultiEntry(self, changecount, service, eventId, beginTime, duration, EventName, nowTime, service_name):
		"""Render one multi-EPG row: service name (with optional clock
		icon), then either the start-end times (future event) or a
		progress bar (running event), plus the event title."""
		(clock_pic, rec) = self.getPixmapForEntry(service, eventId, beginTime, duration)
		r1 = self.service_rect
		r2 = self.progress_rect
		r3 = self.descr_rect
		r4 = self.start_end_rect
		res = [ None ] # no private data needed
		if rec:
			res.extend((
				(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w-21, r1.h, 0, RT_HALIGN_LEFT, service_name),
				(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, r1.x+r1.w-16, r1.y, 21, 21, clock_pic)
			))
		else:
			res.append((eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT, service_name))
		if beginTime is not None:
			if nowTime < beginTime:
				# Event has not started yet: show "HH.MM - HH.MM".
				begin = localtime(beginTime)
				end = localtime(beginTime+duration)
				res.extend((
					(eListboxPythonMultiContent.TYPE_TEXT, r4.x, r4.y, r4.w, r4.h, 1, RT_HALIGN_CENTER|RT_VALIGN_CENTER, "%02d.%02d - %02d.%02d"%(begin[3],begin[4],end[3],end[4])),
					(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
				))
			else:
				# Running event: show elapsed percentage as a progress bar.
				percent = (nowTime - beginTime) * 100 / duration
				res.extend((
					(eListboxPythonMultiContent.TYPE_PROGRESS, r2.x, r2.y, r2.w, r2.h, percent),
					(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT, EventName)
				))
		return res
	def buildGraphEntry(self, service, service_name, events, picon):
		"""Render one graphical-EPG row: the service cell (picon and/or
		name) followed by one box per event, colored by state
		(now/selected/record/zap) with optional skin background pixmaps
		when graphics mode is enabled."""
		r1 = self.service_rect
		r2 = self.event_rect
		selected = self.cur_service[0] == service
		# Picon and Service name
		if CompareWithAlternatives(service, self.currentlyPlaying and self.currentlyPlaying.toString()):
			serviceForeColor = self.foreColorServiceNow
			serviceBackColor = self.backColorServiceNow
			bgpng = self.nowServPix
			if bgpng is not None and config.epgselection.graphics_mode.value == "graphics": # background for service rect
				serviceBackColor = None
		else:
			serviceForeColor = self.foreColorService
			serviceBackColor = self.backColorService
			bgpng = self.othEvPix
			if bgpng is not None and config.epgselection.graphics_mode.value == "graphics": # background for service rect
				serviceBackColor = None
		res = [ None ]
		if bgpng is not None and config.epgselection.graphics_mode.value == "graphics": # background for service rect
			res.append(MultiContentEntryPixmapAlphaTest(
				pos = (r1.x + self.serviceBorderWidth, r1.y + self.serviceBorderWidth),
				size = (r1.w - 2 * self.serviceBorderWidth, r1.h - 2 * self.serviceBorderWidth),
				png = bgpng))
		else:
			res.append(MultiContentEntryText(
				pos = (r1.x, r1.y),
				size = (r1.w, r1.h),
				font = 0, flags = RT_HALIGN_LEFT | RT_VALIGN_CENTER,
				text = "",
				color = serviceForeColor, color_sel = serviceForeColor,
				backcolor = serviceBackColor, backcolor_sel = serviceBackColor,
				border_width = self.serviceBorderWidth, border_color = self.borderColorService) )
		displayPicon = None
		if self.showPicon:
			if picon is None: # go find picon and cache its location
				picon = getPiconName(service)
				curIdx = self.l.getCurrentSelectionIndex()
				self.list[curIdx] = (service, service_name, events, picon)
			piconWidth = self.picon_size.width()
			piconHeight = self.picon_size.height()
			if picon != "":
				self.picload.setPara((piconWidth, piconHeight, 1, 1, 1, 1, "#FFFFFFFF"))
				self.picload.startDecode(picon, 0, 0, False)
				displayPicon = self.picload.getData()
			if displayPicon is not None:
				res.append(MultiContentEntryPixmapAlphaTest(
					pos = (r1.x + self.serviceBorderWidth, r1.y + self.serviceBorderWidth),
					size = (piconWidth, piconHeight),
					png = displayPicon,
					backcolor = None, backcolor_sel = None) )
			elif not self.showServiceTitle:
				# no picon so show servicename anyway in picon space
				namefont = 1
				namefontflag = RT_HALIGN_LEFT | RT_VALIGN_CENTER | RT_WRAP
				namewidth = piconWidth
				piconWidth = 0
		else:
			piconWidth = 0
		if self.showServiceTitle: # we have more space so reset parms
			namefont = 0
			namefontflag = RT_HALIGN_LEFT | RT_VALIGN_CENTER
			namewidth = r1.w - piconWidth
		if self.showServiceTitle or displayPicon is None:
			res.append(MultiContentEntryText(
				pos = (r1.x + piconWidth + self.serviceBorderWidth + self.serviceNamePadding,
					r1.y + self.serviceBorderWidth),
				size = (namewidth - 2 * (self.serviceBorderWidth + self.serviceNamePadding),
					r1.h - 2 * self.serviceBorderWidth),
				font = namefont, flags = namefontflag,
				text = service_name,
				color = serviceForeColor, color_sel = serviceForeColor,
				backcolor = serviceBackColor, backcolor_sel = serviceBackColor))
		# Events for service
		backColorSel = self.backColorSelected
		if events:
			start = self.time_base + self.offs * self.time_epoch * 60
			end = start + self.time_epoch * 60
			left = r2.x
			top = r2.y
			width = r2.w
			height = r2.h
			now = time()
			for ev in events: #(event_id, event_title, begin_time, duration)
				stime = ev[2]
				duration = ev[3]
				xpos, ewidth = self.calcEntryPosAndWidthHelper(stime, duration, start, end, width)
				rec = stime and self.timer.isInTimer(ev[0], stime, duration, service)
				rectype = self.GraphEPGRecRed(service, ev[2], ev[3], ev[0])
				if self.eventNameAlign.lower() == 'left':
					alignnment = RT_HALIGN_LEFT | RT_VALIGN_CENTER | RT_WRAP
				else:
					alignnment = RT_HALIGN_CENTER | RT_VALIGN_CENTER | RT_WRAP
				# Pick fore/back colors and background pixmap by event state:
				# selected vs not, currently running vs not (rec/zap below).
				if selected and self.select_rect.x == xpos + left:
					if stime <= now and now < (stime + duration):
						foreColor = self.foreColorNow
						backColor = self.backColorNow
						foreColorSel = self.foreColorNowSelected
						backColorSel = self.backColorNowSelected
						bgpng = self.nowSelEvPix
						if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
							backColor = None
							backColorSel = None
					else:
						foreColor = self.foreColor
						backColor = self.backColor
						foreColorSel = self.foreColorSelected
						backColorSel = self.backColorSelected
						bgpng = self.selEvPix
						if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
							backColor = None
							backColorSel = None
				elif stime <= now and now < (stime + duration):
					foreColor = self.foreColorNow
					backColor = self.backColorNow
					foreColorSel = self.foreColorNowSelected
					backColorSel = self.backColorNowSelected
					bgpng = self.nowEvPix
					if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
						backColor = None
						backColorSel = None
				else:
					backColor = self.backColor
					foreColor = self.foreColor
					foreColorSel = self.foreColorSelected
					backColorSel = self.backColorSelected
					bgpng = self.othEvPix
					if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
						backColor = None
						backColorSel = None
				# Timer states (record/zap) override the colors chosen above.
				if rec and selected and self.select_rect.x == xpos + left:
					if rectype == "record":
						foreColor = self.foreColorRecord
						backColor = self.backColorRecord
						foreColorSel = self.foreColorRecordSelected
						backColorSel = self.backColorRecordSelected
						bgpng = self.recSelEvPix
						if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
							backColor = None
							backColorSel = None
					elif rectype == "justplay":
						foreColor = self.foreColorZap
						backColor = self.backColorZap
						foreColorSel = self.foreColorZapSelected
						backColorSel = self.backColorZapSelected
						bgpng = self.zapSelEvPix
						if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
							backColor = None
							backColorSel = None
				elif rec:
					if rectype == "record":
						foreColor = self.foreColorRecord
						backColor = self.backColorRecord
						foreColorSel = self.foreColorRecordSelected
						backColorSel = self.backColorRecordSelected
						bgpng = self.recEvPix
						if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
							backColor = None
							backColorSel = None
					elif rectype == "justplay":
						foreColor = self.foreColorZap
						backColor = self.backColorZap
						foreColorSel = self.foreColorZapSelected
						backColorSel = self.backColorZapSelected
						bgpng = self.zapEvPix
						if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
							backColor = None
							backColorSel = None
				# event box background
				if bgpng is not None and config.epgselection.graphics_mode.value == "graphics":
					res.append(MultiContentEntryPixmapAlphaTest(
						pos = (left + xpos + self.eventBorderWidth, top + self.eventBorderWidth),
						size = (ewidth - 2 * self.eventBorderWidth, height - 2 * self.eventBorderWidth),
						png = bgpng))
				else:
					res.append(MultiContentEntryText(
						pos = (left + xpos, top), size = (ewidth, height),
						font = 1, flags = RT_HALIGN_LEFT | RT_VALIGN_CENTER,
						text = "", color = None, color_sel = None,
						backcolor = backColor, backcolor_sel = backColorSel,
						border_width = self.eventBorderWidth, border_color = self.borderColor))
				# event text
				evX = left + xpos + self.eventBorderWidth + self.eventNamePadding
				evY = top + self.eventBorderWidth
				evW = ewidth - 2 * (self.eventBorderWidth + self.eventNamePadding)
				evH = height - 2 * self.eventBorderWidth
				if evW > 0:
					res.append(MultiContentEntryText(
						pos = (evX, evY), size = (evW, evH),
						font = 1, flags = alignnment,
						text = ev[1],
						color = foreColor, color_sel = foreColorSel,
						backcolor = backColor, backcolor_sel = backColorSel))
				# recording icons
				if rec:
					res.append(MultiContentEntryPixmapAlphaBlend(
						pos = (left+xpos+ewidth-22, top+height-22), size = (21, 21),
						png = self.getClockPixmap(service, stime, duration, ev[0]),
						backcolor_sel = backColorSel))
		else:
			# No event data at all: draw a single empty box spanning the row.
			# event box background
			if self.othEvPix is not None and config.epgselection.graphics_mode.value == "graphics":
				res.append(MultiContentEntryPixmapAlphaTest(
					pos = (r2.x + self.eventBorderWidth, r2.y + self.eventBorderWidth),
					size = (r2.w - 2 * self.eventBorderWidth, r2.h - 2 * self.eventBorderWidth),
					png = self.othEvPix))
			else:
				res.append(MultiContentEntryText(
					pos = (r2.x + self.eventBorderWidth, r2.y + self.eventBorderWidth),
					size = (r2.w - 2 * self.eventBorderWidth, r2.h - 2 * self.eventBorderWidth),
					font = 1, flags = RT_HALIGN_LEFT | RT_VALIGN_CENTER,
					text = "",
					color = self.foreColor, color_sel = self.foreColor,
					backcolor = self.backColor, backcolor_sel = self.backColorSelected,
					border_width = self.eventBorderWidth, border_color = self.borderColor))
		return res
	def selEntry(self, dir, visible = True):
		"""Move the graph-mode event cursor.

		dir: 0 = re-select current, +1/-1 = next/previous event
		(paging when past the edge), +2/-2 = next/previous page.
		Returns True when a page refill was triggered, else False.
		"""
		cur_service = self.cur_service #(service, service_name, events, picon)
		self.recalcEntrySize()
		valid_event = self.cur_event is not None
		if cur_service:
			update = True
			entries = cur_service[2]
			if dir == 0: #current
				update = False
			elif dir == +1: #next
				if valid_event and self.cur_event + 1 < len(entries):
					self.cur_event += 1
				else:
					self.offs += 1
					self.fillGraphEPG(None) # refill
					return True
			elif dir == -1: #prev
				if valid_event and self.cur_event - 1 >= 0:
					self.cur_event -= 1
				elif self.offs > 0:
					self.offs -= 1
					self.fillGraphEPG(None) # refill
					return True
			elif dir == +2: #next page
				self.offs += 1
				self.fillGraphEPG(None) # refill
				return True
			elif dir == -2: #prev
				if self.offs > 0:
					self.offs -= 1
					self.fillGraphEPG(None) # refill
					return True
		if cur_service and valid_event:
			entry = entries[self.cur_event] #(event_id, event_title, begin_time, duration)
			time_base = self.time_base + self.offs*self.time_epoch * 60
			xpos, width = self.calcEntryPosAndWidth(self.event_rect, time_base, self.time_epoch, entry[2], entry[3])
			# NOTE(review): passes the bound method event_rect.height (not
			# event_rect.h) as the Rect height; apparently harmless since
			# only select_rect.x is read elsewhere — confirm before changing.
			self.select_rect = Rect(xpos ,0, width, self.event_rect.height)
			self.l.setSelectionClip(eRect(xpos, 0, width, self.event_rect.h), visible and update)
		else:
			self.select_rect = self.event_rect
			self.l.setSelectionClip(eRect(self.event_rect.x, self.event_rect.y, self.event_rect.w, self.event_rect.h), False)
		self.selectionChanged()
		return False
def fillSimilarList(self, refstr, event_id):
# search similar broadcastings
t = time()
if event_id is None:
return
self.list = self.epgcache.search(('RIBND', 1024, eEPGCache.SIMILAR_BROADCASTINGS_SEARCH, refstr, event_id))
if self.list and len(self.list):
self.list.sort(key=lambda x: x[2])
self.l.setList(self.list)
self.selectionChanged()
print time() - t
def fillSingleEPG(self, service):
test = [ 'RIBDT', (service.ref.toString(), 0, -1, -1) ]
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
self.l.setList(self.list)
self.selectionChanged()
def fillMultiEPG(self, services, stime=None):
test = [ (service.ref.toString(), 0, stime) for service in services ]
test.insert(0, 'X0RIBDTCn')
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
self.l.setList(self.list)
self.selectionChanged()
	def updateMultiEPG(self, direction):
		"""Shift every row of the multi-EPG by *direction* events.

		Each row in self.list starts with a change counter; x[1]/x[3]
		presumably are the service ref and a reference time for the cache
		lookup — TODO confirm against the 'X0RIBDTCn' query format.
		"""
		test = [ x[3] and (x[1], direction, x[3]) or (x[1], direction, 0) for x in self.list ]
		test.insert(0, 'XRIBDTCn')
		epg_data = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
		cnt = 0
		for x in epg_data:
			# never step before the original event (counter must stay >= 0)
			changecount = self.list[cnt][0] + direction
			if changecount >= 0:
				if x[2] is not None:
					# replace the row, remembering how far it was shifted
					self.list[cnt] = (changecount, x[0], x[1], x[2], x[3], x[4], x[5], x[6])
			cnt+=1
		self.l.setList(self.list)
		self.selectionChanged()
	def fillGraphEPG(self, services, stime = None):
		"""(Re)build the rows of the graphical EPG.

		services is None on a refill: the services already in self.list are
		reused, shifted by self.offs pages.  Otherwise a fresh row is built
		for each entry of *services*.
		"""
		if stime is not None:
			self.time_base = int(stime)
		if services is None:
			time_base = self.time_base + self.offs * self.time_epoch * 60
			test = [ (service[0], 0, time_base, self.time_epoch) for service in self.list ]
			serviceList = self.list
			piconIdx = 3
		else:
			self.cur_event = None
			self.cur_service = None
			test = [ (service.ref.toString(), 0, self.time_base, self.time_epoch) for service in services ]
			serviceList = services
			piconIdx = 0
		test.insert(0, 'XRnITBD') #return record, service ref, service name, event id, event title, begin time, duration
		epg_data = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
		self.list = [ ]
		tmp_list = None
		service = ""
		sname = ""
		serviceIdx = 0
		# group consecutive cache results by service ref into one row each
		for x in epg_data:
			if service != x[0]:
				if tmp_list is not None:
					# piconIdx == 0 means source rows carry no picon column
					picon = None if piconIdx == 0 else serviceList[serviceIdx][piconIdx]
					self.list.append((service, sname, tmp_list[0][0] is not None and tmp_list or None, picon))
					serviceIdx += 1
				service = x[0]
				sname = x[1]
				tmp_list = [ ]
			tmp_list.append((x[2], x[3], x[4], x[5])) #(event_id, event_title, begin_time, duration)
		if tmp_list and len(tmp_list):
			# flush the last service's row
			picon = None if piconIdx == 0 else serviceList[serviceIdx][piconIdx]
			self.list.append((service, sname, tmp_list[0][0] is not None and tmp_list or None, picon))
			serviceIdx += 1
		self.l.setList(self.list)
		self.findBestEvent()
def sortSingleEPG(self, type):
list = self.list
if list:
event_id = self.getSelectedEventId()
if type == 1:
list.sort(key=lambda x: (x[4] and x[4].lower(), x[2]))
else:
assert(type == 0)
list.sort(key=lambda x: x[2])
self.l.invalidate()
self.moveToEventId(event_id)
def getEventRect(self):
rc = self.event_rect
return Rect( rc.left() + (self.instance and self.instance.position().x() or 0), rc.top(), rc.width(), rc.height() )
def getServiceRect(self):
rc = self.service_rect
return Rect( rc.left() + (self.instance and self.instance.position().x() or 0), rc.top(), rc.width(), rc.height() )
	def getTimeEpoch(self):
		"""Return the visible time span in minutes (it is multiplied by 60
		wherever it is combined with epoch-second timestamps)."""
		return self.time_epoch
	def getTimeBase(self):
		"""Return the start time (epoch seconds) of the currently shown page,
		i.e. the base time advanced by self.offs pages of self.time_epoch
		minutes each."""
		return self.time_base + (self.offs * self.time_epoch * 60)
	def resetOffset(self):
		"""Jump back to the first page (page offset 0)."""
		self.offs = 0
	def getClockPixmap(self, refstr, beginTime, duration, eventId):
		"""Pick the timer ("clock") icon for an event.

		Returns the plain clock when a timer on this service matches the
		event id exactly.  Otherwise partial overlaps with other timers on
		the same service are accumulated as a bitmask and mapped to the
		add/pre/post/prepost icon variants.
		"""
		pre_clock = 1
		post_clock = 2
		clock_type = 0
		endTime = beginTime + duration
		for x in self.timer.timer_list:
			if x.service_ref.ref.toString() == refstr:
				if x.eit == eventId:
					return self.clock_pixmap
				beg = x.begin
				end = x.end
				# timer ends inside the event -> overlap at the event's start
				if beginTime > beg and beginTime < end and endTime > end:
					clock_type |= pre_clock
				# timer starts inside the event -> overlap at the event's end
				elif beginTime < beg and endTime > beg and endTime < end:
					clock_type |= post_clock
		if clock_type == 0:
			return self.clock_add_pixmap
		elif clock_type == pre_clock:
			return self.clock_pre_pixmap
		elif clock_type == post_clock:
			return self.clock_post_pixmap
		else:
			return self.clock_prepost_pixmap
def getPixmapForEntry(self, service, eventId, beginTime, duration):
rec = beginTime and (self.timer.isInTimer(eventId, beginTime, duration, service))
if rec:
clock_pic = self.getClockPixmap(service, beginTime, duration, eventId)
else:
clock_pic = None
return (clock_pic, rec)
def GraphEPGRecRed(self, refstr, beginTime, duration, eventId):
for x in self.timer.timer_list:
if x.service_ref.ref.toString() == refstr:
if x.eit == eventId:
if x.justplay:
return "justplay"
else:
return "record"
return ""
def getSelectedEventId(self):
x = self.l.getCurrentSelection()
return x and x[1]
def moveToEventId(self, eventId):
if not eventId:
return
index = 0
for x in self.list:
if x[1] == eventId:
self.instance.moveSelectionTo(index)
break
index += 1
class TimelineText(HTMLComponent, GUIComponent):
	# Renders the date label and the "HH:MM" tick labels above the graphical
	# EPG, and shows/positions the vertical time-line marker widgets.
	def __init__(self):
		GUIComponent.__init__(self)
		self.l = eListboxPythonMultiContent()
		self.l.setSelectionClip(eRect(0,0,0,0))
		self.l.setItemHeight(30);
		self.foreColor = 0xffc000
		self.borderColor = 0x000000
		self.backColor = 0x000000
		self.borderWidth = 1
		self.time_base = 0
		self.time_epoch = 0
		self.timelineFontName = "Regular"
		self.timelineFontSize = 20
		self.datefmt = ""
	GUI_WIDGET = eListbox
	def applySkin(self, desktop, screen):
		# consume the skin attributes we understand; pass the rest through
		if self.skinAttributes is not None:
			attribs = [ ]
			for (attrib, value) in self.skinAttributes:
				if attrib == "foregroundColor":
					self.foreColor = parseColor(value).argb()
				elif attrib == "borderColor":
					self.borderColor = parseColor(value).argb()
				elif attrib == "backgroundColor":
					self.backColor = parseColor(value).argb()
				elif attrib == "font":
					self.l.setFont(0, parseFont(value, ((1,1),(1,1)) ))
				elif attrib == "borderWidth":
					self.borderWidth = int(value)
				elif attrib == "TimelineFont":
					font = parseFont(value, ((1,1),(1,1)) )
					self.timelineFontName = font.family
					self.timelineFontSize = font.pointSize
				else:
					attribs.append((attrib,value))
			self.skinAttributes = attribs
		return GUIComponent.applySkin(self, desktop, screen)
	def setTimeLineFontsize(self):
		# the user-configurable size delta is added to the skin's base size
		self.l.setFont(0, gFont(self.timelineFontName, self.timelineFontSize + config.epgselection.tl_fontsize_vixepg.getValue()))
	def postWidgetCreate(self, instance):
		self.setTimeLineFontsize()
		instance.setContent(self.l)
	def setEntries(self, l, timeline_now, time_lines, force):
		# l is the graphical EPG list component; timeline_now is the "now"
		# marker widget, time_lines the per-tick marker widgets.
		event_rect = l.getEventRect()
		time_epoch = l.getTimeEpoch()
		time_base = l.getTimeBase()
		if event_rect is None or time_epoch is None or time_base is None:
			return
		eventLeft = event_rect.left()
		res = [ None ]
		# Note: event_rect and service_rect are relative to the timeline_text position
		# while the time lines are relative to the GraphEPG screen position!
		if self.time_base != time_base or self.time_epoch != time_epoch or force:
			service_rect = l.getServiceRect()
			itemHeight = self.l.getItemSize().height()
			# 30-minute ticks for short spans, hourly ticks beyond 3 hours
			time_steps = 60 if time_epoch > 180 else 30
			num_lines = time_epoch / time_steps
			incWidth = event_rect.width() / num_lines
			timeStepsCalc = time_steps * 60
			nowTime = localtime(time())
			begTime = localtime(time_base)
			self.ServiceWidth = service_rect.width()
			# struct_time index 2 is tm_mday: show the date only when the
			# window does not start today, in the longest format that fits
			if nowTime[2] != begTime[2]:
				if self.ServiceWidth > 179:
					datestr = strftime("%A %d %B", localtime(time_base))
				elif self.ServiceWidth > 139:
					datestr = strftime("%a %d %B", localtime(time_base))
				elif self.ServiceWidth > 129:
					datestr = strftime("%a %d %b", localtime(time_base))
				elif self.ServiceWidth > 119:
					datestr = strftime("%a %d", localtime(time_base))
				elif self.ServiceWidth > 109:
					datestr = strftime("%A", localtime(time_base))
				else:
					datestr = strftime("%a", localtime(time_base))
			else:
				datestr = '%s'%(_("Today"))
			res.append( MultiContentEntryText(
				pos = (0, 0),
				size = (service_rect.width(), itemHeight),
				font = 0, flags = RT_HALIGN_LEFT | RT_VALIGN_TOP,
				text = _(datestr),
				color = self.foreColor, color_sel = self.foreColor,
				backcolor = self.backColor, backcolor_sel = self.backColor,
				border_width = self.borderWidth, border_color = self.borderColor))
			xpos = 0 # eventLeft
			for x in range(0, num_lines):
				res.append( MultiContentEntryText(
					pos = (service_rect.width() + xpos, 0),
					size = (incWidth, itemHeight),
					font = 0, flags = RT_HALIGN_LEFT | RT_VALIGN_TOP,
					text = strftime("%H:%M", localtime( time_base + x*timeStepsCalc )),
					color = self.foreColor, color_sel = self.foreColor,
					backcolor = self.backColor, backcolor_sel = self.backColor,
					border_width = self.borderWidth, border_color = self.borderColor) )
				line = time_lines[x]
				old_pos = line.position
				line.setPosition(xpos + eventLeft, old_pos[1])
				line.visible = True
				xpos += incWidth
			# hide the markers beyond the last tick of this page
			for x in range(num_lines, MAX_TIMELINES):
				time_lines[x].visible = False
			self.l.setList([res])
			self.time_base = time_base
			self.time_epoch = time_epoch
		now = time()
		# centre the "now" marker on the current time when it falls inside
		# the visible window; hide it otherwise
		if now >= time_base and now < (time_base + time_epoch * 60):
			xpos = int((((now - time_base) * event_rect.width()) / (time_epoch * 60)) - (timeline_now.instance.size().width() / 2))
			old_pos = timeline_now.position
			new_pos = (xpos + eventLeft, old_pos[1])
			if old_pos != new_pos:
				timeline_now.setPosition(new_pos[0], new_pos[1])
			timeline_now.visible = True
		else:
			timeline_now.visible = False
| gpl-2.0 |
jaidevd/scikit-learn | examples/svm/plot_custom_kernel.py | 43 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
    """
    Custom linear kernel with an anisotropic scaling:

                    (2 0)
        k(X, Y) = X (   ) Y.T
                    (0 1)

    i.e. the first feature is weighted twice as strongly as the second.
    """
    M = np.array([[2, 0], [0, 1.0]])
    return X.dot(M).dot(Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# predict a class for every grid point to colour the background
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
canaltinova/servo | tests/wpt/web-platform-tests/webdriver/tests/sessions/new_session/invalid_capabilities.py | 12 | 4145 | #META: timeout=long
import pytest
from webdriver import error
from conftest import product, flatten
@pytest.mark.parametrize("value", [None, 1, "{}", []])
def test_invalid_capabilites(new_session, value):
    """The top-level "capabilities" entry must be a JSON Object; any other
    JSON type must be rejected with "invalid argument"."""
    # NOTE(review): "capabilites" in the name is a typo for "capabilities";
    # kept as-is so external references to the test id keep working.
    with pytest.raises(error.InvalidArgumentException):
        new_session({"capabilities": value})
@pytest.mark.parametrize("value", [None, 1, "{}", []])
def test_invalid_always_match(new_session, add_browser_capabilites, value):
    """"alwaysMatch" must be a JSON Object; any other type is rejected even
    when a valid firstMatch entry is supplied."""
    with pytest.raises(error.InvalidArgumentException):
        new_session({"capabilities": {"alwaysMatch": value, "firstMatch": [add_browser_capabilites({})]}})
@pytest.mark.parametrize("value", [None, 1, "[]", {}])
def test_invalid_first_match(new_session, add_browser_capabilites, value):
    """"firstMatch" must be a JSON List; any other type is rejected even
    when a valid alwaysMatch entry is supplied."""
    with pytest.raises(error.InvalidArgumentException):
        new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({}), "firstMatch": value}})
# (capability name, invalid values) pairs: creating a session with any of
# these values for the named capability must fail with "invalid argument".
invalid_data = [
    ("acceptInsecureCerts", [1, [], {}, "false"]),
    ("browserName", [1, [], {}, False]),
    ("browserVersion", [1, [], {}, False]),
    ("platformName", [1, [], {}, False]),
    ("pageLoadStrategy", [1, [], {}, False, "invalid", "NONE", "Eager", "eagerblah", "interactive",
                          " eager", "eager "]),
    ("proxy", [1, [], "{}", {"proxyType": "SYSTEM"}, {"proxyType": "systemSomething"},
               {"proxy type": "pac"}, {"proxy-Type": "system"}, {"proxy_type": "system"},
               {"proxytype": "system"}, {"PROXYTYPE": "system"}, {"proxyType": None},
               {"proxyType": 1}, {"proxyType": []}, {"proxyType": {"value": "system"}},
               {" proxyType": "system"}, {"proxyType ": "system"}, {"proxyType ": " system"},
               {"proxyType": "system "}]),
    ("timeouts", [1, [], "{}", False, {"pageLOAD": 10}, {"page load": 10},
                  {"page load": 10}, {"pageLoad": "10"}, {"pageLoad": {"value": 10}},
                  {"invalid": 10}, {"pageLoad": -1}, {"pageLoad": 2**64},
                  {"pageLoad": None}, {"pageLoad": 1.1}, {"pageLoad": 10, "invalid": 10},
                  {" pageLoad": 10}, {"pageLoad ": 10}]),
    ("unhandledPromptBehavior", [1, [], {}, False, "DISMISS", "dismissABC", "Accept",
                                 " dismiss", "dismiss "])
]
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
                                  lambda key, value: {"firstMatch": [{key: value}]}])
@pytest.mark.parametrize("key,value", flatten(product(*item) for item in invalid_data))
def test_invalid_values(new_session, add_browser_capabilites, body, key, value):
    """Each invalid capability value must be rejected, whether it is placed
    in alwaysMatch or in a firstMatch entry."""
    capabilities = body(key, value)
    if "alwaysMatch" in capabilities:
        capabilities["alwaysMatch"] = add_browser_capabilites(capabilities["alwaysMatch"])
    else:
        capabilities["firstMatch"][0] = add_browser_capabilites(capabilities["firstMatch"][0])
    with pytest.raises(error.InvalidArgumentException):
        # the unused 'resp =' binding was dropped; only the raise matters
        new_session({"capabilities": capabilities})
# Legacy / vendor capability names that are not valid extension capabilities
# (no "prefix:name" colon form); each must cause session creation to fail.
invalid_extensions = [
    "firefox",
    "firefox_binary",
    "firefoxOptions",
    "chromeOptions",
    "automaticInspection",
    "automaticProfiling",
    "platform",
    "version",
    "browser",
    "platformVersion",
    "javascriptEnabled",
    "nativeEvents",
    "seleniumProtocol",
    "profile",
    "trustAllSSLCertificates",
    "initialBrowserUrl",
    "requireWindowFocus",
    "logFile",
    "logLevel",
    "safari.options",
    "ensureCleanSession",
]
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
                                  lambda key, value: {"firstMatch": [{key: value}]}])
@pytest.mark.parametrize("key", invalid_extensions)
def test_invalid_extensions(new_session, add_browser_capabilites, body, key):
    """Unprefixed legacy capability names must be rejected, whether they
    appear in alwaysMatch or in a firstMatch entry."""
    capabilities = body(key, {})
    if "alwaysMatch" in capabilities:
        capabilities["alwaysMatch"] = add_browser_capabilites(capabilities["alwaysMatch"])
    else:
        capabilities["firstMatch"][0] = add_browser_capabilites(capabilities["firstMatch"][0])
    with pytest.raises(error.InvalidArgumentException):
        # the unused 'resp =' binding was dropped; only the raise matters
        new_session({"capabilities": capabilities})
| mpl-2.0 |
vauxoo-dev/account-financial-tools | __unported__/currency_rate_date_check/currency_rate_date_check.py | 7 | 5681 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Currency rate date check module for OpenERP
# Copyright (C) 2012-2013 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from datetime import datetime
from openerp.tools.translate import _
# Here are some explainations about the design of this module.
# In server/openerp/addons/base/res/res_currency.py :
# compute() -> _get_conversion_rate()
# -> _current_rate() -> _current_rate_computation()
# The date used for the rate is the one in the context
# compute() adds currency_rate_type_from
# and currency_rate_type_to to the context
# _get_conversion_rate() adds currency_rate_type_id to context ;
# its value is currency_rate_type_to ;
# if it doesn't exist it's currency_rate_type_from ;
# if it doesn't exist either it's False
# It already contains raise "No rate found for currency ... at the date ..."
# _current_rate() reads currency_rate_type_id
# from context and uses it in the SQL request
# This is the function used for the definition of
# the field.function 'rate' on res_currency
# Which one of the 3 functions should we inherit ? Good question...
# It's probably better to inherit the lowest level function,
# i.e. _current_rate_computation()
# Advantage : by inheriting the lowest level function,
# we can be sure that the check
# always apply, even for scenarios where we
# read the field "rate" of the obj currency
# => that's the solution I implement in the code below
class res_currency(orm.Model):
    _inherit = 'res.currency'

    def _current_rate_computation(self, cr, uid, ids, name, arg,
                                  raise_on_no_rate, context=None):
        """Refuse stale currency rates before delegating to the base class.

        When an explicit date is in the context (and no specific
        currency_rate_type_id), verify for each non-company currency that
        the nearest rate on or before that date is not older than the
        company's currency_rate_max_delta; raise otherwise.
        """
        if context is None:
            context = {}
        # We only do the check if there is an explicit date in the context and
        # there is no specific currency_rate_type_id
        if context.get('date') and not context.get('currency_rate_type_id') and\
                not context.get('disable_rate_date_check'):
            # The company is taken from the user because the currency's
            # company_id is not a required field.  The user does not change
            # inside the loop, so browse it once instead of per currency.
            user = self.pool['res.users'].browse(cr, uid, uid, context=context)
            company_currency_id = user.company_id.currency_id.id
            max_delta = user.company_id.currency_rate_max_delta
            # the truthiness test above guarantees the key is present
            date = context['date']
            date_datetime = datetime.strptime(date, '%Y-%m-%d')
            rate_obj = self.pool['res.currency.rate']
            for currency_id in ids:
                # if it's the company currency, don't do anything
                # (there is just one old rate at 1.0)
                if company_currency_id == currency_id:
                    continue
                # nearest regular rate at or before the requested date
                selected_rate = rate_obj.search(cr, uid, [
                    ('currency_id', '=', currency_id),
                    ('name', '<=', date),
                    ('currency_rate_type_id', '=', None)
                ], order='name desc', limit=1, context=context)
                if not selected_rate:
                    # no rate at all: the base computation raises for this
                    continue
                rate_date = rate_obj.read(cr, uid, selected_rate[0], ['name'],
                                          context=context)['name']
                rate_date_datetime = datetime.strptime(rate_date, '%Y-%m-%d')
                if (date_datetime - rate_date_datetime).days > max_delta:
                    currency_name = self.read(cr, uid, currency_id, ['name'],
                                              context=context)['name']
                    raise orm.except_orm(
                        _('Error'),
                        _('You are requesting a rate conversion on %s for '
                          'currency %s but the nearest '
                          'rate before that date is '
                          'dated %s and the maximum currency '
                          'rate time delta for '
                          'your company is %s days') % (
                            date, currency_name, rate_date, max_delta)
                    )
        # Now we call the regular function from the "base" module
        return super(res_currency, self)._current_rate_computation(
            cr, uid, ids, name, arg, raise_on_no_rate, context=context)
| agpl-3.0 |
mxjl620/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
# IPCA consumes the data in mini-batches of 10 samples
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
    plt.figure(figsize=(8, 8))
    for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
        plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                    c=c, label=target_name)
    if "Incremental" in title:
        # components may differ by a sign flip, so compare absolute values
        err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best")
    plt.axis([-4, 4, -1.5, 1.5])
    plt.show()
| bsd-3-clause |
gmacchi93/serverInfoParaguay | apps/venv/lib/python2.7/site-packages/django/core/checks/model_checks.py | 525 | 2390 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import types
from django.apps import apps
from django.core.checks import Error, Tags, register
@register(Tags.models)
def check_all_models(app_configs=None, **kwargs):
    """Run the ``check`` classmethod of every installed model.

    Models whose ``check`` attribute has been overridden by a non-method
    value are reported as models.E020 instead of being called.  When
    *app_configs* is given, only models from those apps are checked.
    """
    errors = []
    for model in apps.get_models():
        if app_configs is not None and model._meta.app_config not in app_configs:
            continue
        if inspect.ismethod(model.check):
            errors.extend(model.check(**kwargs))
        else:
            errors.append(
                Error(
                    "The '%s.check()' class method is "
                    "currently overridden by %r." % (
                        model.__name__, model.check),
                    hint=None,
                    obj=model,
                    id='models.E020'
                )
            )
    return errors
@register(Tags.models, Tags.signals)
def check_model_signals(app_configs=None, **kwargs):
    """
    Ensure lazily referenced model signals senders are installed.
    """
    # NOTE(review): app_configs is accepted for check-API symmetry but is
    # not used by this check.
    # Avoid circular import
    from django.db import models

    errors = []

    for name in dir(models.signals):
        obj = getattr(models.signals, name)
        if isinstance(obj, models.signals.ModelSignal):
            # unresolved_references holds receivers whose sender was given
            # as a lazy "app_label.ModelName" string that never resolved
            for reference, receivers in obj.unresolved_references.items():
                for receiver, _, _ in receivers:
                    # The receiver is either a function or an instance of class
                    # defining a `__call__` method.
                    if isinstance(receiver, types.FunctionType):
                        description = "The '%s' function" % receiver.__name__
                    else:
                        description = "An instance of the '%s' class" % receiver.__class__.__name__
                    errors.append(
                        Error(
                            "%s was connected to the '%s' signal "
                            "with a lazy reference to the '%s' sender, "
                            "which has not been installed." % (
                                description, name, '.'.join(reference)
                            ),
                            obj=receiver.__module__,
                            hint=None,
                            id='signals.E001'
                        )
                    )

    return errors
| apache-2.0 |
kingvuplus/EGAMI-dvbapp | tools/svg2skin.py | 98 | 2458 | #!/usr/bin/python
# don't expect too much.
# this is a really simple&stupid svg parser, which will use rectangles
# and text fields to produce <widget> snippets for a skin.
# use object "id" fields for source names if you want.
# extracting font information is buggy.
# if you want text fields, please use flow text regions, instead of simple
# text. otherwise, width and height are unknown.
#
# tested only with a single inkscape-generated SVG.
import sys
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
def getattrs(attrs, *a):
    """Return the named attributes of *attrs* converted to float, in order."""
    return [float(attrs[name]) for name in a]
def parsedict(attrs):
    """Parse a CSS-like "key:value;key:value" style string into a dict.

    Returns an empty list for an empty/None input (kept for backward
    compatibility with existing callers that only do membership tests).
    """
    if not attrs:
        return []
    r = { }
    for item in attrs.split(';'):
        # split on the first ':' only, so values may themselves contain
        # colons (e.g. "fill:url(data:...)" in SVG styles)
        (key, val) = item.split(':', 1)
        r[key] = val
    return r
def px(x):
    """Convert a "<number>px" string to the nearest integer pixel value."""
    value = float(x[:-2])
    return int(value + .5)
def contains(box_o, box_i):
    """Return True when box_o "encloses" box_i in the loose sense used by
    the bbox search: its origin is above-left and its width/height are at
    least as large (boxes are (x, y, width, height) tuples)."""
    origin_ok = box_o[0] <= box_i[0] and box_o[1] <= box_i[1]
    size_ok = box_o[2] >= box_i[2] and box_o[3] >= box_i[3]
    return origin_ok and size_ok
class parseXML(ContentHandler):
	# SAX handler run in two passes: first with find_bbox=True to locate the
	# outermost <rect> (the screen bounding box), then with find_bbox=False
	# to print a <widget> line for every rect / flowed text region.
	def __init__(self):
		self.isPointsElement, self.isReboundsElement = 0, 0
		self.bbox = None
		self.find_bbox = False
		self.flow = None
	def startElement(self, name, attrs):
		if self.find_bbox:
			# pass 1: keep the rect that encloses the current candidate
			if name != "rect":
				return
			box = getattrs(attrs, "x", "y", "width", "height")
			if not self.bbox or contains(box, self.bbox):
				self.bbox = box
			return
		if name == "rect":
			(x, y, width, height) = getattrs(attrs, "x", "y", "width", "height")
			# widget positions are relative to the bbox origin
			x -= self.bbox[0]
			y -= self.bbox[1]
			id = attrs["id"]
			if self.flow:
				# this rect is the region of the preceding flowRoot: use
				# the flowRoot's id as the widget source name
				id = self.flow
				self.flow = None
			styles = parsedict(attrs.get("style", ""))
		elif name == "text":
			# plain text carries no extent, so width/height stay unknown
			(x, y) = getattrs(attrs, "x", "y")
			x -= self.bbox[0]
			y -= self.bbox[1]
			width, height = 0, 0
			styles = parsedict(attrs["style"])
			id = attrs["id"]
		elif name == "flowRoot":
			# remember the id; the following rect defines its region
			self.flow = attrs["id"]
			return
		else:
			return
		if "font-size" in styles:
			font = ' font="Regular;%d"' % px(styles["font-size"])
		else:
			font = ""
		print """\t\t<widget source="%s" render="Label" position="%d,%d" size="%d,%d" %s />""" % (id, x, y, width, height, font)
parser = make_parser()
contentHandler = parseXML()
parser.setContentHandler(contentHandler)
# pass 1: find the outermost rectangle, which defines the screen bbox
contentHandler.find_bbox = True
parser.parse(sys.argv[1])
bboxi = tuple([int(x) for x in contentHandler.bbox])
# pass 2: emit one <widget> per rect/text, positioned relative to the bbox
contentHandler.find_bbox = False
print '\t<screen name="" position="%d,%d" size="%d,%d" title="">' % bboxi
parser.parse(sys.argv[1])
print '\t</screen>'
| gpl-2.0 |
openstack/trove | trove/db/sqlalchemy/migrate_repo/versions/010_add_usage.py | 4 | 1564 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
meta = MetaData()
# Table recording per-instance usage events (instance + volume sizing and
# the time window they cover), as created by migration 010.
usage_events = Table(
    'usage_events',
    meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('instance_name', String(36)),
    Column('tenant_id', String(36)),
    Column('nova_instance_id', String(36)),
    Column('instance_size', Integer()),
    Column('nova_volume_id', String(36)),
    Column('volume_size', Integer()),
    Column('end_time', DateTime()),
    Column('updated', DateTime()))
def upgrade(migrate_engine):
    """Bind the metadata to the engine and create the usage_events table."""
    meta.bind = migrate_engine
    create_tables([usage_events])
| apache-2.0 |
adishjain/youtube-dl | youtube_dl/extractor/crooksandliars.py | 143 | 2060 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
)
class CrooksAndLiarsIE(InfoExtractor):
    """Extractor for videos served through embed.crooksandliars.com."""
    _VALID_URL = r'https?://embed\.crooksandliars\.com/(?:embed|v)/(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'https://embed.crooksandliars.com/embed/8RUoRhRi',
        'info_dict': {
            'id': '8RUoRhRi',
            'ext': 'mp4',
            'title': 'Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!',
            'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
            'thumbnail': 're:^https?://.*\.jpg',
            'timestamp': 1428207000,
            'upload_date': '20150405',
            'uploader': 'Heather',
            'duration': 236,
        }
    }, {
        'url': 'http://embed.crooksandliars.com/v/MTE3MjUtMzQ2MzA',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://embed.crooksandliars.com/embed/%s' % video_id, video_id)
        # the embed page defines a JS "manifest" object carrying metadata
        # and the available stream flavors; parse it out of the page source
        manifest = self._parse_json(
            self._search_regex(
                r'var\s+manifest\s*=\s*({.+?})\n', webpage, 'manifest JSON'),
            video_id)
        # preference order: low bitrates before high, webm below mp4 per tier
        quality = qualities(('webm_low', 'mp4_low', 'webm_high', 'mp4_high'))
        # keep only 'video/*' flavors (audio-only variants are skipped)
        formats = [{
            'url': item['url'],
            'format_id': item['type'],
            'quality': quality(item['type']),
        } for item in manifest['flavors'] if item['mime'].startswith('video/')]
        self._sort_formats(formats)
        return {
            'url': url,
            'id': video_id,
            'title': manifest['title'],
            'description': manifest.get('description'),
            'thumbnail': self._proto_relative_url(manifest.get('poster')),
            'timestamp': int_or_none(manifest.get('created')),
            'uploader': manifest.get('author'),
            'duration': int_or_none(manifest.get('duration')),
            'formats': formats,
        }
| unlicense |
luo66/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to left, right, down, up.

    Returns (X_augmented, Y_repeated): the originals first, then the four
    shifted copies; labels are tiled to match.
    """
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    # A named function instead of a lambda assignment (PEP 8 E731):
    # convolving with a one-hot 3x3 kernel shifts the 8x8 image one pixel
    # in the kernel's direction, padding with zeros at the border.
    def shift(x, w):
        return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()

    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
# pipeline: RBM feature extraction feeding a logistic-regression classifier
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
# visualize each of the 100 learned RBM components as an 8x8 image
for i, comp in enumerate(rbm.components_):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
tph-thuering/vnetsource | data_services/ingesters/EMOD_ingest.py | 2 | 21167 | ########################################################################################################################
# VECNet CI - Prototype
# Date: 05/02/2013
# Institution: University of Notre Dame
# Primary Authors:
# Lawrence Selvy <Lawrence.Selvy.1@nd.edu>
########################################################################################################################
import shutil
import uuid
import json
import os
import psycopg2
import datetime
from io import BytesIO
from struct import pack
from django.db import connections, transaction
from django.core.exceptions import ObjectDoesNotExist
from data_services.ingesters.base_ingester import Base_ingester
from data_services.models import DimReplication, DimChannel, BaseFactData, DimExecution, SimulationInputFile
from data_services.utils import encode_binary, commit_to_warehouse
import pdb
import os
from vecnet.emod.output import convert_to_csv
class EMOD_ingester(Base_ingester):
"""This is the ingestion script for the EMOD malaria model. This will take values
from output files that EMOD creates and fill tables within the VECNet CI
datawarehouse
"""
FileList = dict() #:
DataList = list() #:
def __init__(self, zip_file=None, execid=None, seriesid=None, chan_list=None, msg=None):
"""
This init method will take several arguments and use those to ingest data into the vecnet-CI datawarehouse
This takes a zip file (the path to the zip file), execid, seriesid, and chan_list. Given this information,
this ingester will create a replication, and then fill that replication with the data in the zip file. If
execid and/or seriesid are not specified, the filename is assumed to be of the type "REPID-XXXX.zip" where
the replication id will be parsed from the file name.
:param str zip_file: A path to a zip file on the file system.
:raises TypeError: When any file address is not of type str
:raises ObjectDoesNotExist: When the replication_id given does not exist in the data warehouse
"""
self.FileList = dict()
self.DataList = dict()
self.alreadyIngested = None
# the files needed by EMOD
self.FILES_OF_INTEREST = ('DemographicsSummary', 'VectorSpeciesReport', 'InsetChart', 'BinnedReport')
# create a unique temporary directory for the needed files
self.BASE_PATH = os.path.sep + 'tmp'
self.temporary_path = self.BASE_PATH + os.path.sep + str(uuid.uuid4())
# get the replication ID from the filename. filename is expected to be of the following format
# path/to/file/file_name-replication_number.zip
if not execid and not seriesid:
replication_id = int(os.path.basename(zip_file).strip(".zip").split("-")[1])
try:
self.replication = DimReplication.objects.get(pk=replication_id)
except:
raise ObjectDoesNotExist('No replication with id %s is available in the DB' % replication_id)
else:
execution = DimExecution.objects.filter(pk=execid)
if not execution.exists():
raise ObjectDoesNotExist("No execution with id %s is available in the DB" % execid)
replication = execution[0].dimreplication_set.filter(series_id=seriesid)
if not replication.exists():
replication = DimReplication(
execution_key=execution[0],
series_id=seriesid,
seed_used=-1
)
replication.save()
self.replication = replication
else:
self.replication = replication[0]
if zip_file is None:
if msg is None:
msg = "An error has occurred during the processing of this replication"
self.set_status(-1, msg)
return
input_files = self.unpack_files(zip_file)
VSRF = input_files['VectorSpeciesReport'] if 'VectorSpeciesReport' in input_files else ''
DSF = input_files['DemographicsSummary'] if 'DemographicsSummary' in input_files else ''
ICF = input_files['InsetChart'] if 'InsetChart' in input_files else ''
BRF = input_files['BinnedReport'] if 'BinnedReport' in input_files else ''
if not isinstance(VSRF, str):
raise TypeError('VSRF must be a string containing the file address to the VSRF')
elif VSRF is not '':
self.FileList['VectorSpeciesReport'] = VSRF
if not isinstance(DSF, str):
raise TypeError('DSF must be a string containing the file address to the DSF')
elif DSF is not '':
self.FileList['DemographicsSummary'] = DSF
if not isinstance(ICF, str):
raise TypeError('ICF must be a string containing the file address to the ICF')
elif ICF is not '':
self.FileList['InsetChart'] = ICF
if not isinstance(BRF, str):
raise TypeError('BRF must be a string containing the file address to the BRF')
elif BRF is not '':
self.FileList['BinnedReport'] = BRF
# -------------- Grab the channel listing and other objects needed for the ingester
# replication = DimReplication.objects.filter(pk=replication_id)
# if not replication.exists():
# raise ObjectDoesNotExist('Replication with id %s does not exist' % replication_id)
# replication = replication[0]
self.run = self.replication.execution_key.run_key
# Attach CSV output for non-sweep runs only
if self.run.numjobs() == 1:
convert_to_csv(self.temporary_path)
filename = os.path.join(self.temporary_path, "output.csv.zip")
csv_file = SimulationInputFile.objects.create_file(contents=open(filename, "rb").read(),
name="output.csv.zip",
created_by_id=1)
self.run.csv_output = csv_file
self.run.save()
if self.run.models_key.model != 'EMOD':
raise ValueError("Target Run is not EMOD during EMOD submission!")
if chan_list is not None and isinstance(chan_list, list):
chans = DimChannel.objects.filter(pk__in=chan_list)
if not chans.exists():
raise ObjectDoesNotExist('No channels were found with IDs %s' % chan_list)
new_chans = [self.run.dimchannel_set.add(channel) for channel in chans]
if len(new_chans) != len(chans):
print "WARNING: Not all channels in list were ingested"
self.run.save()
else:
chans = self.run.dimchannel_set.all()
self.Channel_dict = dict()
for channel in chans:
if channel.file_name not in self.Channel_dict:
self.Channel_dict[channel.file_name] = list()
self.Channel_dict[channel.file_name].append(channel)
self.Channel_dict['VectorSpeciesReport'] = list()
self.alreadyIngested = BaseFactData.objects.filter(
run_key=self.run,
replication_key=self.replication
).distinct('channel_key')
self.alreadyIngested = [data.channel_key for data in self.alreadyIngested]
# self.Channel_list = [x.file_name for x in chans]
# Setup class variables
self.fact_data_name = 'fact_data_run_%s' % self.run.id
self.cursor = connections['default'].cursor()
self.cursor.execute("select nextval('base_fact_data_id_seq');")
self.next_id = int(self.cursor.fetchone()[0])
# Due to the limitations of the django ORM, we have to use psycopg2 directly
def __del__(self):
"""
This is the exit method and will cleanup temporary files, even if there is an exception. This will walk the
tree of the temporary path, deleting all files and folders therein, and then removing the temporary directory
itself.
"""
if self.temporary_path:
self._cleanupIngester()
@classmethod
def rep_failed(cls, execid, seriesid, msg=None):
"""
This is the pathway for failed replications returning from computation
If a replication fails, the seriesid and execution id will be passed in to
this alternate constructor for the ingester. It will create the appropriate
replication if it does not exist (it will check for the existence of the
same replication via checking the seriesid and execution id) and then set
the status. It will further set the status text to msg unless msg is empty,
if it is it will fill the text with a default status string.
*NOTE* This does not take a zip file
:param execid: Execution ID of the failed replication
:param seriesid: Series ID of the failed replication
:param msg: Status message to set the failed replication to.
"""
if not isinstance(execid, int) and not isinstance(seriesid, int):
raise ValueError("Execid and seriesid are required to be integers")
return cls(
zip_file=None,
execid=execid,
seriesid=seriesid,
msg=msg
)
def ingest(self):
""" This ingest method will step through all of the files in the file list and
add the data contained within them to the database. This method is also the
"main" for this ingester, and contains byte conversion and the like
:returns: Nothing
"""
t1 = datetime.datetime.now()
# Preprocess
# self.preProcess()
# Ready the byte container and add the Postgres Header
cpy = BytesIO()
cpy.write(pack('!11sii', b'PGCOPY\n\377\r\n\0', 0, 0))
for file_type in self.Channel_dict.keys():
try:
parser = self.parse_file(str(file_type))
self.next_id = parser(str(file_type), cpy, self.next_id)
# self.parse_file(str(file_type), cpy, self.next_id)
except KeyError as detail:
self.set_status(-1, "Key error, a key of %s was not found" % detail)
return
# Close the file as per postgresql docs
cpy.write(pack('!h', -1))
commit_to_warehouse(cpy, connections['default'].settings_dict, 'base_fact_data', self.next_id)
#cpy.seek(0)
#conn_dict = connections['default'].settings_dict
#conn = psycopg2.connect(
# host=conn_dict['HOST'],
# port=conn_dict['PORT'],
# user=conn_dict['USER'],
# password=conn_dict['PASSWORD'],
# database=conn_dict['NAME'])
#data_cursor = conn.cursor()
#data_cursor.copy_expert('COPY base_fact_data FROM STDIN WITH BINARY', cpy)
#data_cursor.close()
#conn.commit()
#conn.close()
# Reset the sequence counter to the appropriate value
#cursor.execute('ALTER SEQUENCE base_fact_data_id_seq RESET %s' % next_id)
# Re-add all constraints
# TODO: Also include re-adding indexes if they were deleted or creating new ones.
# self.postProcess()
t2 = datetime.datetime.now()
print "Ingestion process took ", t2-t1
# cleanup the files
print "Removing the temporary files"
if self.temporary_path:
shutil.rmtree(self.temporary_path)
# Replication and run status update
self.set_status(0)
self.run.set_status()
# print self.run.status
self.run.save()
return
def parse_file(self, file_type):
"""
This method is responsible for choosing which method to use on which file. This is being used as a replacement
for the swicth stack.
This accomplishes this by returning a ByteIO containing the information contained within the file.
:param file_type: Type of file (ex BRF, ICF, etc)
:type file_type: str
:param pg_index: Index in data table to start with
:type pg_index: int
:param pg_io: BytesIO object to fill with data from a particular channel
:type pg_io: BytesIO
"""
return{
'VectorSpeciesReport': self.parse_vector_species
}.get(file_type, self.parse_generic_file)
def parse_generic_file(self, file_type, pg_io, pg_index):
"""
This method is responsible for parsing the DemographicsSummary, BinnedReport, and InsetChart files for
the EMOD model. It does not parse the VectorSpeciesReport file.
:param file_type: Type of file (ex BRF, ICF, etc)
:type file_type: str
:param pg_index: Index in data table to start with
:type pg_index: int
:param pg_io: BytesIO object to fill with data from a particular channel
:type pg_io: BytesIO
"""
filename = self.FileList[file_type]
file_json = json.loads(open(filename, 'r').read())
channel_list = self.Channel_dict[file_type]
for chan in channel_list:
# To ensure that a single channel is never ingested more than once
if chan in self.alreadyIngested:
continue
try:
if chan.type is not None:
type_ndx = file_json['Header']['Subchannel_Metadata']['MeaningPerAxis'][0].index(chan.type)
data = file_json['Channels'][chan.title]['Data'][type_ndx]
else:
data = file_json['Channels'][chan.title]['Data']
pg_index = self.transform_data(data, chan, pg_io, pg_index)
except KeyError:
title = chan.title
if chan.type is not None:
title += ' - ' + chan.type
self.set_status(-1, 'Channel %s not found in output file; Ingestion Failed' % title)
raise KeyError('Channel %s not found in output file; Ingestion Failed' % title)
return pg_index
def parse_vector_species(self, file_type, pg_io, pg_index):
"""
This method is responsible for parsing the VectorSpecies file. As all vector species quantities should ingested
for each run, this file will be completely ingested, and will create channels and add those channels to the
run if necessary.
:param file_type: Type of file (ex BRF, ICF, etc)
:type file_type: str
:param pg_index: Index in data table to start with
:type pg_index: int
:param pg_io: BytesIO object to fill with data from a particular channel
:type pg_io: BytesIO
"""
filename = self.FileList[file_type]
file_obj = open(filename, 'r')
file_json = json.loads(file_obj.read())
file_obj.close()
species_list = file_json['Header']['Subchannel_Metadata']['MeaningPerAxis'][0]
channels = file_json['Channels'].keys()
for chan_ndx, chan in enumerate(channels):
try:
data_lists = file_json['Channels'][channels[chan_ndx]]['Data']
for spec_ndx, spec_data in enumerate(data_lists):
channel = DimChannel.objects.get_or_create(
title=chan,
type=species_list[spec_ndx],
file_name='VectorSpeciesReport'
)
channel = channel[0]
# To ensure that a single channel is never ingested more than once
if channel in self.alreadyIngested:
continue
self.run.dimchannel_set.add(channel)
pg_index = self.transform_data(spec_data, channel, pg_io, pg_index)
except KeyError:
title = chan.title
if chan.type is not None:
title += ' - ' + chan.type
self.set_status(-1, 'Channel %s not found in output file; Ingestion Failed' % title)
raise KeyError('Channel %s not found in output file; Ingestion Failed' % title)
self.run.save()
return pg_index
def preProcess(self):
""" This pre-processing method 'turns off' the foreign-key constraints and uniqueness constraints on
the fact data table being ingested into
:returns: Nothing
"""
# Drop Constraints
self.cursor.execute("select conname from pg_constraint where conname = 'base_fact_data_fk_channel_key'")
if self.cursor.fetchone() is not None:
self.cursor.execute('alter table base_fact_data drop constraint base_fact_data_fk_channel_key;')
transaction.commit_on_success(using='default')
# cursor.execute('alter table base_fact_data drop constraint base_fact_data_fk_run_key')
self.cursor.execute("select conname from pg_constraint where conname = '%s_run_key_check';" % self.fact_data_name)
if self.cursor.fetchone() is not None:
self.cursor.execute('alter table %(fact_table)s drop constraint %(fact_table)s_run_key_check;' % {'fact_table': self.fact_data_name})
transaction.commit_on_success(using='default')
self.cursor.close()
return
def postProcess(self):
""" This is post-processing method restores that constraints that were 'turned off' in pre-processing.
It then will validate all new data.
:returns: Nothing
"""
self.cursor = connections['default'].cursor()
# Re-create constraints
self.cursor.execute("select conname from pg_constraint where conname = 'base_fact_data_fk_channel_key'")
if self.cursor.fetchone() is None:
query = "ALTER TABLE base_fact_data ADD CONSTRAINT base_fact_data_fk_channel_key FOREIGN KEY (channel_key) REFERENCES dim_channel (id) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION;"
self.cursor.execute(query)
transaction.commit_on_success(using='default')
# query = """
# ALTER TABLE base_fact_data ADD CONSTRAINT base_fact_data_fk_run_key FOREIGN KEY (run_key)
# REFERENCES dim_run (id) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION
# """
# cursor.execute(query)
#self.conn.commit()
self.cursor.execute("select conname from pg_constraint where conname = '%s_run_key_check'" % self.fact_data_name)
if self.cursor.fetchone() is None:
query = """
ALTER TABLE %(fact_table)s ADD CONSTRAINT %(fact_table)s_run_key_check CHECK (run_key = %(run_id)s);
""" % {'fact_table': self.fact_data_name, 'run_id': self.run.id}
self.cursor.execute(query)
transaction.commit_on_success(using='default')
# Validate all constraints
# cursor.execute('ALTER TABLE base_fact_data VALIDATE CONSTRAINT base_fact_data_fk_channel_key')
# self.conn.commit()
# cursor.execute('ALTER TABLE base_fact_data VALIDATE CONSTRAINT base_fact_data_fk_run_key')
#self.conn.commit()
# cursor.execute('ALTER TABLE %(fact_table)s VALIDATE CONSTRAINT %(fact_table)s_run_key_check'
# % {'fact_table': self.fact_data_name})
# self.conn.commit()
return
def transform_data(self, data, channel, pg_io, pg_index):
"""
The purpose of this method is to transform the data as a list into a binary data. Originally it was part of
each parse_file call, but it was refactored to here. Not the base_ingester class, as only EMOD currently uses
this, but the OM ingester could easily be refactored for this.
:param data: list of data where the index of any given row is its timestep
:type data: list
:param channel: Channel that the data should be associated with
:type channel: DimChannel
:param pg_io: BytesIO to which to add data
:type pg_io: BytesIO
:param pg_index: Index for which the data should be inserted at
:type pg_index: int
"""
if not isinstance(data, list):
raise ValueError('Data must be a list of numbers, received %s', type(data))
if not isinstance(channel, DimChannel):
raise ValueError('Channel must be an instance of DimChannel, received %s' % type(channel))
if not isinstance(pg_io, BytesIO):
raise ValueError('pg_io must be a BytesIO instance to which the data will be appended, received %s' % type(pg_io))
if not isinstance(pg_index, int):
raise ValueError('pg_index must be an integer, received %s' % type(pg_index))
for ts, datum in enumerate(data):
pg_io.write(
encode_binary(
pg_id=pg_index,
timestep=ts,
value=datum,
channel_key=channel.id,
run_key=self.run.id,
replication_key=self.replication.id)
)
pg_index += 1
return pg_index
| mpl-2.0 |
zzicewind/nova | nova/tests/unit/virt/xenapi/client/test_objects.py | 80 | 3981 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.xenapi import stubs
from nova import utils
from nova.virt.xenapi.client import objects
class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
    """Verifies attribute access on a session object turns into XenAPI calls."""

    def setUp(self):
        super(XenAPISessionObjectTestCase, self).setUp()
        self.session = mock.Mock()
        self.obj = objects.XenAPISessionObject(self.session, "FAKE")

    def test_call_method_via_attr(self):
        # An arbitrary attribute becomes a "<name>.<attr>" XenAPI call.
        self.session.call_xenapi.return_value = "asdf"
        self.assertEqual("asdf", self.obj.get_X("ref"))
        self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
    """Checks each wrapper class prefixes calls with its XenAPI class name."""

    def setUp(self):
        super(ObjectsTestCase, self).setUp()
        self.session = mock.Mock()

    def _check_get_X(self, wrapper_cls, xenapi_name):
        # Every wrapper should forward get_X("ref") as "<name>.get_X".
        wrapper = wrapper_cls(self.session)
        wrapper.get_X("ref")
        self.session.call_xenapi.assert_called_once_with(
            "%s.get_X" % xenapi_name, "ref")

    def test_VM(self):
        self._check_get_X(objects.VM, "VM")

    def test_SR(self):
        self._check_get_X(objects.SR, "SR")

    def test_VDI(self):
        self._check_get_X(objects.VDI, "VDI")

    def test_VBD(self):
        self._check_get_X(objects.VBD, "VBD")

    def test_PBD(self):
        self._check_get_X(objects.PBD, "PBD")

    def test_PIF(self):
        self._check_get_X(objects.PIF, "PIF")

    def test_VLAN(self):
        self._check_get_X(objects.VLAN, "VLAN")

    def test_host(self):
        self._check_get_X(objects.Host, "host")

    def test_network(self):
        self._check_get_X(objects.Network, "network")

    def test_pool(self):
        self._check_get_X(objects.Pool, "pool")
class VBDTestCase(stubs.XenAPITestBaseNoDB):
    """Exercises the VBD wrapper's plug/unplug helpers."""

    def setUp(self):
        super(VBDTestCase, self).setUp()
        self.session = mock.Mock()
        self.session.VBD = objects.VBD(self.session)

    def test_plug(self):
        self.session.VBD.plug("vbd_ref", "vm_ref")
        self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")

    def test_unplug(self):
        self.session.VBD.unplug("vbd_ref", "vm_ref")
        self.session.call_xenapi.assert_called_once_with(
            "VBD.unplug", "vbd_ref")

    @mock.patch.object(utils, 'synchronized')
    def test_vbd_plug_check_synchronized(self, mock_synchronized):
        # NOTE(review): despite the name, this exercises unplug; presumably
        # plug and unplug share the same per-VM lock name -- confirm upstream.
        self.session.VBD.unplug("vbd_ref", "vm_ref")
        mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
| apache-2.0 |
zopyx/pyfilesystem | fs/expose/django_storage.py | 7 | 1948 | """
fs.expose.django
================
Use an FS object for Django File Storage
This module exposes the class "FSStorage", a simple adapter for using FS
objects as Django storage objects. Simply include the following lines
in your settings.py::
DEFAULT_FILE_STORAGE = fs.expose.django_storage.FSStorage
DEFAULT_FILE_STORAGE_FS = OSFS('foo/bar') # Or whatever FS
"""
from django.conf import settings
from django.core.files.storage import Storage
from django.core.files import File
from fs.path import abspath, dirname
from fs.errors import convert_fs_errors, ResourceNotFoundError
class FSStorage(Storage):
    """Expose an FS object as a Django File Storage object."""

    def __init__(self, fs=None, base_url=None):
        """
        :param fs: an FS object (defaults to settings.DEFAULT_FILE_STORAGE_FS)
        :param base_url: url to prepend to paths (defaults to settings.MEDIA_URL)
        """
        self.fs = fs if fs is not None else settings.DEFAULT_FILE_STORAGE_FS
        url = base_url if base_url is not None else settings.MEDIA_URL
        # store without a trailing slash; url() joins with an absolute path
        self.base_url = url.rstrip('/')

    def exists(self, name):
        """True when ``name`` is an existing file on the wrapped FS."""
        return self.fs.isfile(name)

    def path(self, name):
        """Return a local filesystem path, if the FS can provide one."""
        syspath = self.fs.getsyspath(name)
        if syspath is None:
            # Django contract: storages without local paths raise this
            raise NotImplementedError
        return syspath

    @convert_fs_errors
    def size(self, name):
        """Size of the stored file, in bytes."""
        return self.fs.getsize(name)

    @convert_fs_errors
    def url(self, name):
        """Public URL for ``name`` under the configured base url."""
        return self.base_url + abspath(name)

    @convert_fs_errors
    def _open(self, name, mode):
        return File(self.fs.open(name, mode))

    @convert_fs_errors
    def _save(self, name, content):
        # Create any missing parent directories before writing.
        self.fs.makedir(dirname(name), allow_recreate=True, recursive=True)
        self.fs.setcontents(name, content)
        return name

    @convert_fs_errors
    def delete(self, name):
        """Delete ``name``; deleting a missing file is a silent no-op."""
        try:
            self.fs.remove(name)
        except ResourceNotFoundError:
            pass
| bsd-3-clause |
karlnapf/kameleon-mcmc | kameleon_mcmc/mcmc/samplers/GMMMetropolis.py | 1 | 3990 | from modshogun import GMM, RealFeatures
from numpy import zeros
from numpy.ma.extras import unique
from numpy.random import randint
from kameleon_mcmc.distribution.Discrete import Discrete
from kameleon_mcmc.distribution.Gaussian import Gaussian
from kameleon_mcmc.distribution.MixtureDistribution import MixtureDistribution
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis
class GMMMetropolis(StandardMetropolis):
    '''
    Runs StandardMetropolis for a number of iterations, then performs a couple
    of EM instances to fit a Gaussian Mixture Model which is subsequently used
    as a static proposal distribution.
    '''

    def __init__(self, distribution, num_components, num_sample_discard=1000,
                 num_samples_gmm=1000, num_samples_when_to_switch=40000, num_runs_em=1):
        StandardMetropolis.__init__(self, distribution)

        self.num_components = num_components
        self.num_sample_discard = num_sample_discard
        self.num_samples_gmm = num_samples_gmm
        self.num_samples_when_to_switch = num_samples_when_to_switch
        self.num_runs_em = num_runs_em

        # No fitted proposal yet; adapt() installs one at the switch iteration.
        self.proposal = None

    def __str__(self):
        parts = [
            "num_components=" + str(self.num_components),
            "num_sample_discard=" + str(self.num_sample_discard),
            "num_samples_gmm=" + str(self.num_samples_gmm),
            "num_runs_em=" + str(self.num_runs_em),
            StandardMetropolis.__str__(self),
        ]
        return self.__class__.__name__ + "=[" + ", ".join(parts) + "]"

    def adapt(self, mcmc_chain, step_output):
        # Learn the proposal exactly once, at the pre-specified iteration.
        if mcmc_chain.iteration != self.num_samples_when_to_switch:
            return
        iteration = mcmc_chain.iteration
        # Sample (with replacement) indices past the burn-in, then dedupe.
        sample_inds = randint(iteration - self.num_sample_discard,
                              size=self.num_samples_gmm) + self.num_sample_discard
        self.proposal = self.fit_gmm(mcmc_chain.samples[unique(sample_inds)])

    def construct_proposal(self, y):
        # Fixed GMM proposal exists from a certain iteration on; fall back to
        # the standard Metropolis proposal before that.
        if self.proposal is None:
            return StandardMetropolis.construct_proposal(self, y)
        return self.proposal

    def fit_gmm(self, samples):
        """
        Runs a couple of EM instances from random starting points and returns
        the internal mixture representation of the best one.
        """
        features = RealFeatures(samples.T)

        candidates = []
        likelihoods = zeros(self.num_runs_em)
        for run in range(self.num_runs_em):
            # Shogun's GMM EM starts from a random initialisation each time.
            gmm = GMM(self.num_components)
            gmm.set_features(features)
            likelihoods[run] = gmm.train_em()
            candidates.append(gmm)

        best = candidates[likelihoods.argmax()]

        # Convert the winning fit's components to the internal representation.
        components = [Gaussian(best.get_nth_mean(i), best.get_nth_cov(i))
                      for i in range(self.num_components)]
        weights = best.get_coef()
        return MixtureDistribution(components[0].dimension,
                                   self.num_components, components,
                                   Discrete(weights))
| bsd-2-clause |
chrisma/ScrumLint | metricsapp/templatetags/artifactformat.py | 1 | 3754 | from django import template
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.template.defaultfilters import slugify
register = template.Library()
def trunc(string, length=75, end='..'):
    """Shorten ``string`` to ``length`` characters, appending ``end``.

    Strings that already fit are returned unchanged; note a truncated
    result is ``length + len(end)`` characters long.
    """
    if len(string) <= length:
        return string
    return string[:length] + end
def is_issue(obj):
    """True when ``obj`` looks like a Github issue payload
    (a dict whose API url contains 'issues')."""
    return isinstance(obj, dict) and 'issues' in obj.get('url', '')
def is_commit(obj):
    """True when ``obj`` looks like a Github commit payload
    (a dict whose API url contains 'commits')."""
    return isinstance(obj, dict) and 'commits' in obj.get('url', '')
def is_user(obj):
    """True when ``obj`` looks like a Github user payload
    (a dict whose API url contains 'users')."""
    return isinstance(obj, dict) and 'users' in obj.get('url', '')
def _format_issue(issue, esc):
    """Render a Github issue object as a safe HTML link.

    esc : escape function applied to every interpolated field
    """
    html = '<a href="{href}" target="_blank">#{number}</a>: {title}'.format(
        href=esc(issue['html_url']),
        number=esc(issue['number']),
        title=esc(trunc(issue['title'])),
    )
    return mark_safe(html)
def _format_commit(commit, esc):
    """Render a Github commit object as a safe HTML link with a tooltip.

    esc : escape function applied to every interpolated field
    """
    template = ('<a href="{href}" target="_blank" title="{html_title} | {author}">'
                '[{sha}] {commit_message}</a>')
    html = template.format(
        href=esc(commit['html_url']),
        commit_message=esc(trunc(commit['commit_message'], length=50)),
        sha=esc(trunc(commit['sha'], length=6, end='')),
        author=esc(commit['commit_author_name']),
        html_title=esc(commit['commit_message']),
    )
    return mark_safe(html)
def _format_user(user, esc):
    """Render a Github user object as a safe HTML link with avatar.

    esc : escape function applied to every interpolated field
    """
    template = ('<a href="{href}" target="_blank" title="Github ID:{html_title}">'
                '<img style="height: 30px; border-radius: 15px;" src="{avatar_url}">'
                ' {login}</a>')
    html = template.format(
        href=esc(user['html_url']),
        html_title=esc(user['id']),
        login=esc(user['login']),
        avatar_url=esc(user['avatar_url']),
    )
    return mark_safe(html)
def float_format(f):
    """Format a float with two decimals, dropping a trailing '.00'."""
    text = "{0:.2f}".format(f)
    return text[:-3] if text.endswith('.00') else text
@register.filter(name='format_artifact', needs_autoescape=True)
def format_artifact(value, autoescape=True):
    """Render a Github artifact -- or a homogeneous list of artifacts --
    as HTML for display in templates.

    Lists of users/issues/commits/strings/floats/ints each get a joined,
    type-specific rendering; scalars are dispatched to the matching
    _format_* helper. Anything unrecognised is returned unchanged.
    """
    esc = conditional_escape if autoescape else lambda x: x

    if isinstance(value, list):
        # NOTE(review): all() on an empty list is True, so an empty list
        # matches the first (user) branch and renders as '' -- presumably
        # intentional, confirm with callers.
        if all(is_user(e) for e in value):
            return mark_safe(', '.join(_format_user(e, esc) for e in value))
        if all(is_issue(e) for e in value):
            return mark_safe('<br>'.join(_format_issue(e, esc) for e in value))
        if all(is_commit(e) for e in value):
            return mark_safe('<br>'.join(_format_commit(e, esc) for e in value))
        if all(isinstance(e, (str, unicode)) for e in value):
            return mark_safe(', '.join(value))
        if all(isinstance(e, float) for e in value):
            return mark_safe('<br>'.join(float_format(e) for e in value))
        if all(isinstance(e, int) for e in value):
            return mark_safe('<br>'.join(str(e) for e in value))
        return value

    # Scalar values
    if isinstance(value, float):
        return float_format(value)
    for looks_like, renderer in ((is_issue, _format_issue),
                                 (is_commit, _format_commit),
                                 (is_user, _format_user)):
        if looks_like(value):
            return renderer(value, esc)
    return value
@register.filter(name='un_underscore')
def un_underscore(string):
    """Template filter: display underscores as spaces ('a_b' -> 'a b')."""
    return ' '.join(string.split('_'))
@register.filter(name='floor_to_multiple')
def floor_to_multiple(num, multiple=10):
    """Round ``num`` down to the nearest multiple of ``multiple``.

    Always returns a float; Python's float modulo makes this floor toward
    negative infinity (e.g. -3 -> -10.0 with the default step).
    """
    value = float(num)
    step = float(multiple)
    return value - (value % step)
@register.filter(name='to_js_var')
def to_js_var(string):
    """Convert an arbitrary string into a valid Javascript variable name."""
    slug = slugify(str(string))
    # slugify yields hyphen-separated tokens; hyphens are illegal in JS names
    return slug.replace('-', '_')
| mit |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/mako/exceptions.py | 7 | 13158 | # mako/exceptions.py
# Copyright 2006-2019 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import sys
import traceback
from mako import compat
from mako import util
class MakoException(Exception):
    """Base class for all exceptions raised by Mako."""
    pass
class RuntimeException(MakoException):
    """Raised for errors that occur while rendering a template."""
    pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
    """Raised when a template fails to compile; records the source position."""

    def __init__(self, message, source, lineno, pos, filename):
        MakoException.__init__(
            self, message + _format_filepos(lineno, pos, filename))
        self.source = source        # full template source text
        self.filename = filename    # template filename, or None
        self.lineno = lineno        # 1-based line within the template
        self.pos = pos              # character position within the line
class SyntaxException(MakoException):
    """Raised when template source contains a syntax error; records position."""

    def __init__(self, message, source, lineno, pos, filename):
        MakoException.__init__(
            self, message + _format_filepos(lineno, pos, filename))
        self.source = source        # full template source text
        self.filename = filename    # template filename, or None
        self.lineno = lineno        # 1-based line within the template
        self.pos = pos              # character position within the line
class UnsupportedError(MakoException):
    """Raised when a feature that has been retired from Mako is used."""
class NameConflictError(MakoException):
    """Raised when a reserved word is used inappropriately in a template."""
class TemplateLookupException(MakoException):
    """Raised when a template cannot be located by a TemplateLookup."""
    pass
class TopLevelLookupException(TemplateLookupException):
    """Raised when a top-level (directly requested) template is not found."""
    pass
class RichTraceback(object):
    """Pull the current exception from the ``sys`` traceback and extracts
    Mako-specific template information.

    See the usage examples in :ref:`handling_exceptions`.
    """

    def __init__(self, error=None, traceback=None):
        # Default to an empty source/lineno; these are filled in either from
        # a compile-time exception below or from the traceback walk in _init().
        self.source, self.lineno = "", 0

        # When either piece is missing, fall back to the exception currently
        # being handled (sys.exc_info()).
        if error is None or traceback is None:
            t, value, tback = sys.exc_info()

        if error is None:
            # `value` can be None for some exception types; use the type then.
            error = value or t

        if traceback is None:
            traceback = tback

        self.error = error
        self.records = self._init(traceback)

        if isinstance(self.error, (CompileException, SyntaxException)):
            # Compile-time errors carry their own template source/position.
            self.source = self.error.source
            self.lineno = self.error.lineno
            self._has_source = True

        self._init_message()

    @property
    def errorname(self):
        # Fully-qualified name of the exception class, via the compat shim.
        return compat.exception_name(self.error)

    def _init_message(self):
        """Find a unicode representation of self.error"""
        try:
            self.message = compat.text_type(self.error)
        except UnicodeError:
            try:
                self.message = str(self.error)
            except UnicodeEncodeError:
                # Fallback to args as neither unicode nor
                # str(Exception(u'\xe6')) work in Python < 2.6
                self.message = self.error.args[0]
        if not isinstance(self.message, compat.text_type):
            # Force a text type, replacing undecodable bytes.
            self.message = compat.text_type(self.message, "ascii", "replace")

    def _get_reformatted_records(self, records):
        # Collapse the extended records produced by _init() back into
        # standard 4-tuples, preferring the template-side information
        # (rec[4..6]) when a template line was resolved (rec[6] not None).
        for rec in records:
            if rec[6] is not None:
                yield (rec[4], rec[5], rec[2], rec[6])
            else:
                yield tuple(rec[0:4])

    @property
    def traceback(self):
        """Return a list of 4-tuple traceback records (i.e. normal python
        format) with template-corresponding lines remapped to the originating
        template.
        """
        return list(self._get_reformatted_records(self.records))

    @property
    def reverse_records(self):
        # Raw (extended) records, innermost frame first.
        return reversed(self.records)

    @property
    def reverse_traceback(self):
        """Return the same data as traceback, except in reverse order.
        """
        return list(self._get_reformatted_records(self.reverse_records))

    def _init(self, trcback):
        """format a traceback from sys.exc_info() into 7-item tuples,
        containing the regular four traceback tuple items, plus the original
        template filename, the line number adjusted relative to the template
        source, and code line from that line number of the template."""
        # NOTE(review): the tuples appended below actually have 8 items (the
        # template source is included as the last element) — the docstring
        # undercounts; left as-is to preserve behavior.

        # Imported locally to avoid a circular import at module load time.
        import mako.template

        # Per-module cache: filename -> (line_map, template_lines, template_filename)
        mods = {}
        rawrecords = traceback.extract_tb(trcback)
        new_trcback = []
        for filename, lineno, function, line in rawrecords:
            if not line:
                line = ""
            try:
                (line_map, template_lines, template_filename) = mods[filename]
            except KeyError:
                try:
                    # Is this frame inside a compiled Mako template module?
                    info = mako.template._get_module_info(filename)
                    module_source = info.code
                    template_source = info.source
                    template_filename = (
                        info.template_filename
                        or info.template_uri
                        or filename
                    )
                except KeyError:
                    # A normal .py file (not a Template)
                    if not compat.py3k:
                        # Python 2: decode the source line using the file's
                        # declared encoding, if one can be determined.
                        try:
                            fp = open(filename, "rb")
                            encoding = util.parse_encoding(fp)
                            fp.close()
                        except IOError:
                            encoding = None
                        if encoding:
                            line = line.decode(encoding)
                        else:
                            line = line.decode("ascii", "replace")
                    # No template info available: pad with Nones.
                    new_trcback.append(
                        (
                            filename,
                            lineno,
                            function,
                            line,
                            None,
                            None,
                            None,
                            None,
                        )
                    )
                    continue

                template_ln = 1

                # Pull the generated-module -> template line map out of the
                # module's embedded metadata.
                mtm = mako.template.ModuleInfo
                source_map = mtm.get_module_source_metadata(
                    module_source, full_line_map=True
                )
                line_map = source_map["full_line_map"]

                template_lines = [
                    line_ for line_ in template_source.split("\n")
                ]
                mods[filename] = (line_map, template_lines, template_filename)

            # Map the generated-code line number back to the template line.
            template_ln = line_map[lineno - 1]

            if template_ln <= len(template_lines):
                template_line = template_lines[template_ln - 1]
            else:
                template_line = None
            new_trcback.append(
                (
                    filename,
                    lineno,
                    function,
                    line,
                    template_filename,
                    template_ln,
                    template_line,
                    template_source,
                )
            )
        if not self.source:
            # Walk records innermost-first looking for a template frame to
            # adopt as the error's source/lineno.
            for l in range(len(new_trcback) - 1, 0, -1):
                if new_trcback[l][5]:
                    self.source = new_trcback[l][7]
                    self.lineno = new_trcback[l][5]
                    break
            else:
                # No template frame found: fall back to reading the last
                # plain-Python file in the traceback.
                if new_trcback:
                    try:
                        # A normal .py file (not a Template)
                        fp = open(new_trcback[-1][0], "rb")
                        encoding = util.parse_encoding(fp)
                        if compat.py3k and not encoding:
                            encoding = "utf-8"
                        fp.seek(0)
                        self.source = fp.read()
                        fp.close()
                        if encoding:
                            self.source = self.source.decode(encoding)
                    except IOError:
                        self.source = ""
                    self.lineno = new_trcback[-1][1]
        return new_trcback
def text_error_template(lookup=None):
    """Provides a template that renders a stack trace in a similar format to
    the Python interpreter, substituting source template filenames, line
    numbers and code for that of the originating source template, as
    applicable.

    :param lookup: unused; kept for interface compatibility.
    :return: a :class:`mako.template.Template` rendering the current
        (or supplied) exception as plain text.
    """
    # Imported locally to avoid a circular import at module load time.
    import mako.template

    return mako.template.Template(
        r"""
<%page args="error=None, traceback=None"/>
<%!
    from mako.exceptions import RichTraceback
%>\
<%
    tback = RichTraceback(error=error, traceback=traceback)
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
  File "${filename}", line ${lineno}, in ${function or '?'}
    ${line | trim}
% endfor
${tback.errorname}: ${tback.message}
"""
    )
def _install_pygments():
    """Bind pygments-backed syntax highlighting into this module's globals."""
    global syntax_highlight, pygments_html_formatter
    # Raises ImportError when pygments is unavailable; caller handles it.
    from mako.ext.pygmentplugin import syntax_highlight  # noqa
    from mako.ext.pygmentplugin import pygments_html_formatter  # noqa
def _install_fallback():
    """Install a no-highlighting fallback (plain HTML escaping only)."""
    global syntax_highlight, pygments_html_formatter
    from mako.filters import html_escape

    # No formatter available; templates test this for None.
    pygments_html_formatter = None

    def syntax_highlight(filename="", language=None):
        # Same signature as the pygments version, but only escapes HTML.
        return html_escape
def _install_highlighting():
    """Use pygments-based highlighting when installed, else the fallback."""
    try:
        _install_pygments()
    except ImportError:
        _install_fallback()
_install_highlighting()
def html_error_template():
    """Provides a template that renders a stack trace in an HTML format,
    providing an excerpt of code as well as substituting source template
    filenames, line numbers and code for that of the originating source
    template, as applicable.

    The template's default ``encoding_errors`` value is
    ``'htmlentityreplace'``. The template has two options. With the
    ``full`` option disabled, only a section of an HTML document is
    returned. With the ``css`` option disabled, the default stylesheet
    won't be included.
    """
    # Imported locally to avoid a circular import at module load time.
    import mako.template

    return mako.template.Template(
        r"""
<%!
    from mako.exceptions import RichTraceback, syntax_highlight,\
        pygments_html_formatter
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
<html>
<head>
    <title>Mako Runtime Error</title>
% endif
% if css:
    <style>
        body { font-family:verdana; margin:10px 30px 10px 30px;}
        .stacktrace { margin:5px 5px 5px 5px; }
        .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
        .nonhighlight { padding:0px; background-color:#DFDFDF; }
        .sample { padding:10px; margin:10px 10px 10px 10px;
                  font-family:monospace; }
        .sampleline { padding:0px 10px 0px 10px; }
        .sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
        .location { font-size:80%; }
        .highlight { white-space:pre; }
        .sampleline { white-space:pre; }

    % if pygments_html_formatter:
        ${pygments_html_formatter.get_style_defs()}
        .linenos { min-width: 2.5em; text-align: right; }
        pre { margin: 0; }
        .syntax-highlighted { padding: 0 10px; }
        .syntax-highlightedtable { border-spacing: 1px; }
        .nonhighlight { border-top: 1px solid #DFDFDF;
                        border-bottom: 1px solid #DFDFDF; }
        .stacktrace .nonhighlight { margin: 5px 15px 10px; }
        .sourceline { margin: 0 0; font-family:monospace; }
        .code { background-color: #F8F8F8; width: 100%; }
        .error .code { background-color: #FFBDBD; }
        .error .syntax-highlighted { background-color: #FFBDBD; }
    % endif

    </style>
% endif
% if full:
</head>
<body>
% endif

<h2>Error !</h2>
<%
    tback = RichTraceback(error=error, traceback=traceback)
    src = tback.source
    line = tback.lineno
    if src:
        lines = src.split('\n')
    else:
        lines = None
%>
<h3>${tback.errorname}: ${tback.message|h}</h3>

% if lines:
    <div class="sample">
    <div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
    <%
       if pygments_html_formatter:
           pygments_html_formatter.linenostart = index + 1
    %>
    % if index + 1 == line:
    <%
       if pygments_html_formatter:
           old_cssclass = pygments_html_formatter.cssclass
           pygments_html_formatter.cssclass = 'error ' + old_cssclass
    %>
    ${lines[index] | syntax_highlight(language='mako')}
    <%
       if pygments_html_formatter:
           pygments_html_formatter.cssclass = old_cssclass
    %>
    % else:
    ${lines[index] | syntax_highlight(language='mako')}
    % endif
% endfor
    </div>
    </div>
% endif

<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
    <div class="location">${filename}, line ${lineno}:</div>
    <div class="nonhighlight">
    <%
       if pygments_html_formatter:
           pygments_html_formatter.linenostart = lineno
    %>
      <div class="sourceline">${line | syntax_highlight(filename)}</div>
    </div>
% endfor
</div>

% if full:
</body>
</html>
% endif
""",
        output_encoding=sys.getdefaultencoding(),
        encoding_errors="htmlentityreplace",
    )
| isc |
amjith/python-prompt-toolkit | prompt_toolkit/utils.py | 1 | 3426 | from __future__ import unicode_literals
import os
import signal
import sys
import threading
from wcwidth import wcwidth
__all__ = (
'Callback',
'DummyContext',
'get_cwidth',
'suspend_to_background_supported',
'is_conemu_ansi',
'is_windows',
'in_main_thread',
)
class Callback(object):
    """
    Wrapper around a list of event handlers, used for event propagation.

    Either wrap a single callable up front (this also works as a
    decorator)::

        c = Callback(function)
        c.fire()

    or create an empty instance and let interested code attach handlers
    afterwards using the ``+=`` operator::

        c = Callback()
        c += handler_function  # Add event handler.
        c.fire()  # Fire event.
    """
    def __init__(self, func=None):
        assert func is None or callable(func)
        self._handlers = [func] if func else []

    def fire(self, *args, **kwargs):
        """
        Trigger the callback: invoke every registered handler, in the
        order they were added, forwarding all arguments.
        """
        for callback in self._handlers:
            callback(*args, **kwargs)

    def __iadd__(self, handler):
        """
        Register an additional handler (``callback += handler``).
        """
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):
        """
        Unregister a previously added handler (``callback -= handler``).
        """
        self._handlers.remove(handler)
        return self

    def __or__(self, other):
        """
        Chain two callbacks into a new one using the ``|`` operator.

        Firing the result fires both operands (without arguments).
        """
        assert isinstance(other, Callback)

        def fire_both():
            self.fire()
            other.fire()

        return Callback(fire_both)
class DummyContext(object):
    """
    No-op context manager, usable where a ``with`` block is syntactically
    required but nothing needs to happen.

    (contextlib.nested is not available on Py3)
    """
    def __enter__(self):
        return None

    def __exit__(self, *a):
        # Returning a falsy value never suppresses exceptions.
        return None
class _CharSizesCache(dict):
    """
    Cache for wcwidth sizes.

    Maps a string to its display width; widths are computed lazily in
    ``__missing__`` and memoised in the dict itself.
    """
    def __missing__(self, string):
        # wcwidth() reports -1 for some non-printable control characters
        # (e.g. Ctrl-underscore) which can still end up in the input text,
        # so clamp each width to a minimum of 0.
        if len(string) == 1:
            width = max(0, wcwidth(string))
        else:
            width = sum(max(0, wcwidth(char)) for char in string)
        self[string] = width
        return width
_CHAR_SIZES_CACHE = _CharSizesCache()
def get_cwidth(string):
    """
    Return width of a string. Wrapper around ``wcwidth``.

    Results are memoised in the module-level ``_CHAR_SIZES_CACHE``.
    """
    return _CHAR_SIZES_CACHE[string]
def suspend_to_background_supported():
    """
    Returns `True` when the Python implementation supports
    suspend-to-background. This is typically `False' on Windows systems.
    """
    # SIGTSTP (terminal stop, Ctrl-Z) only exists on POSIX platforms.
    return hasattr(signal, 'SIGTSTP')
def is_windows():
    """
    True when we are using Windows.
    """
    return sys.platform.startswith('win')  # E.g. 'win32', not 'darwin' or 'linux2'
def is_conemu_ansi():
    """
    True when the ConEmu Windows console is used.
    """
    # ConEmu advertises ANSI support through the ConEmuANSI env variable.
    return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
def in_main_thread():
    """
    True when the current thread is the main thread.

    Detected by class name so it also works on interpreters where the
    main-thread class is private (``threading._MainThread``).
    """
    thread_cls = type(threading.current_thread())
    return thread_cls.__name__ == '_MainThread'
| bsd-3-clause |
J4LP/eve-wspace | evewspace/Alerts/migrations/0001_initial.py | 20 | 7215 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the Alerts app.

    Auto-generated by South (``schemamigration --initial``); edit with care —
    the ``models`` dict below is a frozen snapshot of the ORM at generation
    time, not the live model definitions.
    """

    def forwards(self, orm):
        """Create the Alerts tables."""
        # Adding model 'SubscriptionGroup'
        db.create_table('Alerts_subscriptiongroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
            ('desc', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('special', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('Alerts', ['SubscriptionGroup'])

        # Adding model 'Subscription'
        db.create_table('Alerts_subscription', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['Alerts.SubscriptionGroup'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='alert_groups', to=orm['auth.User'])),
        ))
        db.send_create_signal('Alerts', ['Subscription'])

        # Adding model 'SubscriptionGroupPermission'
        db.create_table('Alerts_subscriptiongrouppermission', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user_group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='alert_groups', to=orm['auth.Group'])),
            ('sub_group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='group_permissions', to=orm['Alerts.SubscriptionGroup'])),
            ('can_broadcast', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('can_join', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('Alerts', ['SubscriptionGroupPermission'])

    def backwards(self, orm):
        """Drop the Alerts tables (reverse of :meth:`forwards`)."""
        # Deleting model 'SubscriptionGroup'
        db.delete_table('Alerts_subscriptiongroup')

        # Deleting model 'Subscription'
        db.delete_table('Alerts_subscription')

        # Deleting model 'SubscriptionGroupPermission'
        db.delete_table('Alerts_subscriptiongrouppermission')

    # Frozen ORM definitions used by South to reconstruct model state.
    models = {
        'Alerts.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Alerts.SubscriptionGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alert_groups'", 'to': "orm['auth.User']"})
        },
        'Alerts.subscriptiongroup': {
            'Meta': {'object_name': 'SubscriptionGroup'},
            'desc': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'through': "orm['Alerts.Subscription']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'Alerts.subscriptiongrouppermission': {
            'Meta': {'object_name': 'SubscriptionGroupPermission'},
            'can_broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_join': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sub_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_permissions'", 'to': "orm['Alerts.SubscriptionGroup']"}),
            'user_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alert_groups'", 'to': "orm['auth.Group']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['Alerts']
CroissanceCommune/autonomie | autonomie/views/payment.py | 1 | 6777 | # -*- coding: utf-8 -*-
# * File Name : payments.py
#
# * Copyright (C) 2010 Gaston TJEBBES <g.t@majerti.fr>
# * Company : Majerti ( http://www.majerti.fr )
#
# This software is distributed under GPLV3
# License: http://www.gnu.org/licenses/gpl-3.0.txt
#
# * Creation Date : 02-06-2014
# * Last Modified :
#
# * Project :
#
"""
Views related to payments edition
"""
import logging
from pyramid.httpexceptions import HTTPFound
from autonomie.utils.widgets import ViewLink
from autonomie.models.task import Invoice
from autonomie.models.expense.sheet import ExpenseSheet
from autonomie.forms.tasks.invoice import (
PaymentSchema,
)
from autonomie.forms.expense import (
ExpensePaymentSchema,
)
from autonomie.forms import (
merge_session_with_post,
)
from autonomie.views import (
BaseFormView,
)
log = logging.getLogger(__name__)
def populate_invoice_payment_actionmenu(context, request):
    """
    Set the menu items in the request context

    :param context: the invoice payment being viewed
    :param request: the pyramid request
    """
    # Link back to the parent invoice's HTML view.
    link = ViewLink(
        u"Voir la facture",
        path="/%ss/{id}.html" % context.parent.type_,
        id=context.parent.id,
    )
    request.actionmenu.add(link)
    # Edit link (guarded by the 'edit.payment' permission).
    link = ViewLink(
        u"Modifier",
        "edit.payment",
        path="payment",
        id=context.id,
        _query=dict(action="edit")
    )
    request.actionmenu.add(link)
    # Delete link with confirmation; come_from brings the user back.
    link = ViewLink(
        u"Supprimer",
        "delete.payment",
        path="payment",
        confirm=u"Êtes-vous sûr de vouloir supprimer ce paiement ?",
        id=context.id,
        _query=dict(action="delete", come_from=request.referer)
    )
    request.actionmenu.add(link)
def payment_view(context, request):
    """
    Simple payment view

    :param context: the payment being displayed
    :param request: the pyramid request
    :return: template context dict with the page title
    """
    populate_invoice_payment_actionmenu(context, request)
    # NOTE(review): this accesses context.task while the action-menu helper
    # uses context.parent — confirm both attributes point to the invoice.
    return dict(title=u"Paiement pour la facture {0}"
                .format(context.task.official_number))
class PaymentEdit(BaseFormView):
    """
    Edit payment view

    Form view allowing an invoice payment to be modified; on success it
    re-checks the parent invoice's "resulted" status and redirects.
    """
    title = u"Modification d'un paiement"
    schema = PaymentSchema()

    def populate_actionmenu(self):
        # Reuse the standard invoice payment action menu.
        return populate_invoice_payment_actionmenu(self.context, self.request)

    def before(self, form):
        # Pre-fill the form with the payment's current values.
        form.set_appstruct(self.context.appstruct())
        self.populate_actionmenu()
        return form

    def get_default_redirect(self):
        """
        Get the default redirection path
        """
        return self.request.route_path(
            "payment",
            id=self.context.id
        )

    def submit_success(self, appstruct):
        """
        handle successfull submission of the form
        """
        payment_obj = self.context
        # update the payment
        merge_session_with_post(payment_obj, appstruct)
        self.dbsession.merge(payment_obj)

        # Check the invoice status
        # 'resulted' is a form-only flag, popped so it isn't merged above.
        force_resulted = appstruct.pop('resulted', False)
        parent = payment_obj.parent
        parent = parent.check_resulted(
            force_resulted=force_resulted,
            user_id=self.request.user.id
        )
        self.dbsession.merge(parent)

        # Honour an explicit return URL when one was supplied.
        come_from = appstruct.pop('come_from', None)
        if come_from is not None:
            redirect = come_from
        else:
            redirect = self.get_default_redirect()
        return HTTPFound(redirect)
def populate_expense_payment_actionmenu(context, request):
    """
    Set the action menu items for an expense payment view.

    :param context: the expense payment being viewed
    :param request: the pyramid request
    """
    # Link back to the parent expense sheet.
    link = ViewLink(
        u"Voir la feuille de notes de dépense",
        path="/expenses/{id}",
        id=context.parent.id,
    )
    request.actionmenu.add(link)
    # Edit link (guarded by the 'edit.payment' permission).
    link = ViewLink(
        u"Modifier",
        "edit.payment",
        path="expense_payment",
        id=context.id,
        _query=dict(action="edit")
    )
    request.actionmenu.add(link)
    # Delete link with confirmation; come_from brings the user back.
    link = ViewLink(
        u"Supprimer",
        "delete.payment",
        path="expense_payment",
        confirm=u"Êtes-vous sûr de vouloir supprimer ce paiement ?",
        id=context.id,
        _query=dict(action="delete", come_from=request.referer)
    )
    request.actionmenu.add(link)
def expense_payment_view(context, request):
    """
    Simple expense payment view

    :param context: the expense payment being displayed
    :param request: the pyramid request
    :return: template context dict with the page title
    """
    populate_expense_payment_actionmenu(context, request)
    return dict(title=u"Paiement pour la note de dépense {0}"
                .format(context.parent.id))
class ExpensePaymentEdit(PaymentEdit):
    """
    Edit view for expense payments.

    Inherits the submit/validation machinery from PaymentEdit but swaps
    the schema, action menu, and redirect route for the expense variant.
    """
    schema = ExpensePaymentSchema()

    def populate_actionmenu(self):
        return populate_expense_payment_actionmenu(
            self.context,
            self.request,
        )

    def get_default_redirect(self):
        """
        Get the default redirection path
        """
        return self.request.route_path(
            "expense_payment",
            id=self.context.id
        )
def payment_delete(context, request):
    """
    Payment deletion view

    Deletes the payment, re-checks the parent's "resulted" status, then
    redirects to either the provided come_from URL or the parent's view.
    """
    parent = context.parent
    request.dbsession.delete(context)
    # Removing a payment can change whether the parent is fully paid.
    parent = parent.check_resulted(user_id=request.user.id)
    request.dbsession.merge(parent)
    request.session.flash(u"Le paiement a bien été supprimé")
    # NOTE(review): if come_from is absent and parent is neither an Invoice
    # nor an ExpenseSheet, `redirect` is unbound (NameError) — confirm all
    # payment parents are covered by these two types.
    if 'come_from' in request.GET:
        redirect = request.GET['come_from']
    elif isinstance(parent, Invoice):
        redirect = request.route_path(
            "/invoices/{id}.html",
            id=parent.id
        )
    elif isinstance(parent, ExpenseSheet):
        redirect = request.route_path("expensesheet", id=parent.id)
    return HTTPFound(redirect)
def add_routes(config):
    """
    Add module's related routes

    :param config: the pyramid Configurator

    Registers the traversal routes for invoice payments and expense
    payments; {id} is constrained to digits by the route pattern.
    """
    # Raw strings so the `\d` regex escape is explicit (avoids the invalid
    # escape sequence DeprecationWarning on Python 3); the pattern value is
    # byte-identical to the previous non-raw form.
    config.add_route(
        "payment",
        r"/payments/{id:\d+}",
        traverse="/payments/{id}",
    )

    config.add_route(
        "expense_payment",
        r"/expense_payments/{id:\d+}",
        traverse="/expense_payments/{id}",
    )
def includeme(config):
    """
    Pyramid inclusion hook: register the payment routes and views.

    :param config: the pyramid Configurator
    """
    add_routes(config)
    # Invoice payment views: display, edit form, delete.
    config.add_view(
        payment_view,
        route_name="payment",
        permission="view.payment",
        renderer="/payment.mako",
    )
    config.add_view(
        PaymentEdit,
        route_name="payment",
        permission="edit.payment",
        request_param='action=edit',
        renderer="/base/formpage.mako",
    )
    config.add_view(
        payment_delete,
        route_name="payment",
        permission="delete.payment",
        request_param="action=delete",
    )
    # Expense payment views: same trio; payment_delete is shared since it
    # dispatches on the parent's type.
    config.add_view(
        expense_payment_view,
        route_name="expense_payment",
        permission="view.payment",
        renderer="/payment.mako",
    )
    config.add_view(
        ExpensePaymentEdit,
        route_name="expense_payment",
        permission="edit.payment",
        request_param='action=edit',
        renderer="/base/formpage.mako",
    )
    config.add_view(
        payment_delete,
        route_name="expense_payment",
        permission="delete.payment",
        request_param="action=delete",
    )
| gpl-3.0 |
CorySpitzer/ansible | v1/ansible/cache/redis.py | 117 | 3477 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import collections
# FIXME: can we store these as something else before we ship it?
import sys
import time
try:
import simplejson as json
except ImportError:
import json
from ansible import constants as C
from ansible.utils import jsonify
from ansible.cache.base import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
print "The 'redis' python module is required, 'pip install redis'"
sys.exit(1)
class CacheModule(BaseCacheModule):
    """
    A caching module backed by redis.

    Keys are maintained in a zset with their score being the timestamp
    when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used or a pattern matched 'scan' for
    performance.
    """
    def __init__(self, *args, **kwargs):
        # Connection string format: "host:port[:db]"; split args are passed
        # positionally to StrictRedis. Empty -> library defaults (localhost).
        if C.CACHE_PLUGIN_CONNECTION:
            connection = C.CACHE_PLUGIN_CONNECTION.split(':')
        else:
            connection = []

        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = StrictRedis(*connection)
        # Name of the zset tracking insertion timestamps for expiry.
        self._keys_set = 'ansible_cache_keys'

    def _make_key(self, key):
        # All cache entries share a configurable namespace prefix.
        return self._prefix + key

    def get(self, key):
        value = self._cache.get(self._make_key(key))
        # guard against the key not being removed from the zset;
        # this could happen in cases where the timeout value is changed
        # between invocations
        if value is None:
            self.delete(key)
            raise KeyError
        return json.loads(value)

    def set(self, key, value):
        value2 = jsonify(value)
        if self._timeout > 0:  # a timeout of 0 is handled as meaning 'never expire'
            self._cache.setex(self._make_key(key), int(self._timeout), value2)
        else:
            self._cache.set(self._make_key(key), value2)

        # NOTE(review): zadd(name, score, member) is the redis-py < 3.0
        # calling convention — confirm the pinned redis-py version.
        self._cache.zadd(self._keys_set, time.time(), key)

    def _expire_keys(self):
        # Drop bookkeeping entries older than the timeout window.
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)

    def keys(self):
        self._expire_keys()
        return self._cache.zrange(self._keys_set, 0, -1)

    def contains(self, key):
        self._expire_keys()
        # zrank returns None when absent; >= 0 for any present rank.
        return (self._cache.zrank(self._keys_set, key) >= 0)

    def delete(self, key):
        # Remove both the value and its expiry-tracking entry.
        self._cache.delete(self._make_key(key))
        self._cache.zrem(self._keys_set, key)

    def flush(self):
        for key in self.keys():
            self.delete(key)

    def copy(self):
        # FIXME: there is probably a better way to do this in redis
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret
| gpl-3.0 |
geekboxzone/mmallow_external_parameter-framework | test/functional-tests/ACTCampaignEngine.py | 20 | 3241 | #!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Create a test suite for all tests about SET/GET commands
Uses PfwSetTsetSuite to create a single instance of the HAL
for all the SET/GEt commands.
These commands are tested using the methods of the classes
"BooleanTestCase", etc...
"""
import sys
import os
import unittest
import shutil
from Util import PfwUnitTestLib
class Logger(object):
    """Tee writer: duplicates everything written to it to the real stdout
    and to a log file.

    Intended to stand in for ``sys.stdout`` so test output is both shown
    and persisted.
    """

    def __init__(self, filename="Default.log"):
        # Keep a reference to the real stdout so output is still displayed.
        self.terminal = sys.stdout
        # Append mode: successive runs accumulate in the same log file.
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # File-like API completeness: anything that replaces sys.stdout must
        # support flush() (the interpreter calls it at exit, and client code
        # may call it explicitly); without it such calls raise AttributeError.
        self.terminal.flush()
        self.log.flush()
def testsRunner(testDirectory):
tests = unittest.defaultTestLoader.discover(testDirectory, pattern='t*.py')
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(tests).wasSuccessful()
def main():
    """Entry point: set up the test environment, run the ACT campaign
    test suite, clean up, and exit with a pass/fail status code."""
    # PFW_ROOT / PFW_RESULT must be provided by the caller's environment.
    pfw_root = os.environ["PFW_ROOT"]
    pfw_result = os.environ["PFW_RESULT"]
    xml_path = "xml/configuration/ParameterFrameworkConfiguration.xml"

    # Expose paths the individual test cases read back from the environment.
    os.environ["PFW_TEST_TOOLS"] = os.path.dirname(os.path.abspath(__file__))
    os.environ["PFW_TEST_CONFIGURATION"] = os.path.join(pfw_root, xml_path)

    try:
        # This directory must not exist. An exception will be raised if it does.
        os.makedirs(pfw_result)

        # Seed the state files the parameter-framework test hooks consume.
        isAlive = os.path.join(pfw_result, "isAlive")
        with open(isAlive, 'w') as fout:
            fout.write('true')

        needResync = os.path.join(pfw_result, "needResync")
        with open(needResync, 'w') as fout:
            fout.write('false')

        success = testsRunner('PfwTestCase')
    finally:
        # Always remove the result dir so the next run's makedirs succeeds.
        shutil.rmtree(pfw_result)

    sys.exit(0 if success else 1)
if __name__ == "__main__":
main()
| bsd-3-clause |
kmaglione/amo-validator | tests/test_submain.py | 1 | 11482 | import mock
import time
from nose.tools import eq_
from validator import submain
from validator.chromemanifest import ChromeManifest
from validator.errorbundler import ErrorBundle
from .helper import MockXPI
@mock.patch('validator.submain.test_package')
def test_prepare_package(test_package):
    """Tests that the prepare_package does not raise any errors when given
    a valid add-on."""
    err = ErrorBundle()
    submain.prepare_package(err, 'tests/resources/main/foo.xpi')
    assert not err.failed()
@mock.patch('validator.submain.test_package')
@mock.patch('validator.constants.IN_TESTS', False)
def test_validation_timeout(test_package):
    """Test that a validation running past its timeout produces a
    validation_timeout error placed ahead of pre-existing errors."""
    # Make the patched test_package outlast the 0.1s timeout below.
    def slow(*args, **kw):
        time.sleep(1)
    test_package.side_effect = slow

    err = ErrorBundle()
    err.error(('an', 'error'), 'occurred')
    submain.prepare_package(err, 'tests/resources/main/foo.xpi', timeout=0.1)

    # Make sure that our error got moved to the front of the list.
    eq_(len(err.errors), 2)
    eq_(err.errors[0]['id'],
        ('validator', 'unexpected_exception', 'validation_timeout'))
@mock.patch('validator.submain.test_package')
@mock.patch('validator.errorbundler.log')
@mock.patch('validator.constants.IN_TESTS', False)
def test_validation_error(log, test_package):
    """Test that an unexpected exception during validation is turned into
    an error message and logged."""
    test_package.side_effect = Exception

    err = ErrorBundle()
    err.error(('an', 'error'), 'occurred')
    submain.prepare_package(err, 'tests/resources/main/foo.xpi')

    assert log.error.called

    # Make sure that our error got moved to the front of the list.
    eq_(len(err.errors), 2)
    eq_(err.errors[0]['id'], ('validator', 'unexpected_exception'))
@mock.patch('validator.submain.test_search')
def test_prepare_package_extension(test_search):
    'Tests that bad extensions get outright rejections.'

    # Files with an invalid extension raise an error prior to
    # calling `test_search`.
    err = ErrorBundle()
    submain.prepare_package(err, 'foo/bar/test.foo')

    assert not test_search.called

    eq_(len(err.errors), 1)
    eq_(err.errors[0]['id'], ('main', 'prepare_package', 'not_found'))

    # Files which do not exist raise an error prior to calling `test_search`.
    err = ErrorBundle()
    submain.prepare_package(err, 'foo/bar/test.xml')

    assert not test_search.called

    eq_(len(err.errors), 1)
    eq_(err.errors[0]['id'], ('main', 'prepare_package', 'not_found'))
def test_prepare_package_missing():
    'Tests that the prepare_package function fails when file is not found'

    err = ErrorBundle()
    submain.prepare_package(err, 'foo/bar/asdf/qwerty.xyz')

    assert err.failed()
def test_prepare_package_bad_file():
    'Tests that the prepare_package function fails for unknown files'

    err = ErrorBundle()
    submain.prepare_package(err, 'tests/resources/main/foo.bar')

    assert err.failed()
@mock.patch('validator.submain.test_search')
def test_prepare_package_xml(test_search):
    'Tests that the prepare_package function passes with search providers'

    err = ErrorBundle()
    submain.prepare_package(err, 'tests/resources/main/foo.xml')

    assert not err.failed()
    assert test_search.called

    # When the search test itself reports an error, prepare_package fails.
    test_search.side_effect = lambda err, *args: err.error(('x'), 'Failed')
    submain.prepare_package(err, 'tests/resources/main/foo.xml')

    assert err.failed()
# Test the function of the decorator iterator
def test_test_inner_package():
    'Tests that the test_inner_package function works properly'

    # patch_decorator stubs the tier-test registry (defined in this module's
    # helpers above).
    with patch_decorator():
        err = MockErrorHandler()
        submain.test_inner_package(err, 'foo', 'bar')
        assert not err.failed()
def test_test_inner_package_failtier():
    'Tests that the test_inner_package function fails at a failed tier'

    # fail_tier=3 makes the stubbed tier registry report a failure there.
    with patch_decorator(fail_tier=3):
        err = MockErrorHandler()
        submain.test_inner_package(err, 'foo', 'bar')
        assert err.failed()
# Test chrome.manifest populator
def test_populate_chrome_manifest():
    """Ensure that the chrome manifest is populated if available."""
    err = MockErrorHandler()
    package_contents = {
        'chrome.manifest': 'tests/resources/chromemanifest/chrome.manifest'}
    package = MockXPI(package_contents)

    # An XPI without a chrome.manifest should push no resources.
    submain.populate_chrome_manifest(err, MockXPI())
    assert not err.pushable_resources

    submain.populate_chrome_manifest(err, package)
    # The manifest is exposed both as a pushable resource...
    assert err.pushable_resources
    assert 'chrome.manifest' in err.pushable_resources
    print err.pushable_resources
    assert isinstance(err.pushable_resources['chrome.manifest'],
                      ChromeManifest)
    # ...and as a persistent (non-pushed) resource.
    assert err.resources
    assert 'chrome.manifest_nopush' in err.resources
    print err.resources
    assert isinstance(err.resources['chrome.manifest_nopush'], ChromeManifest)
def test_proper_linked_manifest():
    """Test that linked manifests are imported properly."""
    bundle = ErrorBundle()
    xpi = MockXPI({
        'chrome.manifest': 'tests/resources/submain/linkman/base1.manifest',
        'submanifest.manifest':
            'tests/resources/submain/linkman/base2.manifest'})

    submain.populate_chrome_manifest(bundle, xpi)
    manifest = bundle.get_resource('chrome.manifest')
    assert manifest
    assert not bundle.failed() or bundle.notices

    # Entries coming from the base file:
    assert list(manifest.get_entries('foo'))

    # Entries pulled in through the linked manifest:
    zap_entries = list(manifest.get_entries('zap'))
    assert zap_entries
    eq_(zap_entries[0]['filename'], 'submanifest.manifest')
    eq_(zap_entries[0]['context'].data, ['zap baz', ''])
def test_proper_linked_manifest_relative():
    """
    Test that linked manifests are imported relatively when using relative
    paths.
    """
    bundle = ErrorBundle()
    xpi = MockXPI({
        'chrome.manifest': 'tests/resources/submain/linkman/subdir.manifest',
        'dir/level2.manifest':
            'tests/resources/submain/linkman/foosub.manifest',
        'dir/foo.manifest': 'tests/resources/submain/linkman/base2.manifest'})

    submain.populate_chrome_manifest(bundle, xpi)
    manifest = bundle.get_resource('chrome.manifest')
    assert manifest
    assert not bundle.failed() or bundle.notices

    # Entries pulled in through the linked manifest:
    zap_entries = list(manifest.get_entries('zap'))
    assert zap_entries
    eq_(zap_entries[0]['filename'], 'dir/foo.manifest')
    eq_(zap_entries[0]['context'].data, ['zap baz', ''])
def test_missing_manifest_link():
    """Test that missing linked manifests are properly flagged."""
    bundle = ErrorBundle()
    xpi = MockXPI({
        'chrome.manifest': 'tests/resources/submain/linkman/base1.manifest'})

    submain.populate_chrome_manifest(bundle, xpi)
    manifest = bundle.get_resource('chrome.manifest')
    assert manifest
    # A missing link is a notice, not a hard failure.
    assert not bundle.failed()
    assert bundle.notices

    # Entries from the base file are still present:
    assert list(manifest.get_entries('foo'))
    # Entries from the (missing) linked manifest are not:
    assert not list(manifest.get_entries('zap'))
def test_linked_manifest_recursion():
    """Test that recursive linked manifests are flagged properly."""
    err = ErrorBundle()
    package = MockXPI({
        'chrome.manifest': 'tests/resources/submain/linkman/base1.manifest',
        'submanifest.manifest':
            'tests/resources/submain/linkman/recurse.manifest'})

    submain.populate_chrome_manifest(err, package)
    chrome = err.get_resource('chrome.manifest')
    assert chrome
    # print() call form with one argument behaves identically on Python 2
    # and 3, keeping this debug output forward-compatible.
    print(err.print_summary(verbose=True))
    assert not err.failed()
    assert not err.notices

    # From the base file:
    assert list(chrome.get_entries('foo'))
    # From the linked manifest (recursion must not import anything):
    assert not list(chrome.get_entries('zap'))
# Test determined modes
def test_test_inner_package_determined():
    """Verify that determined mode runs test_inner_package() through tier 5."""
    with patch_decorator(determined=True) as decorator:
        handler = MockErrorHandler(determined=True)
        submain.test_inner_package(handler, 'foo', 'bar')
        assert not handler.failed()
        assert decorator.last_tier == 5
def test_test_inner_package_determined_failtier():
    """Verify determined mode still reaches tier 5 despite a failing tier."""
    with patch_decorator(fail_tier=3, determined=True) as decorator:
        handler = MockErrorHandler(determined=True)
        submain.test_inner_package(handler, 'foo', 'bar')
        assert handler.failed()
        assert decorator.last_tier == 5
# These desperately need to be rewritten:
def patch_decorator(*args, **kw):
    """Patch submain.decorator with a MockDecorator built from the arguments."""
    mock_decorator = MockDecorator(*args, **kw)
    return mock.patch.object(submain, 'decorator', mock_decorator)
class MockDecorator(mock.MagicMock):
    """Mock of the decorator/test registrar consumed by test_inner_package.

    Yields a deterministic set of tests per tier and records the last tier
    that was asked for tests, so callers can verify tier ordering and the
    behavior of determined mode.
    """

    def __init__(self, fail_tier=None, determined=False, **kw):
        super(MockDecorator, self).__init__(**kw)
        self.determined = determined
        self.ordering = [1]
        # Tier at which a deliberately failing test is injected (or None).
        self.fail_tier = fail_tier
        # Highest tier for which get_tests() was actually invoked.
        self.last_tier = 0

    def get_tiers(self):
        """Return the tiers in a deliberately scrambled order."""
        return (4, 1, 3, 5, 2)

    def get_tests(self, tier, type):
        """Yield tests for `tier`; inject a failure when tier == fail_tier.

        NOTE: the parameter name `type` shadows the builtin but is kept for
        interface compatibility with the real decorator.
        """
        self.on_tier = tier
        # print() call form with one argument behaves identically on
        # Python 2 and 3, keeping this debug output forward-compatible.
        print('Retrieving Tests: Tier %d' % tier)
        if self.fail_tier is not None:
            if tier == self.fail_tier:
                print('> Fail Tier')
                yield {'test': lambda x, y: x.fail_tier(),
                       'simple': False,
                       'versions': None}
            # Unless determined, no tier past the failing one may be queried.
            assert tier <= self.fail_tier or self.determined
        self.last_tier = tier
        # NOTE(review): range(1, 10) iterates NINE times, not ten as the old
        # comment claimed; the count is preserved to keep behavior identical.
        for x in range(1, 10):
            print('Yielding Complex')
            yield {'test': lambda x, z: x.report(tier),
                   'simple': False,
                   'versions': None}
            print('Yielding Simple')
            yield {'test': lambda x, z=None: x.test_simple(z),
                   'simple': True,
                   'versions': None}

    def report_tier(self, tier):
        """Check that the last test run is on the current tier."""
        assert tier == self.on_tier

    def report_fail(self):
        """Check that the failure occurred on the designated fail tier."""
        print(self.on_tier)
        print(self.fail_tier)
        assert self.on_tier == self.fail_tier
class MockErrorHandler(mock.MagicMock):
    """Minimal stand-in for an ErrorBundle used by the submain tests."""

    def __init__(self, determined=False, **kw):
        super(MockErrorHandler, self).__init__(**kw)
        self.detected_type = 0
        self.has_failed = False
        self.determined = determined
        self.pushable_resources = {}
        self.resources = {}

    def save_resource(self, name, value, pushable=False):
        """Save a resource into the pushable or plain resource store."""
        if pushable:
            self.pushable_resources[name] = value
        else:
            self.resources[name] = value

    def set_tier(self, tier):
        """Set the tier (intentionally a no-op for the mock)."""
        pass

    def report(self, tier):
        """Pass the tier back to the mock decorator to verify ordering."""
        submain.decorator.report_tier(tier)

    def fail_tier(self):
        """Simulate a failure and notify the mock decorator."""
        self.has_failed = True
        submain.decorator.report_fail()

    def test_simple(self, z):
        """Check that the trailing params of a simple test are respected."""
        assert z is None

    def failed(self, fail_on_warnings=False):
        """Accessor mirroring the real error handler's failed() method."""
        return self.has_failed
| bsd-3-clause |
jdowner/qtile | test/layouts/layout_utils.py | 11 | 2371 | # Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Chris Wesseling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def assertFocused(self, name):
    """Assert that the currently focused window carries the given name."""
    focused = self.c.window.info()['name']
    assert focused == name, 'Got {0!r}, expected {1!r}'.format(focused, name)
def assertDimensions(self, x, y, w, h, win=None):
    """Assert the position and size of a window.

    Defaults to the currently focused window when ``win`` is None.
    """
    if win is None:
        win = self.c.window
    geometry = win.info()
    assert geometry['x'] == x, geometry
    assert geometry['y'] == y, geometry
    assert geometry['width'] == w, geometry
    assert geometry['height'] == h, geometry
def assertFocusPath(self, *names):
    """Assert that cycling focus forward then backward visits `names` in order.

    Each direction is walked twice to confirm the cycle wraps around.
    """
    for _ in range(2):
        for name in names:
            self.c.group.next_window()
            assertFocused(self, name)
    # Now walk the same path backwards, also twice for certainty.
    for _ in range(2):
        for name in reversed(names):
            assertFocused(self, name)
            self.c.group.prev_window()
| mit |
miloh/spreads | spreadsplug/gui/gui_rc.py | 5 | 176855 | # -*- coding: utf-8 -*-
# flake8: noqa
# Resource object code
#
# Created: Tue Jul 2 14:54:22 2013
# by: The Resource Compiler for PySide (Qt v4.8.2)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = "\x00\x00\xf1~\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\xc8\x00\x00\x01\xb0\x08\x04\x00\x00\x00FW\xe2\x0d\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdd\x07\x02\x0c6\x10\xe7\xec\xc9\xbf\x00\x00\x00\x19tEXtComment\x00Created with GIMPW\x81\x0e\x17\x00\x00 \x00IDATx\xda\xec\xddy\x98\x9dgy\xe7\xf9\xcf{\xf6\xda\xf7\xbdT\xa5RI\xa5]\xb2dY\xb2-\xef6\xb6\xc16`\xb6@H7aH M\xe8\xd0\xa4\x03IzKw\xd2\x19:\xddIz\x92\xe9l\xd04\x10\x02\x0e\x10\xb3\xe3\x0d\xbc[\xb2-Y\xfb\xaeR\x95jQ\xed\xfb~\xf63\x7f\x8c\xe7\xbaz\xe6\xca\x00\x0d\x18$\xd0\xf7\xcf\xb7\xea\x5c\xe7~\xce\xef\xfd=\xf7\xfd\xdc\xef\xf3\x9c\xc3U\xaer\x95\xab\x5c\xe5*W\xb9\xcaU\xaer\x95\xab\x5c\xe5*W\xb9\xcaU\xaer\x95\xab\x5c\xe5*W\xb9\xca\xcf6\xc1\x95\x10\xe4G\x82\xb3\xa1Gs\xec\x0b\xffi\xb4\xb5\xadv\xf3\x0b7W\x1f\xc9\xd7\xceVd\xb7\xd5t\xc5{gOU\x7f7\x1c\x0a\x1f\xfe\xe4\xd4\xff\xfd\xdf\xf7\x84\x1e\xcf_\x15\xe45\xe4_\x96\xfc\xc9\x12\xbf\xfe\xd6}\xb7\x16\xdf^Q>\xda\x94\x8e\xdeeI\xde\x94\x82)9I5\x12Fg\xda\xf7/\xc5\xcb\xf7M\xfc\xf93\xd3\xef)\xfe\xec\xf2UA^3~\x7f\xed\xc9_\x8a|\xf0|\xdd\xfb\xd4f\xae\xd1\x14\xce\x86^TdJRA\x9b\xd3\x06L:\xe1\xbe\xec\xd1\xd0h!\x16\x0eK|k\xc3\xaf\xfe\x97\xd1\xd7\x07\x8f\x16\xae\x0a\xf2cfC\xecl\xfa-\xbf\xd1\xf4gSV[\x95\xbf#\x88\x06Mr\xca\x1c\xf6U;\x94`@B#\x0e:f\x8bGu\x14\xc2\x85\xc5P*?\xfbK\xfb\x1f\xba.~0u\xe5\x09\x12\xbe|C\xbb/\xfc\xa5\xc8\xe4\xc5\xb6_\x18\xc9\xad\x0e5\xb9;h\x0a\x22\xc2R>\xaf\xca\xa0w\x9a\xd4h\x8bR\x87\xcc\xb8\xd7\x9cS\xf6\xba\x14d\x82\x19\x95\xb9\xe8\xdb\xd7\xed\xb9\xe5\xf1D\xb2\xa7p\xd5!?&\xda\xc3\xfd\xb9;.FV?P\xb83xE\xcaV\x09\xad\x8a\xfd\xad\x0e/\x0b\x9b\x14Q\xacBR\x83>\xfd6y\xc0\xbf\x15v\xb7'\x05\x8e\x17\xca\x83\xea\xccse\xbf\x93\xfe\xd7\x85\xab\x82\xfc\x98\xb8\xe1\xc9\x9d\xb7\xde\x19\x9eR!\xa2\xc2\x88.\x07\xdd\xef\xa4\xbf\xf6fY]\xbe\xab\xc8zG\xcc\x9bs\x93\x05\xe7\xecq\xd6~-\x96-+7\x94,;\xff\xf4\xf6\xb7\x85\xff!wu\xca\xfa\x91\xb9\xb6l\xe7\xb6\xf5\x1f\xbf)\x1
f\x0a\xbd\xd9\x13\xded\xc1\x13\x9e\xf1&\x17|M\xc2j7[\xb2Y\xad\x0b\xf6*\xd1\xe6\xb4\xfdn\x14\xf2\x05\xb7i\xb6\xd9\x88JK\xe1\xaa\xc6M\x95\x0f?\xfa\xe6\xd0\xd9\xc2\xcf\x95C\x0eD\x9e\x0fE2\x1f\xfe\xb1\x0e\xfa?W>2SW\xf8h\xb0\xde\xa3~\xc1C\xca\xdd\xe5\xcbb*\xacX\xf4\xa0\xacaI!U\xbe\xaa\xda\x8c\x03\xf6z\x5c\xc4\xaf\xd8oE\xceZ\x7f\xab\xd9\xa5|>\xb4\xf5\x9a\xcf\x1c\xfb9s\xc8\xaf\x04\x03\x82\xc2c?\xd6\xb0j\xfb\xd7D\xdf\x13\x89K\xcaJk\xb7J\xdaF\xad\xd6X%l\xc4\x92\xb4\x22\x0d&U\xd8`\xc2\xf5\xbe\xac\xc1\x06\x05aI\xc5\x1e\x15\x93#\xc8\x99\xac\x1cz\xf8j\x0e\xf9\x91\xb8?\x11\xfa\x85\x9a\xcf\xac\xb7Y\x85\xa4\x9d\xceZ\xa7XJXNN\x89\xb4y\x81\x9c\xac\x84E\x8bV\x1c5\x22\xa5T\xa95ZD}\xce\xb4Y\xcb\xb68\x93\xeb\x08fwD\xcf}\xfe\x8a)\x80C\x97[@wF\xbe\x95,\xfb\xc8\x9c\xcd\x8a\x15+\xf3\x92\xed\x06L\x0b\x9b\xb3$cYF\x5c\xb1iY\xe3\xc6\xad\x11w\xadj\xed\xce:o\x5c\xb9\x1a\xd3\xf2B\x02\xd3\x84\x87C=;\xbe\x1a\x5cu\xc8\x8f\x10\xd1'\x1a\xfen\xe4c\x85pPl\xd66\xb5\x06\x14K\x08\x0b\x89\x0b\x99\x17\x16RdTZ\x9d\x90Y\xcb\x8a\x0dyI\xb5gM\xd9\xad\xd1V\x7fl\xb5#*\x0c(\x91\x9f\xdfWq[\xf4\x99\xccU\x87\xfc/\xf3\xa9@H\xe1\xe0\xbe\xed\x99L\x107\xadC\x89y\xab$\x84-K\xcbZR\x22\xf9\xeat\x15X1o^ \xe5\x9b\x1a\x9c\xb6E\xd6\x84\xc0\xac\x11\x099#\x9a\xad\xe4c\xe5\x85\xa2g\xae\x98\xd2\xf7\xb2*{;\xea\xffi\xc7\xfc\x96\xeb\x1f\x08UW\x05uJ\xa5$\x84D\x84\x84\xc5\x85\x05\x0a\xc2*\x85\x84\xe5d0/o\xc9\xb3\xb6\xd8o\xb7\xc7\xdd\xa9[\x85\xd5\x96\x05\xba,\x8a*5\x19|}\xba{\xff\xd5\x1c\xf2\xbf\xcc\xc7\x9ab_=\xbd\xfc\xee\xdd\xfb\x83PP\xa3Q\xaf\xf5j\x85\xac\xc8X\x92\x93\x906-!\xa3 /\xaaTA\xb1:Y\xeb\x9d\xb2\xdd\x8b\xd6\x8b+\xb7\xd7\x80\x16}\xce\x1b\xb1$\x1b,dC\xb7_99$r\xf9\x842\xfe\xa7]\xdf\xdc\xf4\x9f\xca\xde6\xe6&\x1d\xe6\xdd\x8b\x9c\x9c\xbc\x0a\x11\xe3\x16Tk0%\x8c\x18R\x86\x84\xcd\xa8\xf3\x82\x06\x0b\x0ar\x0e)V+*\xad\xc3q\xeb\xa5Lj\xcdw\x97]9\x82\x5c&\x0e\xf9\xab\xd0\xe3\xd5\xd1\xbes\xb7\x94\xbe\xed\xe5\xdc=\x85\x1b5[-+'\xa3B\x95\x11\x83\x8a\xb4Y\xb0 
$\x10S\x10\x96\x16(\xd1n\xc0\x8ce\xa7\xad\x92Ug\xd9\x88\x0b*u\xbbU\x93\xa8r%\x85\xcc\xff$\xc8'J?\x11\xfa\xb9\x10\xe4\x99\x1f\xa9^\xfbg\xf9{\xa6\xe3\xbf\xb6\xdcR\xf0\x8d\xf0\xf5\xc1\x9ce9YaI\xf3\x96\x95JK\x1b\xd6fTXV \xd0\xe3\x82s\xce\x99rP\xcca\x09)Uz\x05\xfa\xdc\xe9Sj}\xd3\x05g\xac\xb8\x18l\xab*D\xfe\xf0\xd5\xf8\xde\xbf\xf8\xfe\xfc\xcf\xc7\x94\x15\xf8\x11\x9b'g\x1fx\xe7\xf3\xcfZ\xa5K\xc1\x90\x88\x8c\xac\xcd\xe6\x94;$lY\xc6\xb4\xcd\xc6$59\xa9AH\x95\x941E\x16\x85\x85\xcd\x98W\xa5D\xad\x7fcIT\xa5\x9c\xeb\xec\xd3`&\x1dd\xab\xaf\x90\xb5\xc8e\x14\xe6\x1f\xec:p0#\xe6\x1a\xdf\xb5\xa2\xd5\x94\x9bd\xfc\x91\x09yS*\x05\x9e3\xec]>o\xc86\x8f\xda\xe5\xa8\x8c[LI\x0b,X\xe5\x8c\x942kty\xdc\xa2ie\x86l\xd0\x97Z8\x7fh\xdb\xfePIh{\xf6\xaa ?0\x1f\x09\x9d\x8aW,\x0c\x85\x87\xac\xd2\xea\xdd\xfeV\xda:\xafw\xce[\x85\x84L\xaa\xf3_D\x1d\xd1\xeeQm\xa2\xe6\xc5dm0(\xe5.\x19\xcbfM\xd9hJJJ^\x9fy!\xdbL\xa5gg\xffE\xd7c\x0b\x9f\xbb\x22\xb6>\x5c6\xeb\x906\xdf\xc8TTfn\xa8*4\x057\x19\xd7h\xb5\x0a\xa5*\xc4U\x0b\xb4x\xd8\xd7\xa5E\x1dr\xa7Q\xd3\x8a\xc5<(\x10\xb8\xe4\x90Mf\xac\xd8iB\x85\x98\x8c\x93ZL+\xc7\xbc\xda\xd8\x17\xffnn\xf6_G\x1e\xfd>\xcb\xc3o\x07\xef\x0e\x7f\xe1\xa7\xdc\xaa\xbfl*\x8e\xda\x80\x8d\xb7\xef~f)\xb8\xcew=\xe3\xdb\xbe\xe4\x8c!\x15\xf6+\x17\xe8\xf5\x90-V,\xd8\xe3\xdbV\x1b\xc2%\xc7\xa4\xad\xf5\x06\xd7\xf8\xac\xb5\xe6\xc4\x9d\xb6\xca)\x83v\xa1\xdd\xac\xbcXx9\xdeu\xdf\xd1\xdc+\x99\xef'\xc7}\x85\x9f\xfe\xe7\xf1\x13\x0f\xe0\x1b\xf1\x7f\xfc\xfa_\xe6\xf9\x1f;\x0e\x8c\x96zX\xc6\xbf\xf6\x9c\x03\xae7\xa5\xc3y\xcczD\x85qMn\xf0^\x17\xfd\x95q\xef\xf0\xa0Q\xfb=k\xc5&\xbfjH\x8b\x15\xeb]T\xa2]\x9f\x98\x03\x9a\xac\x92U!\xfd\xb6\xa00\xfd}\xc6z_\x01?\xf5,s\x99\xd5\x1e7\x5c\x9a\xaf\xce\x14\xfd\xa2-\x1e\x11Rl\xb3\x94\xa4\x8f\xfa\x0b\xaf\xb8\xe0\x1e\xf3\xaew\x8b\xcf8*'kN\xd2/\xf9\x86s\xae\xb7\xcb\xb4\x11\x9d\x86U\xba\xe0f\x0d\x0e)\xf7)\xcd\xa2j\x0a\x97\x82\x17\x02\x91\x7f\xec\xe3\xfef 
\xf2\xc0e\xd4x\xbc\xcc\x16I\xd3ekr\xbf\xe09\x87d\xf5y\xc5\x17\xbdY\x07\xe8\xd5i\xc2\x83\xce{\x87\x8cU\x12\xc6,\xfb\x9c\xaf\xf9\xa0\xb7\xe9\xd0#\xecuRV\xe9\xd6\xa5\xdeoz\xc8\x82\x06i\xb3z\xa5\xdd\xf5\xc1\x7f\xfc\xee\x7f\xa0\xf0\xc0e\xd5\x07\xbe\x8c\x04\xd9\x16\xde\xba\xa6$\x91\x89\xed\x12V\xe7\xa0g\xed\xf2>\xd3\x9a\xe5\xf4\xd9h\xc5\xa2\x84\xfb\xdd\xe4QI=f\xfc\x9a\x97\xfc\xa6.o\xd2l\xcc\x82^\xf7zJZ\x8fGmP\xea\x82\x90\x84\x88L\xd0\xbe<\xf2K?\xb7\xad\x93/\x94\xfcp\xafk\x0a\xad]'\xba\x18\xa9Pm\xc6\x1f\xdb\xe7=\x1e\xb4\xa0\xc6\xbc\xa4\x15\x0b\x9a\x1c\x93\xb2\xc3\x7f\xd7\xe9\xbf\xf93g}\xd7\x88\xbcq\xe3\xeau(\xf3\xa4\x07L\xd8\xaaW\xc2\x82\x9d\xda\xcdXm\x95\xb1ph\xd3\x87\xeb\xdf\x10\xb9\xfc\x05y\x0dB\xfc\xc5\xa5\x1f\xb2\xb9(T\x5c\x1c\x84\x84\xd5\x0a{\xd2\x0e[\xbc \xa1\xce\xa8y\x81\x16Y{\xad\xb8`\xcem\x9et\xc9\x8c\x1bmR\xedf5f\xedS)\xef\x15y9c\xdef\xd6\x11\xfd\x02\x11%\xe6\xb2u\xa9?\x1b\xff`\xe8\xf2\x17\xe42z\x1e\xb2\xa2\xea\xfa\x957\xadR\x15\x14\x09\x89)7i\x97\x01\xabM\xf8\x8a\xa4\x0e;\x1c\xb3\xc56\xc3\xce\xda\xa0\xde\xad\xb6i\x92\x954\xe9\x98r\xfdf%\x11\x95p\xd85\xce\xd9jN\xd2\x94.\x97b\xd7\xff\xfd\xc8Bo\xfe\xe7\xd0!?\xfc\xecY\x9eH\x09\x09\xfbM\xa7\x94\xcai\x14\x91T\xe1;\xf6xL\xab/y\xbb\x12%^gERJ\x99\x88\xb8~\x09\xcch\xc7\x94E\xf3Rj\x9d\x15U\xef\xb0\xb0\xb0\x98\x05\xd7\x9a\xd0\xf4\xf3\x99C~8^\x1f\x5cS\xa8\xa8\x9c\x93\x0a\x06\x1cQ\xa3L\x85Es\xe6\xac(8\xa4\xcb\x17\xad\x13w\xd2\x82\x8c\x84J\xedbV\x9c\xd3\xea\x19O*2 
\xa7CH\xd2\x0e\x17T\xbc\xbaU('P\xae$|&\xbaP9\x12\x5c\x15\xe4\x07\xe6\xd1B4\x9a9\x91(\x14\x17V\x89Z1\xa3_\xa5\xbc\xb4\x93\xfae\x9dv\xad9Yq\x83z\x0d\x9a7h\xde\x8ay_\x15q\xbdE\x1b\xc5\xe4\x84\xa4\xed\x17\x91\x91q\xa7\x11iMF,\x85+\xc37\xd7]w\x058\xe4\xb2\x99\xb2\xbe\x1c\xbc=\xf3@G6\xe8)\x0ci\xb5l\x83Ns\x8a\xd5\xebP\xa2\xce\x8ak\x9cvN\x91@LJ\xd2\xb2\x15i\x09w\xfb;\xe7\xdd\xe9\x94\xa8F\x874j\xd7b\xd1\x93\x0e\xe92b@\xc8$\x9e\xc9\x04\xa1\xab\x0e\xf9\x81\xf9\xa2Z+%\xe5\x9a\x82!)\x0d\xf2\x0a\xaa\xfc\xa9\xb4\xaf\x889\xa1\xd5\xa0\x09/;\x89\xb5\xe2V\x19\x975`\xd4_\x08+\xf7\xa8\xacs\xce+\xe8\xb2d\x83q\x05\xcb\xea%\xcd\xb8\xd9\x8a\x1e\xa1P&\x7fU\x90\x1f\x98\xe5`\xb2\x90\xea\xda\xa5\xccEG\x8c\xaa\x12\x92\x12\xf3N\xb7X\xb6M\x9b\x82\x90\x94R\x0b\xce[\xef\x88%+f\x8di\xb4dV\x8b\xaf\xb8\xdf\x8a\xcd2\xea-yY\xb3=\xfa%q\xda\xa2_6T{<wU\x90\x1f\x98\x02\x12\xad\x87\xcdX\xaf\xd7\xb0\x22!\xa3BX\xed\x97\xb5\xe96j\xd2\x929\xbd\x12\x0e\xa8\xb7\xdb\x88S\xd6\xeb\xd1f\xca\xb0\x9b\xbc(P\xe1\x09c\xfe\x5c\xc2\x84@\x91V\xc5\x02!\xc7$\xb6O\xe7\xb6]\xf6i\xfd\xb2Y\x874\x85\x06\x0a\x89\x7f\xbb\xad\xa6Y\x9f\x94N\xd7\x09<\xe9\x98\x13\x06}\xce\x11\xcb\xfa-H*\xf7z'm\x135\xadV\xbd\xf3v8\xe2\x1aO\xcb\xa86\xe7\xa0\xa41Q\xacqJ\x93v!\xf5.\x8a\xc9\x95\x0c\xfdM{h\xa8p\xd5!?\x10\x89BG\xa2\xb6~\xc8\x82\xbc\x17\x85\xf0\xb2\x05\xff\xc5\x09\x0f9jV\x06iU.\x18\xd6d\xc1\x09\x17L\xa9q\x9d^e\x9e\xb6]J\xde%\x09\xeb\x8c\x0a\xd9-\xa7F\xbfA\x15\xca\x85\xd5X,g\xee\xea\x94\xf5\x83\xf2\x9d|\xd7\xbad\xc5\x92@R\xab\xcf;\xa7\xc7v\xfc\xef\x1e\xf5\xfb\xde+\xe4V\xd7iw\xa3ia\xcf\xc9[\xab\x5c\xb5q;\x95\xd8hHJ\xb9q\xc5\x12j\xd4\x1aw\xcam\x8a\x95\x99\xb2`\xab~5\x0d\xa2\xbf\xfe\xaa?~\xbb\xe8j\xd9\xfb}Y\xfc\x8d\x9c\xb8J\xa7e\x94\x99\x11\xb7F\xb7k\xf0\xb7\xbe\xe8\xc3F\x85\x04\x92z\xddl\x8fy}\xa6lu\xd4I\x93\xd6\xf9\xae\x8d\x86l4dL\xa7\x09[m\xf7\x8c\xb4!\xefr\xd8\x80\xb4\xfa\xd2\xfb\xda~\xa3\xe7\xd57\xcb]u\xc8\xf7\xe4\xd7C\x85p\xf6\x9f\xc6\x97'\xf5I\x88\xe9\x91\xb7\xd6\x0bj\xfd\xb5N\xbf\xe1z\x1f\x96\x14\x11\xd7k\xa7A\xb3\xce\xaa\xd7\xe0\xbf\xaa2\xad\
xddCn\x90\xf1\x92^\x0d6\x9b\xd3i\xbf>\xcd\xea\xc4}Y\xce=\x9a\x0b\xa3\x8a\xde\xfd\xff\xbc\xdf\x1f\xa5\xaf&\xf5\xef\xc1\x7f\x0a>\x1d~\xf8\xb7\x8b\xef\xca\xc76\xa8wA\xb9>\x0f\xe8\xf0uO\x98\xf7z_\xb7M\xdc\x88\xe7\xe4\xbc\xd7[\x15\x9bS\xed%\x14\xeb\x95\xc5\x92-\xf6\xdb!k\x5c\xadI\xe36H\xc8\xe8\xb5`\xb7\xbc!KA\xac\x10N\x9d\xfb\xfc\xd5*\xeb\x07`>r.\xdb\xf8\xa7\xcb\xcd\x15A\xa9\xa8^qK\xea\x8c+v\xcank\xddf\x8f\xcd\xa2\xae\xb3\xd6;\xfc\xba{\xbdbL\x97\xa7\xcd\x9a\x97u\x5c\x99\x8b\x16\x8d\xc8\x08\xe9\xf5\x06\x1d\x8e*7\xa8\xd6ju*\x9c3\xae\xae0\xb5\xae\xfeX\xcb\xe0\xf0e}R\xe4\xb2\x98\xb2\x0e\xe4\x0f\xd4\xd6\xec*\x04UJ\x15+\xb3`\xab\xc7-\x0a\xfb\x8cV\x97\x0cyQ\xd6f\x9d\x22*|\xc0W\xac\xb6\xe2)\xe3\x12\xc2\xca\x14+1$\x903'e\x95y\xdd\xea4+\x985'\xa3O\xbb5\x84\xea\x0b\xf1\xbf\xad.\xe7\x83\xe1\xab\x82|o\xf2\xff\xfe\x0f\x92\x9a\x5c\x92Q&)bF\xde!\xf7(\xd2\x22\xea\xbc\x01\x8fxX\xc1Y\xfb\x84E\xc5\xcd\xcai7f\x83>\x0b\x06-\x99W\xaa\x5c\xad\xa8\x09\x97\xac\xb6l\xa7\xb0\x88\xb8\xac\x06ie2\xc1\xaaxd\xf8W\xdf\xf5R\xe4\x1d\xdfs\xdc\xcf\x07\xcf\x07?\xdf\x82\x88v\xce\x98T\xaa\xc5\xa0\x82\x9c)\x9b\x9d4m\xd8E/\xf9\xba}N\x8by\x5c\x95\x0a\xf3\x8a}\xdd\x98S\xaa\xac\xe8\x91\xd3\xa6_\xa32cj\x84TX\xb0\xce\x09S\x86\x84-+3b\xd9EcV\x99\x8e\xb5\xe4\xcf~\xa1\xfe\x91/\xe5\xbf\xe7~\x9b\xe0\x7f\xfek!\xfcs&\xc8\xaf\x06\x85\xe8\xf8m\xa5\xe9\xbcJ=\xf2j\x95i\xf4\xbcj\xd3\xc6\x95\xb8\xd97\xfc\xb9wz\x93\xad*4\xd9n\xab\x94\xbd6\xba`\x87\x01\x95\x8ej\x92\x90\xb3AB\xc4\x1aS\x0e\xdb%o\xd2\x8a[\x94\xba\xc6qm\xe2\x92\xf2&C\xd7:u\xeb\xed\x85_\xbf\xe7\xff?\xa2\x9b\xf37\xbf\xda\x86\xfc\x0f\x01\xbf_\xfc\x1f\xff\x11\xf1\x9e}\x8d<\xf4\x9a[\xf3@Xa\xf7\xf7\xe9\xb2\x1e\x0a\xfe\xdd\xd2d8\x12k\xd7*\xee\x80y3v\x9a1\xe03b:ed$\x8c\x19\x96\xf0\x8a\x12+\x22\x9eQeT`BV\x9bC6\xe8\xf5Q\xfb4\xfa\xaam\xc6\xcd\xa1\xd9Y\xbb\xbd\xddC\x96eL\x0a\xdb`Q\xa0[\xf5\xcaR\xd1\xb5\x1f\xfa?\xff\xe2W\xa3\x9f\xfc!S\xfc\xb3\xf1[SW\xa4 
\xdf\x9f\xfb\xc3\xb95c\xa7k\x0be\xd1\x15\x8dr.XPmR\x5c\xb3\x16\x1fU%\x22@\x8f\xb5\x9ep\x8b\x03\x1e\x932.$g\xd6yo\xf4\x98\xbb\x1d\xd2\xae\xd9\x8c\xd3\xb68\xa4H\x9b%\x8b\xaa\xac\xa8\xf1&\xdfu\xca\xbb=,\x22\xe5\x0e\x8fhw\xa1\xb0*\xd8\xf1\xee?\xfc\xc2\xd6\xe0\xc4e\xd5\xdd\xba\x0c\xea\x8dx8\xd3\xb9\xf0\xbe\x12\x89\xd0\xad\x0e\xdb(\x83R!eB\x0e\xd8k\x8dE!\xf3\x86\x9d\x16\xf2\xd7ze\x05\xe6\x95\xcbI)\x17\xd1a\xdc\xb8\xb7\x0a\x1c\xb0JFA\xa5E\x0b\xd6\xba\xa4\xc6\xfd\xfa\x0c\x88\xe9\x11\xd5*\xaaO\xdcjSA>\x93y{Y\xe4\xd0S{\xc2\x97S\xc3\xf12\x10\xe4\x17\x22\xb57\xc4\xdfQ$\x15\xf4x\xa3\x1a/Z\x12(\x96\xb4\xce\x98e'l2\xef\x92bC\x1e\xb1\xc9\xa2I\x8b\xa6\xf5Y\xb4VV\xc1iI\xbf\xe9\x19\xe5\xa2\x92\x86\x94\xd9m\xc0\x0e}Vl\xd7\xe5\x90\x94*\xed\xe6\x8d\x0b+\xb8\xde\x90\x15\x99\xf0\x8a\xce[V\x9e=~\xf1\xaaC\xfe'^\x0e\xfe(\x12z{\xff-U\xc1\x8cf\x81\xa4!\x0b:\x9c\xf6V\x07\xa4\xa54\x9a1\xe2\x80\x01\x93:M:\xad\xd4\x09qy\x1d&\x95\xe9\xf1F\xf5\xc2J\xec\xb7`\xb5\x88Y}\xf68\xa4DZB\x95Ur\xa6\x9c\x94\xd4\xee\x92\x8c*}\xf2VY\x90MF\xde=\xf0qW\x9eC\x9e\x0e~9\xf8\xeck\x12\xc0c\xe1\xd2P\xf8\x89\xb6\xc2Hp\xab\xa8~\x15V\x14\xe9\xb7\xcd~eV\x948\xa4 
\xa7\xd9\xa4\x03\xce\xea7n\xd2vs:\xf5\x08K{\xd0\xa0]N)\xd2'\xac\xc8\xa0&\xb5\x0e(\x17\x92\xb5G\xc2\x8a\x8cy)\x816!\xab\x0d\xdb`\xdaFSJ\xc3K\xd17\xf6n:\x7f\xf4\xb29[\xf5\x83\x96\xbd\x81\xc8\xd3\xafA\x01\xf0\xde\xe0\xa6\xc4\xd6\xa9B6\x1b\xac\xf1\xa2\xb0:\xc5\x96\x0c\xdb\xe0\x9c\x1bL)R#\xe3u\xfe\xc1'<,$f\x93F\xd7h\xf5\xa0q7\xb8E\xccY]\x96\xc5]\x107eZ\xadaY\x81Y\xb5\xa6\xf5\xaa\x920*\xa2X\xa5A1Q!\x97T:#\xaa1\xa8+\x5c\xfal\xef\xb5W\xdcJ\xfd\xf6\xbc\xec\xed?vc\xdf\x1d\xfata\xfe\xff\x98*i\x8fL\x1aU%\xafA\xa0F\xdeI]\x9e\x14\xb6\xd5%;<\xed\xcb\xae\xf3A\xbb\xed\xb2\xdb\xaf\xfb\x15\xe5&\xddf\x8d/\xe8\xb4^\xd8\xdf\xdb\xa6\xdc\xac&\x95\xf2\x92\x86\xb4\xdb\xe3\xb0\x15w9\xec\xbcF\xadR.\xca(1-gN\xd4\x1a9\x07M\x04\x93\xc9U_\xfe\xf1\x8c\xe7w\xab~\x829\xe4\xb5\x98\xb0\xae\x09\xd75'\xfen\xba\x90\x0b\x96\xb5\x18\xb3S\xa5Q#\xe6\x14T\xdb\xe6\x9ceq\xf5\x16\xb5k\x92\xb7GV\xa3\x88A\x05qm\xce\xd8aIF\xc2z\x7f%\xa5\xca\xa2\x01\x111-N\x1a\xb2I\xb9\x11m\xda\x9cQ#fI\xa5\x8b:,\x08\x04\x8ei\xb0\xa8\xc5r\xa4P\xfa\x9e\xbd\xff\xf9\xe0'\xa6\xb6\x87\xc6~\xa4\x9b\xee\xe6\xe0\x85\xecOL\x90\xd7\x823\xf9\xcd\x9f^\xde\x98\x0d\x22\xd6Ia\xc1\xa4j\xb7xV\x83\x153bZ,\xeb\xb3\xc6W\xfcK\x9b\xfc\xbdv=6XQe\xd0\xb7\x14\xeb\x945a\xd0a\xf7yI\x9dao\xd3\xa7J\xbf{M\xe83\xeb\xcf\x1c4\xeeZ\xffC\xdef+\x86\xc54\xa8\xf7\xb4\x1b\x85\xdc\xed!\xaf\xf7\x8aK\x8d\xdf\xfc\xc8?\xfd\xd6?\x0c\xfdh\xe3y!\xfb\x13t\xc8\x8f\x9f\xb7\x85\xeaoI\xfd\xd1X\xa63<\x22bI\x9bJ\xe5\x86\x95\xd8/-n\xb5\x9c\x8cyi7\xda\xe1\xa0\xdbm\xb6\x22\xab\xc8\xb8C\xc6m\x172\xa3K\xc4\x92r\xcfk\x17X\xe7\x84N\x136y\xc1zIo\xb0\xdf\x1eE\xf2\xfa\xb5KK\xdb`Y\xdcq\xefv\xc6%Uj\x0c\x88\x89\xc5\x1a\xb3\x83\xbf\xd6u\xcd\xba\xd6\xed\xa9\xd3#;\xc3#?\xb5\xba\xeb\xa7&\xc8-\xa1G\xf3m\x7f9\xb1nO\xf8\xb8\xbd\xa6\xa5\x95\x9a\xf0\x16\xa7L)\xa8\x95rF\xb5b\xc5V\xd9\xa7\xd1\x93\xfa\xdc%\xae\xc3\xa7<-\xadN\xb5m\x9e4\xa5\xdcK\x8a\xcd*R\xe2\xa8*3\x86\x8d\xd9ePZ\xab\x16\x93RR\xe6\x14\xa4%\x1d\xb7\xd7Y\x8d\x06\x14D\x1dS#i\x1a\x89\xd0r\xa1t}\xdb\xdd\xc9\x0fl\xaa\xe9\x98\xbcy\xe2\xe5\x9f\xd2C\xde\x9
fZ\xeb\xa4$\xfc`U\xef\xc4\x9c\xa82!5X\xb4S\x93'l\xf2\xb0N\xe5\x9a\x1dQ!nF\xa3\x1d2\xa6\x9dS\xa3`\xb5\x9c\x0e\xa74\x19T\xe6\xb8\x16/[oI^V\xc1\x90MfU[\xb6(\xe6FKJ\x14\x0ch\xf3\x8c\xd5b&\x14)1\xac\xc3\xa0e!Kj\xc4\xf5\xaaR$#(t\xe7w\x85\xfb\xc5\xbf\xfc\xf0\xfbkf\xbf\xf7\x08>\x1e\xfcn\xe1\xe3\xe5\xbf;\xff3\xe2\x90L\xa1z~1]\x15\xa9\x12\x91\xb4C\xde\x98%G\xdd\xa7\xc4\x09\x8dV\xcc\xdaiLJH\xde\x846\x8d\xc2\xb6\x88iWlF\xc2Y\x9buKh\xb2,\xa4Y\xb5WDl\x106\xa8E\xb9\x94\x8c\xcd\xc2z\xb4\xd8l@^\xca\x8a\x22\xe5\x8e[\xfdjke\xdc\xa2V!%\xb2\xc8X\x1f\xcc\x87\x0aB\xac\xfd\xfc\xbf\xba\xed\xa1cS\xdfk\x04O\xe2\xc9\x1f{\x83\xf1\xa7\xd4~\x7fc\xe4-\xb7\x05\x85\xdaH\x97%3\xc2\x86\x0c\x88)q\xab\x93\x0e)Q\xb0\xa2\xday\x81eI\xd3\xba\x5c4\xaa\xcbi\xf3\xb2F\xcd\x99\xd7e\x5c\xde\x9c\x97\x95\xa94\xea\xa4\xb5\x16\x15\x9c\xf2F)!\xcbf\xc5\x8d\xe9T\xaaHARJR\xc4E;\x84-Z\xd4oV\xa3A\xadj\x8d*\xb3\xe0\xac\xac\x98\x0a\xd1x\xaap\xe1\xf8\x077\xff\xdc\xe4\x90\xa9\xc8\xec\xdb\xebn]\x89\x9dWl\x95R3\x8a\x85\xdca\xc0\xac\x22\x97\xa4\x94H\x0aL\x88\xa8Vj\xc0\x94\x12\xed\xae\xd5g\xbf\x1a\xb7\x99T\xe4\x15\x8bn1\xe4M\x0e\xc8\xa2\xc2\x8d\x96=\xe8\xe3:\x8dY\xf6>\xd3*\xa5\xad\xf7\xa8j\xf5\x92\xf2\xceXm\xccZC\x12nrJ\xbdECFT+\xb3C\xb9\xbc\x9c\x90\x0eSASdh\xfbu\xdf\xea\x5c9\xf7\x13M\xf0?\x15\x87\xbc-<\x99\xee\xdc\x11\x8b\x04\x228\xabU\xa0L\x95Ai\xd5\xae5\xa3\xcc\x8c-\xe6\xedr\x83AYQ\xd5\x1a\xa4\x0c\xe8\xf6\xa0\xb0'\xdd\xe6\x88:M>k\xb3\xaf\xb8F\xc8}\xcez\xc2\xb8\x8f\xfb\xe7\x16l\x93\xd3\xa3TF\x97\x93\xca\x14<'bX\xb5\x1e\xe5z-j\xf5\x82\x0eU\x8aLi\xb1(\xe7\x05/[1k\x8b\xa7,\x9a.\x9c\xdf;\xff\xd07\xf2\xd7D~\xe6\x1d2\x1aY.\xfc\xb7/\xc5CK6\x9b\xd0\xee\x92V\xc7\xd5\xab1\xe4F\x9fV\xaaRJ\xaf\xb5\x22\x9e\xb7\xc1\x8a\xd5&D\x9d\xd3\xe7\xf7\x9d\x12\xd7\xe0\x0bZ\x04N\xb8\xcb\x01k\xbd \xeaqM\xaa\xac\xb3\xddg\xb5\xe8\x11\xd3n\xde:\xdd\x86\xdc\xe1a;\x14l5)g\xdc\x82k4[\xd6\xaf 
m\xbb\x1e\xb7\xeb5.a\xd2\xb0\x19\xad\xca\xf5\x05\x1f\xf3h\xc7\x8d\x83\xcf\x1d\xfa\x99\x17d%\xff\xad\x1b\x0b\xef\xcb+U0!\xa7\xd9\x92\xf5r\x12\x92\xba\x5c\xe3\x19cnR.\xabD\x5cA\xcc1\xdbT+h\xf3\xbc{\x0d\xe96\xaa\xca\xb2b}nv\xd0\x9d^\xb2W\x93\xf3\x02\xfd\xba\x94)\xd2\xa6\xc5\x88*u8o\xab\x19e\x06\x0d\xaa\xd5iD\x83)i\x1d\xce\xab\x14\x96uP\x95\xb3\x22\x0a\x9a\x8dI\xaa2e\xbf\x9bs\xe9\xeb\xce\xfc\xe9\x15\x22\xc8\x7f\x0d\xeeM<\xfeC\xadM\xff,|\xee\xf4r\xa8<T\xe1\x8c7\xc88.\xad\xdc6\x19\x09\xcf\xca\xe8W%\xac\xd4\xa4au\xe6\x5cg\xc6\x90\x8b:\x9c\xb0\xd3\x17\xb5\x98r\xb7s2\xea\x0d\xe9v\xab\xaf\xf9\xb0AE\xd8\xee\xa2\xc0Y\x05c\x96\xbcA\x8fE\xebTx\xc1\x9c\x11\x1blvNZ\xd8\xa2\xb8n\x97\xec\xf5\x9cYc\x1a\x1c\xf5\xa0Y\xeb\xd4\x89)7\xa4U\xcal\xa8\xb2\xa4n\xa6\xec\xc8x\xfe\x0a\x10\xe4q?\x9c\x1c\xffb\xfb\xf3/,\xd4\xb5\x843\x12b\x16-\x88iT\xaeXX\xbf\x8d*\x1dq\xb7\x09\x0d\xa6\xedv\xc2\x82\x22\x0b\x02\xa5\x02\x1b\xa4m\xb2\xa8`F\xc19\x11\xcb68\xa7\xc1i\x9b=j\x8d\xb3J-\xdbaZ\xdc\xb5&T;+l\xc0\x88e\x19\xcbzdl5#\xa6I\xcc\x1a\x87\x15\xb9d\xb5Z78))\xa6\x08#n\xf5\xa2\x16!\x89\xdc\xc8\x91\x9a\xfd}?\xb1\xf6\xfc\x0f\x98\xd4?Z\xfa\xd1\x1fC\xfa\xbf?\xe0\xf6_;u4\xddZ\x16\x1aP\xacU\xa99)\x1bE]4!P\xae\xc9\x01%&4{R\xa9)\x09\xf7\xe8\xd5\xa6N\x9bR\x93\xca5\xb9\xa8`\x8d\x8c&\x81v\xc7\xbdY\x99./jq\xde\x9c)1\x17\xc5\x95\xd8m\xd2\xb2=\xce\xf9\x8e\x1a\xc3n\x12\xe8\xb5\xc5\x98N\xb3\xc6,\xdag\xb5Z\xef0\xa5Z\xbf\x9b\xed0'\xa7\xa0\xdd\xa8\xbd\xca\xb4K\x16\xba\x1a>\x97z%t\x999d\x7fz\xff\x8f\xa1\xf8;\xef\xb6\xfbr_\x18\xca\xd5D\xa7\xb4\xe9\x96\x15\x95WgNZ\xb9\x16ys\xd6\xd9\xa7N\xaf\x84\x9c%iI\xfd\x02\x05\x13\x0a\x22\x16\x05Rb\xc6d,+\x13\x96\xb4\xd1q\xb7x\xc5\x0dN\xda\xa4HL\x5c\xd8\x8a9\xd7J\x88*5\xabK\xaff\x17M\xda\xac\xd7f}*\x85\x9dRk\xc2\x92\x12\xfd\x9a$\x959\xafJ\x87A]J\x0d\xa8\x951\x1fn\xbc\xf6\xdc\xc3\xbf4\xf63\xd9:\xd9\xd6Uwn9\xb96qD\xa3i\xa5\xda]\x90P\xe7\xb4\xfb-{\xb3/*\xb6\xa2\xc7\x82U\xe6\x94cL\xa7\x8cNg\xdd\xaa\xc8A/j\x12\xb8\xc3.g\xa4\x9c\xb1\xce#ZddDU\x18\x17\xc8\x88\xa8u\xc1:5.\xda\xaeE\xaf\xb3*\xcdI\x1a\xb3\x
d9\x117:\xaeV\x5cR`X\xa7n\x8b\xeer\xde\xbcR9\xbb|\xdb\x0e1\xe7\x14\xe9\xd3*b1W\x9c\xfbRC\xd5\xec\xcfX\x95\xf5\xcb\xf1\xa3\xb9\xf5\xfb\xb3\x95\x85\xd8\x92r\xe5\x8a\xb1\xc3\xac\xacy\xab\x0cj\xd3\xa1\xc1\xb7\xadu^\x01\x8dV\xe4\xdd\xee\xab\xcalw\xafa\x17=\xaaY\xd6\xfb]\x90t\xdeK6\xf9\xaeMb\xeaDT\xe9\xb1\xd9\x985\x1a\x1cS\xe7\x98-*\xc5MZ\xb2d\xc2*\x8br\xe2\xb6\xba\xa4V\x91\x17\xcc\xab\x92\x93\xb3Z\xb1)\xe56\xdal\xc4\x98\x06\x13N\xb8_\xca6y\xf3\x0a\xa1hp2\xf4\xf2S\x05\xff\xe1gI\x90\xa3\xb9_\xfc\xf2\xd2\xcd\xb3\xa1:e\xbaUJ\xd9\xe5\x05KV\xab3c\x95\xe3\x8e*\xf2\x1b\x8e\xebU/\xea\xbcuN\xc9h\xd7&\xe5\xcbn6'dJ\xda\xf3\xaeu@\xa7\x8d\xaa%\xc4\xbc\xcb\xd3\xf6\xfa\xaa&\xcf\xeapDZ\xbfY\xf5\x0e{\xbd\xb3\x12\xd6\x9b0\xaf[\xc1\xf5.\x99\x96W\xabO\xd46=jlsXA\x83f\xc7\xbdd\x9b9w;#\xa3R\x9f\x98~\xb5\x96\xcd\x06\xb9\x9b~\xfbo\xeeJ\xae\xe4\x7ff\x04\xf9\x8fA\xf3\x87.\xfcV}n*T\xef\x82\xedz\x5co\xd8\x06gT8\xa2\xd1I\xf7\xeaP\xed\x11K\x16\x15\xcb(5\xefF1\x17\xac\xd6\xaa\xc39\x83\x06u\x98v\x9d\xb8M\xa6T\xb8`\x9d>\xdft\xb7\x17\xec4\xeevYS\xe2\x9a\xad\xd6\xefn+\x1aL\xf9\x86\xd5\xa6d\x94\x0b\xac\xd3/\xe2,f\xdc\xe0\x82:\xe7\xed\x155mH\xbb\xf5\x9a\x0dYT\xae\xc9V\x87\xbc\xc1\x01\xd5BJD\xb2\xcf\xbf\xb1\xfb\xcf\x7ff\xa6\xac[b\x11\x87\x1f]\x9f8\x15\xfa\x05_\xb7\x1e\x81\x84&+\xf6\xfa\x86\x1a\xf3\xeeR\xac\xc37m\xd4cR 
\xa6\xd8\xb8*5\xde\xe0\x922\xd5\x9e\x12Qi\xb3#\xca\x04\x92\x06]R\xe9a{E\xd5)\xd1\xe3-\x9e\xd3\xe1\xa22\xe7\x0d\xa8T*l\x93\x94\xc0Q\xcbJ\xd49(o\xb5:g\x94\x99\x97\xb1\xcbK\xea\x1d\xb5\x1ew{\xce.\xc3\xeev\xd6\x87\x0c\x1bR\xe7\xf3\xee7l\x8b\x84>m\xb5\xcb\xe7\xdey\xea\x95\x9f\x0dA\x82\xe0\xdf&^\xf9\x83\xf9\xa0\xd5AM&\xd5\xca*7#\xee\xb4\xb4\xcd\x96,(\xf7\x92\xeb\xa5\xf4IZ\xeb\xa2V+\xc6\xb4\x9b\x96Rd\xc8\xb06\xf3\xceZ#\xe7\x92\x8d\xbat\x98\xb0V\x95\xb3\xaa\xe5%\x159\xab\xdbZK*U\xca\xe9\xb0N\x9fA\x1b\x9c\xb6\xde\x9cYw\xe8\xd6\xa0[\x83i!\xc5zm\x10Sg\x97\x98\xa3\xdenXZH\xa3oZ\x90s\xc6v\x93V\x1c\xb5\xceL\x10S\xd5\xf6\xd0'\xdf\x1b>\xfa\x1a\xb7\x1a\x7f\x22\xf5u_\xf6\xf7\xae\x8f\xc9KK+h0\x22\xa4\xcd\xcd\x9eT\xaa\xd3\x80\x90%)\xbb\xd4xJJ\xc1\xb4J\xafht\xaf3\x92\xaaEL\xa9\xd7\xad\xd2\x1d\xce\xa8\x94u\xc4Y\x05a-\xd2\xd6\xda\xac\xdb\xf5\x0e\x0aYVcZD\x9dEG\x85\xcd\xab\xf5m\xb7\x1a\x10Vm\xdenC2\xd2*\x94\xbf\xfa]))\xbf\xe6\xbc\xd5vhs\xd8\xdd&\xad\xb5KR\x89Fs\xd2Z\x94[P\xad>\xc8\xed\xbe\xff\xfaO\xbf\xe6\xcf\x11\x7f\x22eo{Q_\xe6\xdaLF\x91j\x11g\xb4\xbe\xfa\xbb\x05!K\x96\x8dk\x93\xb6W\xab\x83\x86\x8c\xc9+h4\xa4\xcb6iq\x17\x14YpH\xa0C\xc2E\xa5F\xf4\xfb5+\xca\x95(X\x90\x923bE\xb5I\x19\x15\xce\x0b\x14\xeb\xb4\xc6\x8a\xac\xc3\xda\x0c\x88\x9a\xb6\xd7\x9cnc\xcaU9\xe1v\xc5B\xae3k\xc1\x8c\x90\x22m\xbamq\xc1j\x11\x7f\xed~\xcf\xaa\xb4\xacR\x5cX\xca\xe9\x5c\xd9\xc8\x13\xab~&\xa6\xac\xb9\xec\xa3\xfb\x1b[\xa3V\xcc\xaat\x97\x09qY\xab\xd4y\xc9\xb2\x0e\xc3\x22V\xfb\x96\xf5.\x1a\x11WlR\xbb\x0b\x9a4\xeb\x97\xd3\xe2\x11-\xeat\xeb\xf6:\xd3*\xb1\x22i\xce~\xe5f\xb4it\xd1&\xfb5\xb8h\xc85R\x9a%\xa4U\xaaP\xef\xbb\x8aTY\xe5\x09\x9c\xb1^\xa9\xe3\xd6\x9b\xf5\x9b\xe6\xf5\xea\xb5\xc7\xa4\x8cV\xe5*]t\x9d\x9c\x13\xde\xe8\xef\x95\xda\xe9)\xb7\x9b5*nCn\xa0j\xd7\xb7\x8bF_\xdb\x0d\x10\xaf\xb9 
\x9bBkc\x9d\x7f\xbb\xf6\xf5\x99\xc2\xf9\xa0\xd5\xf5N\x1b\xb7V\xa0F\x89n\xc5\xe8\x13\xd7lE\xad~C\xeaM\x8bi2'&%\xae\xc4\x9c9=ZDUY\xe5\x84-fM[\xb2GN\x9d\x1d\x16\x94\x18Uo\xd0\xa4Y-\xaau\xabsF\xca-\x8e*\xf2\x14\xb6\x9b\xb1\xa2\xdc\x05\x15*\x8cx\xc0\x94f\x9f\xd1\xeenG\xc5\xad\x95\xb5\xcea]\xd2^\x14\x95r\x8d\xbc\x0ag$\x8cZ\xb2\xc1\xa0H\xb8>uj\xd5\xe1/\xfca\xe8\xa9\xc2\x15,\xc8m\xf1x$\xf3\x10\xe9\xe0\x1d\xbemF\x91\x1eU\x12.\x985\xadVB\x9d~\x1d\xca\xb4\x992\xe9\xa4\xeb\x0d+X\xb4\xc3\xfd\x9e\x12X\xb4V\xaf%\x07mqF\x97\x1e\xd5\xc2\xd2nPe\x93\xe3&\x84]\xd2mY\x95\x94\x22I\xab\x8c)V\xa1\xd7\xbd>%\xec7=\xaaR^\xc6\xa2&}\xd6\x9a\xb6\xd3Y\x1f\xb6\xe8\xef\xfd\x89\xd3\x0a\x16\x1d\xf0!\xdf\x92\xf0\x8b\x06\xdc\xe3\xf75\x18\x12s\x83\x09;,\xd8j\xd9\xd9H\xbc\xeb\x97\x1e\xfd\xf7\x97\xaeP\x87\xfc\xce{^8\xc6\xe9l\xe2B]\xe9\xde\xd0\x98c6k6\xafI\x9frmZ\x14\xdc\xee\x9c\x17\xdd \xa6\xd2\xf3\xca\x9d\xb5C\x8f\x885\xe6U\x18\xd2,\xa3\xc8\x13\xde\xe8\xb0\xeb-Y4\xa5CX\xc1\xac3\x1a\x8d\xa84\xa3\xde\xa4\xd7\xbb(\xa9\xd5\xb0\x90\xa4E\xb5r6\xe9\xb5\xd9\xa8>7zI\xc1\x09\xbb\xcd\x88Z\x96\x14\xf1\x80\xe75\xd8\xe3\xef\x0d\x89\xb8A\xe0E\x1bm\xf6\xa2yOz\xbff\x03\x1a\x91uX\xab'4\x1b\xd3\xe2h\xe5\xc0\x97_KA^\xc3*+\xf4\xf8\x1f\xc6\xf9\xd8\x03\xf5m3\xc1\xa8rU\x0e\x983k\xb3j\x87\xdc\xe7\x88\xb5>-\xeauz\x0cz\xdefK\xd2VTX\xf1\x98*M\x8e8\xa9YNV\xafz\xd3\x1a,\xe9\xd4.\xafX\x89\x84\x9c>\xf5\xd6\xd9\xe2^\xfb$d\xe4T\xe96--\xee\x982\xe3\x1e\xd3e\xc2\xe7\xfc\x13MBz\xe4\x94\xa9t\xab\x11\x9f\xf5\x16\x05/\xd9d\xbb_\xf6\x09\x9djt\xfb\x1b\xf3\xded\xbdK\xbel\xc0\x8b\xa6\x95\x88\xa8\xd0\xe8\xb4i\xa5\xb6\xb7^\xb1U\xd6o\x85\xfe8\xff\x81\xa0\xf7\xbfw\xbd'\x19>\xaf\xf2\xd5\xe7\x0c!\x0d\xd2R\xaa\xdc\xa7\xdf\xa2^gmR\xa6\xc6\x8c\x8br2&\xad7\xad\xcb\x94\xeb\x9cQ\xe2\xac\x88\x88eE\xd2\xa2\x0a\xa2J\x95\xe8\x95\xb6\xc1\xad\xfa\x14\x8c\xdb`\x9f\xb0A\xcd\x06UI\xaa\xb7l\xce\x9d\xc2^\x96\x90U\xe9\xb4\x01\xd569-\xee\x0e\xc7\xecUd\xd2\x8aN9Sb\xee\xf2\xbc\x0d\xe6\xd5\x1a1\xef\xbckE\xe5,;cT\x97s\xe8tNM~h\xf1\xb1\x8a+\xd4!\x7f\x9c\x7fWb>R\xf3\x96\xfe 
\xb0\xc1\x84\x0aK\xaad\x9d4n\xd1>\xdfQ/.-,\xa1\xc6*\xa3\x8a\xcc\x8a\xda\xa6\xdbuJt9\xa8\xd9#*L\xb8V`\xd2F\xf3Z\xd5[\xab\xd8&!a\xa7\x95\x99V\xab`L\x93\x84\x94\x1a\xcc\x9a1\xa9I\xd6\x90c\x9e\xb1\xdb\xb0%\x1bED$\xec\xd0\xee\x06;\xedS\xaa\xda\xa2\x82\xfbTz\xcc\x84\xb8\x22_\xc1\xbc\x0f\xe9\xf3\xa2\xa4^\xcf\xd9\xe9e\xab_=\xa2]\xc8/\x97\x7ft\xf3\xa9\xe0\x0aM\xea'\x0b\xef\xda\xd6\xf7\xe1LP,g\x95J\xcb\x1aM\xa8P\x84\xb5.\xa9p\xc4&\x1b\xf4(\xd3o\xb5\x97\x04\x0aJ\xe4\xcc(\xd2\xed\x06O\xa8\x15\x91\x13\x92P0\xa0\x05\xb3\x12\xba\xaduNX\xb3\x12u\xca,+\x96\x15\x115-e\xb3a\x1d\xe6\xad\x16\xd5\xaa\xce\x84\x12\x17\xac\xa87\xa5U\x99\x0a3.\x89\x89\xd9bY\x87Q\xab\xe4\x84\xa5T\xdab\xc1*/\xdb,\xa4\xdbZ\x03Vd$\xb4[\x10\x95\x0d\xd6\xe6^\x1c\xfb\xca\xbe\xe1\xc2\x15)\x88B\xe2\x13\xd1\xae\x0d\xe6em1\xa9\xccw\xec\xd0\xab\xd6\xad\x0e\xaa\xe6 \xe8\x08\x00\x00 \x00IDAT\xf1\x8a\xfb\xd4\x1a\xd3g\x8d\xed\xce\x99\x17HXP\xad`\x8fZ_Sl\xd1\x16E\xbam\xc5\xbc\x06Io\xb6\xdf\x0d\x1e\xb5\xdba\xab\xcd\xc8\x1a\xb6\xec\x82\x01\xe5\x06\xdd\x22\xa9\xce\x80I\x816+\xba5\x1b\x94\x93Tl\xde\xeb\x94jQm\xbfw\x8b\x1a\x94\xd5i\xd9~\x07\x5c'\xa7\xc2Y\xb5\xaa|\xc6\xb0a+n\xb1\xd9\x19\x119\xa3\x06\x95\xa8r1\x08\xa5W\xfar\xdf\xbe\x02\x04\xf97\xc1s\xff\xc8\xd5\xcd\x1f\xdf\x5cz,\xd4\xa8\xd33^o\xd95\xbe\xa1\xcb\x98\x03\x12\x02!7\xfa\xa6V9K\xe6\x14\x19UP\x22-\xa3\xc4\xb8\x19\xf3J\x85\x8d;o\xa7\x15\x19+\xd2j\x0c\xdb\xed\xa4\x0drZTyR\xab\xb0#ZLk\xd7\xe9ik}\xde\xf5V,\xe92f\xc1E\xcd\xce\xc9\x8b)hSP\xe6\x92JGq\x97\x0b\xfa\x95\xdb`J\x89\x84.u\x8e\xf9\x8a\xb4\xa8\xdd\x0a.xAZ\x91\x84\x22\xad\xe6de\x04\x91\xe8\xb5\xcf\xfe\xde\x8de\x83\xe9\xcb<\x87\x1cH\xfc#\x17\x1b\xb7\xb5?\x97\xbf\xd9\xb4\xd3\xfe\x9d'\xe4<\xe7]\xde\xe9\x92\xedF\xe4\xec\xf6\xaf|\xc4\x11\x11\x09M\xb2\xc6\xe4\xcc\xa9S\xac\xccV\xfb\x95\xea\xd6dL\x85\x11\xb7\x1b\xf0&a3\xdae5\x1bQ\xaa\xcc\xb7\xac\xb6\xcf\x1e\xf5.\xa87\xee\x90&g\xfc\x96qi[\xf5\xdb&\xa3\xddj\xd3\xca\x5ct\xab\x0e\xcd>\xadVT\xd8]\x1e\xb2C\x9b\xb8O\x19\xd6g\x9d\x7fg\xce\xb4\xc6W\x7f51\xef\x17\x15Y0\xa1\xc6\x9c\xb8e\x05
\xd7\xa8\x12x\xdf\xc7\xf6/\x5c\xf6\x0e\xb9o\xf5\xcb\xd3\xff,\xf8\x7f\xb7\xa7\xdf\xb8\xf3\xc4{\xdfXx2\xd4\xa1\xc1\xa7|@\xaf%\xe3\x9eQ\xe3\xb85\xb2\xda\x85\xbcl\x8b\x8b6\x99sH\xc4\xb2&\x05T8\xa0\xc5\xb2\x0a\x13\xdad\xdc\xe3\x9c\xf9W\x1b\xf3\xed\x0e+\x93\x113\xe1\x01O\xe8\xf2]o6\xacJ\xc6*\xf3\x9a='\xb0\xcb\x9c5^\xb2\xa4\xda1\xd5\xa0\xde9\x1b\xed\xb0`\x87\xe7|\xc7\xad\xe6\x1dP\xa1\xdcEUzm5\xef\x90Ry)\xf3f}\xdbM\xc6\xd5\x1b\xf2:G\xed\x10\x18p\xda:\xa3\x85\x0b\x7fw\xd9WY\x7f~\x81\xbf\xfa\xff\xcc\xac\xd1P\x89i\xe5\xaa\xd5x\xbf?Q\xf0\xabnQa\x8dVCv8\xa2J\x19\xf68\xa0\xde^\x0b\xd6+5i\xce-\x0a.\x08)\x11\xd6m\x83)\xe3\x16\xb4:\xa7\xc8!\xedf\xcc)\x96\xf7m1}\x1a\xecw\xa3E\xc5\x86\xd5\x08\xab\x93\xb7\xdf\x9d\x9e\x95\xb6\xd69Y\x8b\xc6u9\xea~\xe7\xbc`\x9d\x1e\xd3\x12Z\x9d\xf1\xef=\xab^Z\xbd9\xbd\x8a\xfd\x89\x98i1'\xbc\xc5G\x8c[2f\xd9?\x18q\xc8\xac\x06e\x96%7\x5c\x81U\xd6\x07B\x87\x0a\xab\xff(\xb71\x15\xae\x91\x10\x96r\xabi\x8f9\xea\x1e\x9fU\xa2\xde\x05\x1dV9c\x8fc\xe2\xaa<!%oZ\xe0.\x83\xc65\x0b\x9c\xb0MJ^\xbb:\xdd\x1ae\x15\xdbbBR\xbdK\xba\x0c\xc9)\x91\x962$dZTX\xc1\xb0jQ1\xab\xc4\x9d\x94\x97V\xfa\xea\xd7e\x8e\xd8\x22n\xcc\x98r\x93\xd6I\xbah\x87\xc7D\x1d\xf5\x06\x9b\xf1'\xd8\xa2_\x9d\x15\x8f(\x12\xb3\x22\xac\xcc6!c\x08k\xce\xa5\xaaZ\xbfyq\xe4\x0a[\x87t\x07\xef\x0e\xcd4\xd7\xea\xd0eY\xb5\xef8\xa4\xd8\x8d\xde\xe3O\xbdG\x06I\x19'\xa4=\xe6z)ae\xea\xd4*7/\xa9\xc7\x9c%\x17|\xc4\x90R;U\x9b\xf1V\x07\x85\xe4\xad\xe8TeA\xbf\x94-\xe8r\x83\x12K\xae\xb5\xc3\xa89}\xee\x92\xb3\xe8\xa8\xa4n\xeb\xd5HJ\x9b\xf0z\x87l\xf05\x95\x1e4\xa2G\x97Q\xb7\x98\xd7\xebZe\xdek\xc0%\x95\xea\x05\x0e\x89\x99w\xab\xb7K\xaasA\x99\xa43B^',j,\x18\x94\xab\xbc\xe2\x1c\xb2>\x9c\x92\xfc\xc8\xe6\xfa\xee 
\xac\xd6Y\xb7\xa86\xa7\xd3\x93\xd6{^\x89\x19MR^o\xc09\xd5\xa2\xe2\x0e)\xb1\xacT\x992G\xd5\x18{5\x19O\x18Q&\xea\xdb\xd6\xa9uQ\xb3#:\xf4\xb8\xde\x88^\xc5\xd2\x96\x05\x12F\x14\x1b\xb5\xd6z\xa7\xd4K\xb8M\xb7\xb0\x94iq9\xa5\x96\x04v\x88\x0a{\xdc*S\xeam\xf4\x1d\x1d\x02\x9fT\xa3[\xcc\x94\xac\x82\x05\x11\x81\xc0u^\x12\xe8\xb1\xd3\x19\xf5\x02)\xa7e\x14\x84\x82\xe2|\xb4\xa8\xfb\x1f\x1e\x0c\xce^I\x0e\x99\x11KT\xd7O\xdb\xa2V\xb5\x982\xdd\x18\x926\xadY\x91r\x05-\xca\xa5EM\x88{\xca\x16\x03\xaeQ\x90\x10\xd5(\xebZ3\xc6\xec0\xe2N'\xd4Xm\xc5\xe36KX\xe3\x9c[\xbc\xa2]\x99\xacYcJD\xf4Z\xb6\xd9\xa4\xe7\xc5\x15\x997!d\x5c\xbf))\x19K.\xaa3\xe2\x1e\xcf\xd8n\xc6\xa2E\xa3\xd6;c\xcc^}\xf6Z\xabJ\xcevy\x93\xe6]\xf2\x98*\xfd\xea^\xdd`\x9a\xb6(\xa4L\x89R\xe5\xf9\xe5-\x7f\x1ei\x8a^Q\x0e\xd9\x18\x1am\xa9\xf9\xdd\xce`Q\xad\xb0\x9cq\x0dRJ\xa558'\xa3\xa0\xca\x8a\xc3v\xa9\xf1\x8a\x0e\x1b\xad\x982\xa1\xc9z_3\xa3\xd1Y\x9d6Y2+a\x87IG\xb0\xc1\xa8-\x8e\xaa3\xad\xd8\xa0e\xa5\xb26H\xaa\x97\x96\xb3\x88f\x0b\x86\xd4(\x7f\xf5\xb0AFL\xd8\xeb\xf4(\xd7e\xce\xb4.\xcfi\x16\x91V\xabQ\xb5\xa7u\x1a\x17\xa8\x92\xf0\xbc\x15\x1b\xcc\xc9{\x83\xe3V\xdbo\x9dU\xc6\x94\xab\x11\x974\x86\xe2 
W\x7f\xf0\x8fB\xa13\x99+\xc6!\xbf\x1d|7\xdb\xd53\x95\xec\x14\x98\xf0\x92\xbd\xe6\x84L\xfa\x92=\x1e\xf1&\xcd\xa6L\xe0>c\x0e\x89\x1a5gX\xa7\x9c\x8c2\xf5\xdaTY/-lA\xb1\xd3V;\xe4\xc3\xfa\xf5ku\xdcv+\x16\x15\x09)\x96\x96\xb7\xde\x82}Ju\xcb+\xd5gBFJ\x8b\x1e+\xda\x15[\x96u^H\xa9\xa4\x13\xd6\xf9\x86*\xe7%t\xe8\xf35S\x8a$\x05^\xe7QO\xf9=\x0b\xf6\x1aU\xe1\x90\x06\xd5vZoH\x85zO\x19\x92V\xab\xdeB0m[\xe3\xc3\xf9\xcb\xc2!\xef\x8e\x5c\x13:\xfe}C\xd9\xe7\xa6\x7f\xd5|C]\xd1\x11\x8d\xfa\xbd\xce\x13V;\xe7f;\x1c0\xeb\x82QQ\xd5\x226\x99\xb4$\xadN\xa5e/\xd9a\xda\xa8q\xf3\xa2V\xb4\x9a\x117\xa3]T\xc4a\xadR2\xc2\xaf\xeev\x8c\xcbI\x9a\x14\xb6\xd1\x9c\xb8\xdeWw\xa2\xb0E\xb7\x12K\x96\xf4\x8b\xc9(\x13\xd1)n\xde\xac&\x15z\xac\xd8\xa6I\x5c\xa9\x94\x83\xeep\xc2N\x87\xdc\xe3Z\x7f)\xeb\xa4\x1a-n\xf0M\xf5\x96<\xaeE`J\xa3\xb8%\xf5\xa6e\xd4\xc9l?\xf1\xa97\x06\xe7~\xfa\x0ey\x7f\xe8W\xbe\xefk\xde\x1a~0\xa8\xbe#\x1dY6%p\x9f\x87\x04N[r\xd6w\xdc\xa4Z\x91\xcdr&\x95x\xcc-\x965\xb8\xe8\xa2:\xab\x9d\xd0$\xa7B\x8b\xa8\xc0\x92\x0e{\xf4(\xb5OV\xc8\x88\x15\x19ow@\x95I\xc3bJ-\x8ay\xc9\x8a.\x95&\x1dP$i\xbf[\x9d\x15X\xb0[\xd6\xa2:Q\x07\x0c\xf8\x98jS\x0eiS!\xed\xa2\x1a\x13\xda\xac\xf3\x98N)\xbf\xe5sF\x85l\xb0\xda\x98\xa3\xa6|HR\x8b\x84b#\xa6\xb4\x19S\xe3\x15\xb3\xc6%S\x17o\xe3\x1b\x85\x9f\xaaC\x0eD>\x99\xe73\xb9\xcf|\x8f\xad0/'\xfe{\x963\x85\xdd7\x0d\xfdA6T\x11\xd4\xc8\x9ap\xa3){\x8d\xa87d@\x93S&e\xb5\x89\xb8\xd1>\x95\xfa\xcc\xdbi\xd9\x94j\x13\xa2\xb2\x02+\xd6\x9b\xb5\xeb\xd5\xa9\xe8\x01S\xe2\xf2V\x89\x1a\xb7V\xa5\x12e\x96LX\x8bw\xa9vL\x9d\x05\x8dF4H9\xadK\x91e\xfdrJL\xba]\xcc\x05a79\xed\x9d>\xa9I\x91=Niu\xd6\x05\xf7\xe8\x13\xd1\xef}F\x9c3cT\x9d\x12-\xce:\xeb\x82J\x97T\xb9\xc5\x17\xbc\xd5\xb3\xee\x92\xb3\xd5Bh}\xe0\xe2\xd0\xb1\x9fn\x0e\xf9\x01\x9e\x02\xecI>\x11<\x13\x14JN=\x7fC.\x1e\x0c\xba^\xbf\x0e/\xda\xe2\x88\xbc\x84\xdb\xb4:\xa5\xc5:\xe5.\xa9S&\xa9F\x916)\x04\xfaQ!i\xc6M\x86ls\x5c\x9d\xb46\x05\xbdBF\xed\xb7\xcd\xbc9\xbd\xd6aL\xe0\x94u^\xb0\xcfo\x98\x95\x14\x17\x96\xb3\xd6\x88F\x1dzD5H(q\xd6Q\xd5\x9e\xd1\xaf\xd87\xd
djL\x83J\x11=\x9a\xb49\xa0\x5c\xa39\xfb\x9dT\xe2\x8cu\xce\xeb\x145\xe8vEf\xdc(\xe6\x98VO[\xe5\x98\x9c\xe7\xe5\x83\xc5Bh\xfdO9\x87|\xf2\xfbf\x8e\x7f\x08~%x\xb8\xec\x9f\xa7\x9e\x19h\x8eNE\x1b\x85\x9d\xd3h\xc8&Kj\x9d6%\xaaG\xde$\x16,[e\xd2\xbc[}\x17ye.\xd8h\xdc\xbc\xb8\xb5Ni\xd1c\x87%c\xa6\xcdz\x93\x88\x15]\xfa4z\x83\xb8}\x92\xaf\x1e\xccYcYR\xb7\x9c\x98!\x199\xfb\xac\xb2U\x8f1\x05Y\xb3Z\x8c\xabT\xec\x8dF\x94\xa8\xf05\xf52\x16\xac1g\xd6I-\xf2\xee2eA\x9d\xbc\xbcA\x81\xdd\x0a\x16\xf4\xd8aH\xd8\x88\x05\xeb$\xd5[\x85v\xe1\xe0\xbc\xa6\xadg\xfe\xf3\xbd\xb1\x0b\xb9\xcb\xb8\xcaz[!\xf07\xf3w|\xad\xb21\x15\x8f\xca(XQ\xa4\xc6\xb2\x94\x11\x0d\x16U\x09k\xb7\xa2 \xa3\xce9-\xae\xf3\xb4j\x19\xc5.\x88JZV\xa5\xc3A\x0d\xaae\x05\x9a\xcd*(2l\xd0\x94~\x1bl\xf3\xbcG\xdd\xa4\xd3\xa0\xdb\x14\x1cU\xa2]X\xa99+\xaaM\xb9C\x959\x836\xc9Xk\x83bs\xaa\x9cP\xad\xdd\xbc1-\x8a\xfd\x13\xdbM*8oI\x89\xed\xfa\xb4\xc8:o\xbfNi\xbb-\x09^=\xad\x18\x08\xd9k\xb3\xa8=R\x86MZR\xa1=\x98\xa9\xba\xe7C\x8f\xa59\x18\xbb\x8c\xd7!\x9f\xf3\xc0{s\xbf\xdd\x92K\x84\xd2\x86\xadVdA\x9d)\xed\xd2.i4\xa8\xd8\x92\x0e\x07m\x14\xd6,-\xa9\x5c^`\x5c^\x93~\xab-\xe8\xd1eRJ\xb1FYs\xa2\xeet\xd6\x16C\xdae,\xa1\xcbq\x83j\xbdh\xa3\x0a\xd5\x8a\x0c\x99\xb7\xa0Y\xc8\x82QM\x96\xcc\xeb\xb5\xde\x90Q\xab\x04\xc6\xd5\xc9\xca\x192l\x93\x9c\x98E\xc7\xa45ipF\xc4\x82\x22\x11q\x93&L+\xd1\xaa\xc2\xa8\x98\xa8E\xf5\x22F\xf4\x99\xb2\xcb\xa82\x03f\x8d(\xb1\xee\x89\x03/\xf1\x81\xc2'\x0a\x97\xa9C\xde\x13{w,\xf7;wd\xba\xc3\x8bv\x0b\xe9\xb3\xc5Ns\xb6\xf9\xb6\x97\x144\x8a\xfa\x1d\x87d\xed4\xaf\xdf\xa0\xf3J\xc4\x9c\x97V\x879!i\x13\xee\x90\xd4\xa4\xdc6G\xcc\xcaZ\xb4\x88I\xf3(\x92\x128\xeb~\xefR\xe4v/[o\xc4\x05\x1f\x13\xb7N\x93\x1e5\xea\x04\xfa%t\xca\xe8\x102\xa3HHV\xb5i\xab%\x1c\x91\xf1\x8cJ\xef\xb1\xd5\x11\x11\xd7H\xb8G\xde\x9d\x9e\xb0\xac\xd9\xeb\x5c\xe3\xb0\xef\xba\xc7\xa4\xdd\xb2\x8ex\xd9\xa4\x9b$\x9c\xd7fZ\x89\x22\xd7\x1b\xf3\xca?\xe1\xc6`W\xfe\xb2u\xc8\x07\xf2\x8f\xbf\xa7\xec\x7f\x1b\x0c\xaf\x11rI\x8d2\xc3\xba\xddd\xc0\x87<\xa2\xd8S\xea<d\x83Em\xb2\x06u\xda%\xa9W
\x9d1%r\xca]c\x5cR\xb3\x0d.\x08Y\xb1]\x83\x93\xdae\x15TzI\x83\x84\x8bj-\xea\x910\xe8\xacz\x05\xa5j|M\xbd\x943v\x98y\xf5\x0e\x9f\xd1hA\xce9\xf78gQ\x83rq=\xaa\xcch\xd3i\x83\xdf\x17\x95\xd6\xf3\x7f1w\x9fQ\x92\xde\xd7y\xe0\x7f\x95\xab:\xe7\xe9<\x9d&\xe7\x88\x19\x00\x83D\x90\x04)0G\x914%K\x94l\xc9^9\xae\xc3j\xd7\xd6\xda^[\x96\xceJk\xd9\xb4-\xc9\xb2\xb4\x12)\xcaLb\x02@\x10\x89\x00\x07\x18`8\x83\x89=3=\x9ds\xaeN\xd5]]y?h\xd6\xe7\xd8g\xad/K\x90x?V\x9d\xf7C\xd5\xf3\x7f\xde{\x9f\xfb\xde{\x1fu\xfe\x8a\x0b\xae\xfa\xa1>}\xe6\x15\xf4\xeas]\x95-\xdfrF\x9b\x03F\xcd\xc9\xa9rKL\xb5ZK\x02\xf9\x5c\xdbco~\xe7\xbf\x91\x22\xdf\x09|*\xfc\xa7\xc5\xb7\x09C\x8e\x04~\xb9\xd4\xfc\x07\x81L\x99m;\xf5\x9bQk\xc9Q_\xb0\xe4\x97|H\xcc\xc7\xcc\xf9%\xe5x\xce\x8a\xa0-\xd7DuzH\x9bYiaC\xa2\xaa\x15\xbc\xa8\xd9\x07\xac\xa90.g\xc2\x0f\xfd#\xf3v\x9b6\xe4\xf3\xbe\xaa\xd7\xa3\xbe\xed\x90*\x13\x92>dI\x99;f}\xcc\x9cy\x1f1g\xcb\x87\x0dZ\xb1\xd3>\xa3\x9ad\xb5\xd9\xd6\xef\x80\x01\x11\x1f5\xe4\xf3\xfe\x8di\xdd\x0e\xba\xeb\xcbN\xfa\xdf\xdct\xde\xb3\xd6\x85\xdc\xf2\x03\x1fu\xcb\xb2\xd3\xce\x9b\xf3m\xf7\x0b\xfb\xacW\xfc5\x1b\xe2\xe6\xad\xc9\x86\xeb2w\x1e\xfco\xff\x83'KO\xe6\xdf6\x0c\x99\x17\xf9\xf4\xc7?<\x1d\xec\x0b\xcc\xdb\xd4\xe1!O\xeb\xb0\xe83R\xea\x0d\xbam\xd6\x19\x97$\x0c9\xa8\xd2\xb4I\x7f\xdd]y\xb7ED-\xa8Ro\xee\xde\x80\xc1\xb8A\xed\x9a\x0d\xd9\x90\xd3\xe9\x82\x16#\x0e\x8ax\xceg]0\xe9\xdd\x06\x84\x15\xc5dED\x15\xb4\x99\xb0G\xce\xaa\x1au~`\xb7f\xd7\xa5<`JA@H\xde\xa0-;\xa4|\xcc\x90\xaf\xe8\x90\xb4b\x8f\xb0i\xbf\xeb13\x1e\xb1\xadS\x83\x0f\xf8S\xa7\x14l\xd9k[\xc9\xb4\xac\xe7\xf5xM-\xfal9b2\xd0\xd9\x9d\xfa\xfc\xfd\xc1\xc1\xe2\xdb\xf0\x91\xf5`xO$\xfe\xd5\x03\xd5\xb3\xc1\x88F\xb7\x15\xac\xe9T\x10\xf2\x82\x1ey\x9f\xf3\xacJ\xe7\x9d0jT\xcc\xaam\x19?k\xc4\x82\x80\xb8q\xf7\x19\xb5W\x9dZWu*\xaa\x14\x14\xd2\xe4\xbc\xbdb6\x94p\xd1g\xddQaS\x8b-\xd3\x92\xcal#-\xe9\xc3\x9e\xb3f\xc6\x9aV\xad.j\xd2-\x83\x15\xe5V\xcc\x8aX\x93\x12\x11rB\x87;\x1e\x92qET\x871G\x95\xab\xf4\xb2*c\xf6i\xf3\x9a%\x1f\xf3E\x1d\x86M\xdb%\xa5JDP\xc8\x9cv\x19)-\x9e
\xd7\x10P}\xe2\x0bg\x92\xcf\x94\xde\x86\x80$C=G\x83\xff`2\xd0\xab\xcc-\xc7D%d5\xa8pV\xda\x84Y!\xc3\xdeo\xc19UXW)gP\xbdFM\xa6E\x15\xbc\xd7\x8c\x05\xdb\xf6\x1a\x93\x92\xb2\xcf\xb2\xeb*$ui\xd3\xe8\x9aNI4\x19\xb5\xd3\xa2M\x15&\x1d\x90\x15\xd5\xed\xeb\xc2\x8e\xca\xa8\xb1\xed\xae\xa8*\x05U\xee\xaa2+k\xb7\x0d\x05\xcdZ\x84\xac\xb8\xe51\xafJ))\x19\xf0\xab\xf2nH\xcb(j\xf5\x9anG\x15e\x0c\x8a\xca+\xb7\xa2\xcc\xbc.a\xdb\xee7o\xcd\x01\xcfx\xa7\xd6\xc0\x84\xed\xe6\xff\xf3\xcboC\x1dr4\x90\xca\xcd\xfe_1a\xcb\xb6\xf5\xe8\x13\xb3\xa6\xc5C\xb2\xd6\xcc\xdb\x833\x02V$\x8cZ\x11\xb1j\x87-w\xec\xd0n\xd8\xb6%\xdd\xca\x0d\x0b:j\x5cI\xc4~\x7fh\xb75\x9b*\x8c\xbb\xcf\x0d;D\x94<\xe2\x19\xedF\xa4M\x9bV!\xe3\xb8\x0a\xb3\xea4\x990)fY\x85je\xfa\xe5\x04\xd0k\xdb5\xabr\x02^s\xc9\xe7\x1c\xf7[*\xddUn\xd3{\xach\xbewH\xea\x15\x9c\xd1\xee\xa6n\xcb\x0eyYB\x995\x95\xc2\xe6\xac\xa9sUV\xd4\xbc3f|\xc9\xce|\xf4c\xffr\xff\xdb0\xcb\x9a\x93-\xfb\xf3\xff\xd0\x98;\x14J\xd9\xd6i\xd3\x90\xc3\xa6-K\xfa\xb6\x0eE\x09!wL\xabW)\xec\xae\x886\x1b\xf7zG\xba\xac*Z\x14\xd6b\xd1\x80F\x09A5\x1eu\xc1\x1e\xc3\x1ae\xa4t\x9b4\xa7KR\x8daMbV\x15\x8d9kCIX\xc9\xa6a\xef5j\x87-\xb5\x12v\x99\x91\xb2iC\xc2\xb2niu\xb6\x1c\x93w\xd1/\xf9\x96jIYg-;\xaf\xc7-K\xa8P\xa1\xd2\x86\x05\x09\xf3V\xf5H\xc9(\xb3\xcb\x80\x90\x1a#j\x1d\xf6\xaaV\xf3\xce\x99\x0d\xa6\xb3K\xeb\xb7^z\x1b\xea\x90w~\xaaR 
T/\xae$\xe6\xae\x07]\x14s\xc7\xfd~]\x93\xeb\xd6\x05\x85\x95\xa9\x13\xb3!&\xed\x8e\x88*U\xf6;/d\xdc!\x9d\x16\xb4i\xd4\xac\xe8\x94\x12>\xe8UAS\x0eI[\x93\xb7\xd3\x98\x06\x17|\xd8\x90\xb0\xc3R\x9a]V%(c\xd3\xa6\xa3\x06T\x18\xb7\xcfu\xe5\xe6\x9c\xd4dUN\x9d\xac\x0dQ%\x8b\xdeo\xd2n\xdf\x13wK\x97\x87}\xcf]\xd5\xbe!\xa1\xdc\x0eM&\xcd\xf8\xb8\x8b\xfa%\x9dQ0\xea\xb0~\xafz\xd0\x11\x13\x8e\x08\xbb\xec\xb0\x8b\xe2\xce\xcb\xbb\x13,\xfd\xd5\x03\x89Gbo\x0b\x86\xfc\x83\xc0\xab\x7fQn\x8f4Gw\xfd\xf9Jl3\x1c\xb5\xe2\x01\xc3\xaa\x8d\xd9\xa3F\xafW\xcc\x98w\xbf\x80\x93\xa6]\x16\xb4\xacZ\x9f\xacJcz\x15tX\xb3\xa4LR\x15\x92\x96\xd4\x99U\xa6hI\x97qc\xf6\x1bu\xbf\xab\xaa]\xd7gY\x95k\x8e\x08\x19\xc3\xa6cB\xb6\xb4\xdd[8Pf\xca\xa3\xae\x8a)WP\xe9Ym*\xa4\xa5\x04$<b\x5cJ\xa5\x0b\x0eYPg\xc9\x15\xe74\x1a\xb4\xadU\x99I?cD\xc4\x15\x8f\x1b\xf1^\xffA\x5c\x95V!\x07\xbdj\xcc1\xaf\xbb\xdf\x84r\x87\xad\xa9\xd0-\x16LT\xcd\x8e\xfc\xf0\xd2\xdb\x82!\xbfq/\xbb\x18\x93\xaa\x1ai\x8dGj\xf59\xebe\x11[\xde\xe19i\xd7}\xc4\x94\x9f\xf6\x0d\xb7}\xd1\x1d\x9f\x91\xf6.\xb7U\x182#f\xc0\x96\xa7\x94\xa9\xd6 
.kB\xca?7\xa6\xd1Um\xea\xfc\x96}\xc8\x0ax\xd1\x11\x17\x1c0e\xd4>I\xc3\xaeX\x95\xb1\xa4VT\xc8\xa0\xa3\x86m\x8a\x8b\xb9\xa4Y\xce)\xbb=\xe3\x93\x8af\xad\x08\x89\x8b\xf9\xa6\xb0\x0f\xb8\xa4\xd7\x9ay\x05\x93\xde\xaf]\xb5~\x11\xaf\x1bp\xd4\xbf\xf5\x84\x15g=\xaf\xc5\xef\xfb\xaa\x11\x1d\xbe&h\xdd>\x15r\xfe\xaeK\xfew\x97\xdc\xb5`\xc5e\x8dR\xa5\xa3\x9fW\xf66`\xc8?\x0b>^\xf5b\x06f\x8b{\xffJ\xf4\xc9H\xa0\xd9-\x17\xfd\xa2\x9417\xbdK\xc9\x83\xfe\xc4\x1e\x97\xfc\x92\x01\xef3m\xc4\xa67\xf5xZ\xafr\x19\xf5\xd2~\xda\x88E\x01\x15\xceJj\xf2e5*\xb5\x1aTi\xb7KR\xe2V<bR\xc8\x80N'\xbd\xe6\xb8\xb4r}\x96D\xf4\x9aR\xd0\xe7\xbc\xdd\x16T\x1btR\xca\x94f\xfd\x8ez\xf1\xde\xc8CF\xb3i\x95\xc2^\xbd\x97)\xc5\xad;+\xe0y\x9bj,Kh\xb7\xc3\xa7}\xd9\x017=a^\x83?\xd6\xa7\xa4K\x99q\x05g=oI\x85\xdf\xf1)\x0b\x8e\xaaQ'l\xa3\x14\x8eT\x8fN^\xf9\x893\xe4\x9f\x14\x7f\xf5\xbf\xbaa7\x9f\xdd\xd2gQ\xde_s\xdb\xac\x1egE-\xfb\x96\x1ec>\xe67d|\xd1\xc7l\xaa\xd6n\xcb\x19c\xc2\xcaL[\xb7\xad\xdc9We\x94\xcbkT-\xac\xde\x90\x9d6\x0d\xca\xa8\xb6j\x9fi\x05A56L(\x173\xaf\xd5\x01c\xaa<\xe5\x9d\x82F\xb4\x89\xa8\x91\xd4\xee\x96\xd7}\xceS\x1e3\xa1\xdc\xa6\x88\x88\xb4\x8c\x88\x15\xdb\x8e\x1b\xb7\xae\xd2O\x89k\xf5I=6\xfc\x00s&Mx\xd6?\x95\xf5q\x11\xa3j\x95\xfccW-\xb9,d\xb7g\xfc\xbcmY\xe7<\xe7\xa4\xff\xa4\xd7\x15_\xd1\x18\x1c\xcc7\xff\x93\x7f\x9c\xf8\xdb\xd1\x9f\x18C\xbe\x1b\xff\xc2\x7fW 
(\xfb\x8f\x87+.\xab\xd3n@\x5c\x8b9e.\xea\xb4S@\xd0\x1b\x1e\xb3\xe4\xb4\x9b\x02F\xc5U\x1av\xc0\x86*)q\x19%\x1b\xca\x14E\xd1\xafU\xdc\xac\x80]6,\xd8o\xd2^\xb3\xaa\x85M)S%*g\xdcI!\xaf\xd8\xb4\xe5\x09\xdf\xb7\xd7\xa06+\xe6\xedv\xd7!\x11o\xf8\x84\xeb\xb6,\xc8h0\xa6BR\x8fu\x1fw\xc3\x13\x86\xa4-k\xd0\xe3iS\x16t\xda\xf4a\xbb\x1c\xb5\xdb\xef;\xed9/:\xe1\x8a\xbd~\xc3)Q\xed\xaa\x0d;\xeeY\xbbl\xa8Wf\xc0_\xf3\x0dg\x1ds\xd3c\xc1W\xaa\xa3\xb5\x7f\xf0\xed\x9f\x0d\x5c\xfd\xc90\xe4\x89\xed\xff\xfe\x93XC\xa4\x98RmQ\x952E}\x0a\xce\xaav^\xcaN%\xaf\xca\x19Uk\xc5\x0e\x0b\x9am\x0aZ\x14\xd6*\xaeS\x9dy\xcb\x9al\x1aF\xbb\x05%g\xf4\xeb\xb4\xcfsZ\xcc\xa8\x16\xd4*\x22f\x87\xb86\xad\xd6\x0dJi\x917\xa0K\xbd\x92\xeb*=\xe1\x87\x22RJ:\xad\x98rDD\xc2\xa6me\x1a\xac\xd8\x94RiQ\x8bme\x0e\xb9\x22a\xd19eV5:\xad_\xa5\x07dU\xd9\x16\xd4h\xc6\x07<d\xcd\xb2[\xe2\xde\xd0h@\x8d\xa7UX1\xa8\xc2\xcb\xd6mz\xd6\xde\xec\xe2/3\xf2v\xd1!\xa5\xe0\xbf\xfb\xb5x\xa1<\xb8[F\xd1\x90e\xb5\xca\xd5\x9a\xb0i\x9f\xd7\xed\xd7lZ\x83Z\xa3\x82\xaa\xa5\xcciP!jZZ\xab\xb4jK\xce(\x13\xd0\xed[NH\xb9\xe6\xc3n\xcb\xab3\xa7V\x8b*\x8b26\xc4$\x84,\x09(\xb7\xd3EUb\xf6\x8a(\xb7j\xa7[\xba5\xdbV\x10\x12S\xe5\x96y\x95\xd64\x1a\xd7g\xcb.\x1d\x96\x95yY\xbd\x90:\x03\x82\xd2\xc6\x14\xb59!\xaa\xc5\xa8\x11y\x0f\x09\x98@\x9dJ\xb7\x95\x8bXUeC\xbd\x15]r*5\xbac\xc9\x83\x06\xb4\xc8k\x09\x16\x03\xfb\xb7\x9e}\xf5m\xa2C\xfe]M\x99\xdb\x1a\xbcd\xddq\x9d\xca\x8c\x1b\xf5\xa6\x9fw\xc0\x865\xd7\x8cJ\xfa)\xb7\x9c\xb0fL\xbf\xc7\xdd\xf0a\xe75K{\xc1\xa7\x0di\xf6UMV}\xdf\xbf\xd0o\xd5C\xb2\xba\xbd,-!\xec\xfbZ\xd5*\x97rK\x8d\x1fz\xdcU\x19\xdd\x1a\xa5Mi\xf5\xa6;\x1e\xf3\xb4^\x01s\x8eZ\x92\xf5];\x95\xa9\x95U\xb0)bKR\xd1\x8bj\xa5\xed\xb1 /*\xa3LBP\xb9f!\xcf\xa8S\xa5\xc6-\xaf\x9at\xdb\x93\xe6\x5c\x96\xb2(\xe3Q/ksG\xce\xf7\xe5\x5cvS\xab\xc7\xdcQ\xef\xae\xa0\xbb\x81\x8d\xc2\xd6_=\x11{4\xf4\xb6`\xc8\xc1c\xe9\x9f\xaf+\xa5\x83\x8dN\xfa\x96\x87\xa4\xbc\xd7\xa2-\xaf\xeat\xd5#b\x22v\xfa/\xdeaE\x9b\xa4\xb4\x0a\x09O9e\x11=&T\x1a\xf0^[\x82\x0e\xf8\xa2\xd3jU\x99\xd6%o\xd6~\x8b\xce\x1a\xb6 
\x89\x0aKv\xb9\xa1F\xad)C\xaa4(8n\xcd5GU\xda\xd4j@N\xab\xc3f\xc4\xadZSmI\x0f\x02:\x85\xec\xf3'bvi7\xe6\xb6N)Q->,%\xe8M\x01\x1f\x14\xb3(*hU@X\xa7\x0d\x5c\xf39\x97\xbdS^\xa5f\xd5\xd84u/\x06\xd6\xa9\x17\x0d\xac4V|cvn\xa9\xf46`\xc8\xe8\x81MO\x04\x86\xec\xd2\xef\xfd\xbe)\xe1\xdf\xd8\xab \xebU{|\xd7\xb8ei9w\x8d\xb8%\xa0\xd90\x1es\xc1\xb2i\xef3b\xce\x0e-\xd2\xfae\x9c3'%\xa0\xdd\xd7\xec2\xe8\x8a\x80\xe7\xed\xb7\xcf\x0e5\xcaL\xdai\xceNs\xba\xc5l\xe9\x15w\xd7\xbc\xc3j\xdc\xb4\xd7\xa4\x8c\x1e7}PX\xb565&\xd5X\x94\xb0\xa8\xd3\xa2g\x1c6\xafGPXT^\x95\x8fZwI\xbf\xbf\xaeF\xd6o\x99Qg\xca/xQ\x87e\xd74\x18t\xbf\x17=$b\xc1nS\xb6\x91\xd1f\x8fF-N\xc9\x8a\x04\xaaU\xff\xca\xed\xc2\xbb\xfe\x7fN\xe8\xfeH\x06|\x1f\xfeB\xf5G\xb6\xe3\xa7L\x8b\x8b9d\x18\x17m8nD\xad\x03\x16\xd4\x18\xf2\xb0\xefx\xc0\x9c~+\x8a\x1a-\xa8\xd7(c\xcd\xa7\xac\xfa\x82S\x8e\xcb;/\xa8^\xad\x8cf\x93n\x88\xa90\xe7\xb0zQ\xaf\xcb\xa8\x97\xb0\xe5\xa0Y9\x15\x9eU\xe5\xb4\x9cS\xce\xcbJ;dB\x971\xf3\xee3\xa9\xcf\x98\x94[\xca\x14\xe4\x055\x08\x0akr\xd1\x93F,\x1bvL\xde\x98\xcf\x08(Z1\xef>\xed\xf2n\xbb!f\xde\x1e\xdf\xf5\xd3\x16\xcdhsM\xbbIM\xf7\xd6v,J\xc8\x18ScE^J\x9fA\xc7\x0cL\x5c\xdb\xd9\x13\x18)\xfdX\x1fY\xff(\xf4`\xe2\xc1\x9a\x073\xe7K\xf0O\x03/\xa3\xe3\x93\xb1\xdd\xd1H\xd21\x017\xdc\xe7\x07\x9a\xdc\xf2\x1e[BvzZ\x99\xa2\xb8VQg\x0c\xab6\xaf\xd3\xaa\x9d\xe6\xcc\xab\x12\xd1\xe6\x15u\xb6\x84\xac\x08(\xc9\xdap\xd8\x05Mf\xd4X\xd1#%b\x8fk6T\xf8\xa4AY\xdb\xee\xa8\x14\xb0\xa1\xd3\xaa\x92\x80E\xf7\x99\x92\x95\xd7\xe7\x8c7\xfc]\x97\xadZ\xd5lV\xd1\x9a\x1ew\xf5\x99p\xd7;\x8d\x1buFV@\x9d\xfb<\x8f\x92\xc7\x15\xbc\xc3\x7f\xb4hZJ\xbb\x98\x10\x92\x86$\xad\xa9rK\x83\x9b\xf6x\xc5\xb2\x83f\xfc\x94vcv\xd9\xd2\xa3C\x97p\xae\xab\xae\xeeK\x97\x96\x7f\xcc1\xe4|\xe9|\xee\xfc\xd6\xf9{\xa7\xe0\x91\x9a\x97\xb7\xd9\xf53+=\xbb\xc3\x1b\x96\xd4j4h\xd1\x86\xfb\x8cI\xbb\xaa\xc6\xac\x84\xb8\xb4\xa7\xb4\xb9\xa0\xdbU\xedn\xa9\xb1u\xaf;\xab\xce\x0e\x0f\xfa\x9en\x87$m*\xb7j\xdb\x82\xe3\xaeaF\x95-[\x8e\x98\x15\xd0hNR\xda\x9a\x84\x1e7\x0ck\xd3\xe4\xb4\x92a=\xe6\x94\x8b\xab2f\
xca)\xaf\x09h2/\xab$/\xa7E\x99\x92\x06i-&\xd5\xda2\xafS\x83+~F\xd6Q\xdf\x13\xf35O\x88y\x5c\xa5o\x09\xd9\xb0\xe4\x94\x0deZ\x0c\x08\xdbe\xc3-\xc7\x94\x1b\xb0n\xc8\x0fT\x18Qa\xc9\x88\x15\xa1@y\xe0\xc2\xe0\xda\xc5\x9fh\x0c\xe9OC$[,\x5c\xd4\xa1O\x8dNQ\xad\xaa-\xf8\xa1\xa8\xfbl{X\x8bUYu\xd6\xdcp\xdd\x86\x0cRj\xd5\xdf\xe3M\xa3aG\xe5\xac\x9b\xd4\xa9SP\x87&\xe5\xb6\xe5\x95\x09\x98\xb7\xa4S\xf2^)\xf2\xae2\xdd6D$%L\x1b\x11\x92W\xaehK\x8dQS\xda\xc4\xe4dT\x99\x96\xb0h[P\xa5\x80\x90\xb8\x11\x0bB\x9a\xcd\xaaTi\xb7&\xab\xee\x88\xfa\xaaZ\x1b\x9e\xd4b\xd4\x86i}\xf2\xaa\x1c\xf4\xdc\xbd\x02\xfc\xbb4\x1a\xd1d\x97\x88Q\xdb\x1eR).\xa6hKN\x8d\x92\xf9`|\xab\xe1Q\xae\x05~\x82Y\xd6\xad\xc2\xc7C\xfd\xa5\xce\xbfY\xde^\x0c',\xe8\xb0\xe0\x86\x0d!\x144\xd8\x105`\x01\x05A\x07\x8d8`M\xb9J)\xab\x0aJ\xb6\xe4\xe4T\x18\xb3\xe4\x88>7\xf5\xabr\xd7^\xdb\xdePk\xc3\xba\xfd\x12\xd6\xd5\xbb\xe3\x8e\x84\xe3\x86\xa4\xd5\x1b\xb1\xa1YB\x87\x0eK\xa6\xd5+\xa1Q\xd6N%?\xb0K\x5c\xb5\x01\x95\xb2\x0a\x96Q\xd0\xea\x8e\x80\xc3\xfe\xd4\xcf\xb9\xa6R\xb9\x19\x95\xda\xd5+\xd7\xe5\x86\x94;\x0e\xb8`\x97\xbd\x96E-\xdb\xa1\xca\xb2\xbc\x15\x87]Ra\xde\x9a\x1e!\x11O{DR\x5cN\x9f)g]\xb4\x1e\xcauv\xfe\xc7\xaf\x17\xe6\x0ao9C~5\xfe?\xfaf0\xb4\xb3\x22\xde[\x0c\xfd\x85\x0d\xfd\x17\xacz\xb7\x87\xad\xa8\xd3\xe9\x0d\x11\x17\x94\xec\xd6\xaa\xa0\xd5\x1b\x9a-\xdb2n]\xc2\xb6\x92\xb4\xb8\xbc=\xca\xed4o\x8fW\xad9e\xcbA9/\xb9O\x85Y\x09S\xd2Z\xed\xb5\xa2VR\x93c*l\xa2\xce\x80\x1e%\xfd\x92\x8e+(\xb3\xe4\x9dv\xbbd\xc3\xa36\xdcT\xa6LN\xed\xbd\xb3WfJ\xbd6\xcf:\xe6+N\x08\x993\xe8\xa4e\x0d.\xf9\xbf\x0d\x1b\xb3d\xa7MG|\xd3\x82\x97El\x0b:kE\xc8\x1f\xfb5\x19mbfD<\xe5\xdf\x1a\x91\x12\x140\xa0\xd6\x90S\x9atY-\x95\xfd8b\xc8C\xb1\x1f\xfc\x7fN\x0b\xfd\xfb\x86;\x99\xbd\xdd\x81\x7fX\x19\x9aT-\xa7A\xb7\xcbJ\xf6\x8a\xb8*\xe6\x86V%\xeb\xc6\xd5x\xdeN]n\xe9\xb3\xadRQ\xb5\x94\xa8\x9c2\x9bb\xbe\xeb\xb89\xfb,\x18V-\xa3\xc5\x03\xbecC\xb5\x16q9\xe5\x8a\x06ddm\x99\x13\x90V\xe9\xa6\x98\xac'\x0d\xa8\xb0bI\x9d>\xfd\x8a\xfe\x96\x17\x84\xd4\xd94g\xd2\xb6uU\xd6\x15D\x95\x14\x14\xbd\xc
3\xb0\x88\x80\xddv\xc8XE\xc4\xac\xd3\xe2Z\xa5E\xed\xf6\x1dm\x02v\x1a\x17\xb0\xe5Y\x8f\x88K\xf8ON\xdb\xb6e\xcd\x11\x9d\xfeL\x99\x06\xe5\xd6\x15\x95\x9b\x91\xb0%\x14;\xfb\xef_I\xa5\x8ao9 ?\xf8\x1f\x0co\xed\xdd\xfe/\xf9\xc6W\x17\xea\x8f\xab\xf5\x86\x0e\x93Vujv\xd8\x0b\xf6H\xbb\xdf\xb0\xac\x0d)\x0b~\xc1\x9fk0mAQ\xc9\xb0#\xee(9(\xa3M\xbdN7\x05<\xea\xeb\xda\x9c4\xa8R\xbd\xd7}\xc8\x055\x22\x12\x1e\xf7\x9c\x8c\xb42564Y5\xa1\x5c\xa7\x98\x0d\x87\x5c3\xee\xd3\xeeXP\xa7\xd1W\xec\xb7f\xdd\xc3r\xb6m\xca+X\xb7\xcb\x86i{\x8c\x09\xca{\xafm\xcd\x02n\xd9+\xa3\xc9\x9ak\x8a.yP\xbfI\xb3NZ\xf0\xb4\x06\x15\x02>.\xe4%\x1f\xb3jL\xad\x82\xa0\x05\xb7\x9c\xb1*)/mIT\xd0\xb6@`\xc5L\xf9\x9do>\x14\x18\xff\xc9\xe9\x90\xe3{\x9boW\x16\xc7\x83\x01\x1f\xf2\x1d\x87,\x22\xa0A\x9b\x8bZ\xe4\xdd\xd6\xe1M\xcd\xb6\x0d9\xa5\xdf\xfdF\xc4\xe5\xe5\xd4[P\x90T\xeb\xa7\xad{^\xb9'T{N\x95\x9c^\x09WD\x8d\xa3VQ\xd0\x036\xbd n\x9f\xf3\x1e6\x8a\xb0I\xd3\xce9i\xd5\xb86\xdf\xf21\xf3z\x0c\xab\xb4m@\x9f\xb4\xe3\xde\xb4 \x88e!\x93Z\xe4tYUo]\xa3rw\x95\xdb\xb0O\xd2\x88\x1a\x09G,k\xb5j\xa7\xaf\xe9\xd6\xe8\xb6]\xe2\x06\x04\x84M;mI\xa5\x8c\x80\xbb\x0exS\xbbe!i}f\xb4\x9a\xf4\x90ox\xf0\xf6\xe7\xf7\xff\xc4\xb2\xac\x7f\x18.\x05\x12\xbf\xbeR\xcc\x04c\xda<\xa3\xca\x8a\xcf\x98S\xe7\xa6v\x05yE\x0f\xe3\xa7$\xac\xfa{\x86\xec1&\xa7\xcb\xac\x06\x0d\xf6\xc9\xfa\xa06Y;\xd5\x8ai\xf2]u\x12\x8a\x0e\xb8\xa1\x5c\xbd\xfd\xe2\xe6\xecS\xb4\xe8!S\xf2\x86\xbcK\xc6n;]\x11\xd3\xec\xb6J\x09a\x05\x7f\xdfs\xf2\xaeI9`\xda)\xf7\xcbx\xd0\xb4\xb4\xb0\xa45\x09\x8d\xaae\x8cjWt\xbf\xac\x82u\x15\x16\xf5\xdb\xb4i]\xb7\x8fx\xdcI\xef\xf4U\xbbl\xe8\xd1gZZN\xb7\xa2\xddVd\x14\xddrA\xde\x9b6\x1d\xb5aM\x8d5)7%=\xa3\xabpe\xdf[\xa6\xd4\x7f'\xb6\x95\xfdG\x7f\x89\xea\xfc\xc5\xc0\xef\x95\x1e\xff\xdb\xd1\xdfN\x95\xea\x02\x1d&\x1d4\xa5\xc95}\xe6\xb5\xb9\xe0\x94\x80>c\xea\x0c\x0b\xdb\xc2-AEYQ\xb5\xd2\x0a\xea\x8ch\x93q\xbfv\xcf\xca8m\xb7g\xcd9\xa6\xdca_\x14\x12\xb6\xa9\xa0\xa4KHJ\x8b\x97\x1d6\xa7\xd2\x8a\x82\x98\x09Q\xfbD\xb5\x1b\xd0\xe0\xaesn\xea\xb0j\xd9!C\x1a=\xea\xbb\xe2F\x15\xe4\x
cdY\xd5h\xd3\x0ei\xfb\xb0i\xc5\x92s\xea\xdd\xd0\xeb\x92\xb8\x1a\x9d\xb6\xb49o\x87S^ReD\xc1\xc7}OQ\xab+zT\x19\xb3\xa6\xdb\xbaj\x0b\xda\xdc\xb4O\xbbg\x84\x94\xab\xb0\xaeR]~:|\xba\xe37\xa6\xde\x92\x18\xf2\x9e\xe2\xdf\xfbK\x8b\x00\x97\xfdF\xe7\xccwC\xf9\xf9P\xa5n\xe5Fm\xa8uC\xda\xaa~\x0d\xca\xdc\x115\xa9hK\x8f\x80\xc3\x1a\xdc\x95P\xd4g\xd0>K\xdam):\xa2F\xceMe\x0e\xb8-\xaf\xd9\xb0z\x8b*\x14-X\xb5\xa1\xe7\x9eKA\xc8\x86e}JJ\xe6,h\xb3,f\xdd\xb6\x98:U\x02\xb26\xc5\xd4Z\xb4\xa9\xc9\x8a%\x0b\x86\x1d1\xafIF\x5c\x8dQ=\x06\xf4\xb9,\xae\xd9%\xb4xV\xad\xddv;*j\xc0\x07\xac\xbb)\xa6\xd6^\x05\xafxP\x87U\x8f\xfb#uRz\xd1e\xf3\xde\xaf\xda\xf6\x92\x06;\x0dJ\xaa\x13\xb7Q\x8a\x07\xa7^\x1c\x19|K\x00y\xe6/\xbd\xf1\xdd\xd1\xd3\x0d\x93c\x9b\x85\xad\xc8Iak\xe6m\xeaq\xd5\x0e[R\xceI\xc9\x0a[\x135\xa5^\xdc\xb8\x82E\x19%k\xc8\xdb\x10\xb0\xa4\xde\xba\x19}\xf2\xc65\x8b)\xb3(\xe7\x90\x98a\xe3\xb64\x1bvB\xb9j\x95\xf6\xba\xa4U\xa5Ji\xfdZE\x8d\xd8a\xdeC\x0a\xaa\xddR\x94Ui]\xc2\xac\xa2>1\xd5\x86U\xe8\xb5,\xad\xd2\x92rI\x0d\x12\xba}\xc5{L*\xe8\xd1/\xab\xdbIY\xe5^U\xed\xb0?\x970$d\xde\xa4f{|\xd5\x09cn\xfa\xa8\x05\xbd\x08\xca\xa8\xd4\xafO\xc1\x84\xa0.Sz\x1d4/b!\xd8\x90ii\xf9\xce\x17\xa7\xf4\xffxc\xc8\xe1\xe0\xb3\xd9\x89\xf3\xe9H\x22\x1a\xb5-eHD\xd1\x1d11)G\xcc\x0a\x1bWo\xc4\xa6\xb4[\xaeYRa\xd1\x13\xca\xb4H\x0a\xa0[\xc8\x94F\x8c\xbaORF\xa3\x01a3V\x1cQ\xa7BHZT\xd2\xbc\x1d\xd6u\x8b\xb9m\xcc\xa4i;\xad\xb9\xeb#\x82\x12&\x1c5\xe0\xb8:C*4j\xd1x\xcf\xfd#bAJ\xbfy\xf5\x0a26\xac\xa9uE\xcaI\xdfS\xee\xa41\xfb\xed\xb6C\xab\x1a[\xdeZ\xe8\xf9\x00\x00\x00 
\x00IDATg\xd3\x0d\x87\xddv\xbf\x05\xb5\x1e\x15q\xdbg\xfc\xd6\xbd\x95\x03?eH\xaf\x15\xfd.:f\xc2\x11!a\x9f\xb4K\xb9[\xce\xb9#l!t\xe9\xc4_\xad\x1c\x8f\xbe\xc5i\xef\x7f{\x9d\x08},0\xf7J\xc7\xa1d\xa0;\x906)\xac\xc2\x9a\x92\xa0\x0d)\x15F\xd5\xd8\x83s\xca\xec2\xa6^\x99\x98\xb4\x0e\xb7t\xba\xad]F\xc4]\xadR\x8a\x22vyIF\x95J\xf32\xc2v\x9bw\x17)%\xe5\xaa\x04L9\xe8\x92\x159\xb5n\xd9k@\x5c\x95K2\x02v\x88\xda\xd2jX\xd7\xbd\x81\xd06AQ\x9bVL\x88\xcai\x924\xa1\xc2\xb6\x163\xf6\xb8!\xaaRLH\x83\x1d\xb6\xc4\xac\x18\xd2j\xca\x8a\x92\x98\x0d\xcb\xceYrE\xaf\x01\x9b\xba\x0c:h\xd6W\xfd\x82\x97\x9c\xb4\xd7\xa2#\x86\x5cT\xa3\xc2\xb0\x8cr\x197\xed\xb3j\xc3\xc3e\xfb\xff\xddTz4\xffc\x04\xe4WBo\x1ch\xf9W\x91\xc0\xa9\xc0\xcb\xba\x05\xac\xabRa\xc2\xfd\xc6d}V\xc4\xe3\x92*}]\xab\xa4\x9fq\xdc\xab>\xec\x87N*(S\xaeR\xd8\xa8.c\xce\x98\x10\xd2l\xd3\xb6F\x05\x8fyCB\xd8\xb2\xbd\xae\x8b+\xda\xe5\xaez=\x9e\xd5!/)\xe3qo(\x09\xa0N\x83E}\xf2z\xfd@\x85\x06\xf3\x16%\xe4l\x98\xf7A/\x0a\xdb\xb6\xdf\x9b\xc2XwBF\xc8\x86z\x0b\x1e\x12\xb3G\x9b\x8fk\xd1\xab\xdd\xbb\xf4\xdae\xcb\x09O\x9b\xd5\xe2e9\x0f\x9bvD\xd6\x09-.xX\xb5\xab\x22&]r\xc6\xf7\xfd\xaa=ny\x8fk>\xe5\xcb>mFJ\xa7\xea@\xc4\xcd\x8a\xe7\xbe\xf1h`\xec\xc7\xa5C>\x18\xf8F\xe9\xcc\x1b\x95\xa7K\x96\x1cuWH\xcc\xa6\x84*\xb7\xf49nY\xec\xde\x0c_\xb3\x174\xf9\xbe\x06\xdd2r\xb2\xda\xcd\xeb6\xad$eI\xbb\x90\x8c\x88\x9c\xe3\x9ew\xd6A_u\x9fY\xbb\xbdiNP\x87kz\xd5h4\xae\xd3\x0bZ\xad\x8b\xa8\xb0h^\x97\x06q\xc3*4y\xc2\x0d\x19ikB\xf6\x9b\x17\x154\xa3\xa0\xd3\x80&\xb3\xb2\x12\xc64\x8aH\x8a\x89\xe8\x93R\xa9CP\xabA;,\xc8\x8b\xa9\x16Q\xe6\x0d\xb5\xd6\xcc;m\xdcI\xa3\xb6M\xeaU\xaf\xd57\x1c5\xee\x9d.\x98\xb4\xd7\xb2Fy7T\xaat\xcd{\x9cWiS@\xab1\xe5c\xcfu\xef\x08\xcc\x97~L1\xe4\x1b\xa5\xbf\xf3`\xe3\xe9\xf2BJ\xc9\xb6\x1dr*%\x8c\x88x\xd4NIW}\xc2\x96\x05\x0b^\x90\x13q\xc2>qi\x01KnKY\x93\xb6- 
lQ^A\xdcQ\x01QE\xcd\x1e4\xae\xdc\xb0\xfbu\x0a\xc8Ik2bZT\x9b&aQ-Z\x95\xf9[\x8a\xae\xc9+9`\x5c\x87\x8b\xae+\xc8\xaa2$\xab\xe4\xbaz-\x9e\x177\xa8\xa8E^\x99\x15\xebrR\xee\xd3\xeb=\xea\xfd\xa2\xe3\xc6u\xebw\x9f*-\xd6\x8c\x19V\xee\xa6f\x9db\xda\x9c\xd7\xed\x94+\xc2\xaeZv\xd8%\xdd~G\xccN\x9dv9\xe2\x92VIq\x1d\x06\x95\xd9vF\xce\x84\xd5b\xb2\xeb\x17\x1f\x9e/\xfd\x98\x1eYO\x84\xfbbS/\xd5E2\xb1e\xf5\xb2&\x04l\xebs\xda\x86e]\x16\xed\xf7\xacA\xa7\xcc\x9a\xd5\xe7\x82\x88\xa4\xa2\x1aI\xd5\x1a\x14Lkv@I\xbb\x94^\xe3\x12\x86\x1c\xd5\xafQ\xd45\xadR*-\xb8\xa1\xce\x92.\x0b\xaa\xf4Y\xb7%#&gI\xbfOy\xce\xafH\x98\x97\xb4\xe1\xa0)?g\xc2\xaa:\xeb\xaaU\x19\xd4'iZ\xbda{EL\xc8\xd8V\xafL\xc8#:\x0c\x99s\xda\xdfw\xcc\x1e\x19\xef0\xe0].\xb8OJ\xad\x82\xac5ur\xf2\xf6\xa9\xb1\xec\x9f\x18\xf2II\x074Hk\x90Uf\xda\x94W\xdd\xaf^\x8b9\x0d\x22VD\xddQ\xae`\xb7l \xf5\xec\xf0\xcd\x1f\x13 \xd5\xd1t\xa3_[\x89\xac\xdbm\xce\x82Fq\xb5\xaa\xec\xbeg\xed\xdbe@\xb9\xb3n\xc9\x09Z\xb3\xcb>%MJ\x0e\x19\xb1j[\x8b1\xf3\xb6\xec\x96\xbcgv\xd4aRL\x87I;\x0c)\xd8\xe1\xb2fM\xeeb\x8f\xa4e\xd5\xd6mZ\xb5\xa4\xdcI?p\xc8\xb89\xef6'l\xbfQ3\xd2j,\xda\xe5\xbc-U\xca\xdc\xd0iB\xa5\x0d\x9b\xf2bJ\xf2\x12*\xb4\xb9)`\xca\x80n/\xdbT\xf4'z]\xb2\xdb\xba\x1d\x92^\xf5\x88\x80\xb8\xe7\xecsQ\x957\x94LH\xd9))+\xa2R\xd1\xb2\xd9{K\x00\x1f1\xa1\xd9\x9a^\x93\xc2\xaa\xf4\xda\x16\x0b\xacgj\xean\xff\xc9\x8f\x09\x90\xd9\xfc\xe1O\x87~j\xaf\xa4U!\x9d:\xd4\xe8RtU\x8fM\x95f\xad*\x18\x10\xd2\xad^\x9d\x805\x8d6\xe5\xcd\xaaQT\x10VR%)\xa5\xce\x92\x82\x84\xbc\xfd\xeeh\xd5\xee\x9an![:\xcc\x1auH\xc6\x96%\xa7\xfd\xd0a7\xed\x97\xf3\x0eW\xfdk_\xd0h\xaf\x19M\xde\xe7E'l\x19\xb4\xa2\xcf\x84\x1djM\x98\xd3m]\x10QQs\xba\x85U\xab\xd6\xe5\x8eMi\xa7\xb5\x9aR\xebu3\x0a\xee\xba\xe4\x8e\xa4U\xa3\xde/\xed\xdd\xe6=)\xec\xac\x94\xa3\xc2\x1al\x99\x13\x11\xf0=U\xa6\x95t\xb8\xcf\x84\x8f{J\xd8\xb8\x92U\x19Uj,\xda\x16\xb1]Z\x89\xff\xec\xefv\x06\xaf\x17\xder@\xee\x0fL\xda\xf1\xf3\xe7N~_L\xd6\x01]\xa6%LX0#)dE\xaf\xb4\x06\x9f\xb2$jV\xf2\x9e)]F\xc8\xaau{lK\xaa\x90\x16\xb2\xea/\xb6=D%<\xee\x15\x95\xba
\x8dI\x08\xd9\x16\xb7%,l\xcd!E7\x9dsUA\xde9m>\xe8\x05\x1f0/&\xa5\xda\xbc\xfb\x94\xb9\xa0M\xab\xb4Y\xac\xe8P\x89Y99\xd5\x96$l\x89\x8a(\xde\xeb\x96\xef\xf0\xbc\x80\x0d+\xda\x9d\x90v\xc6\x86\x84\x98\x11\xfbMx\xc8s\xe2\xda\xa5t\x1a\xb1\xc7\x84\x8cf\x9bZ\xfd\x91c\x96\x1d\x93\xd4\xa0\xdf\x09\xcf{\xcc\xf0\xbd\xbe\xcb\x16\xdb6l\xaa\xb6.\x15\xaa\xaa^~qz|\xb2\xf8\x96\x01\xf2\xb5\xf2O\x14\xbe\x5c\xa270\xe6\xe0/\x94\xf6\xa6t\xeb5c@\x83Y\xcb\x82\x1a\xec\x12\xd5+aM\x87\x11\xbb4\xa9t\xd6\x9f\x1b\x94\xb4!,. \xae\x88\x84\x02\xe2\x96\xec4\xa5M\xa7\xe7l\xeb6$.\xe7\xb4E\xd7T+Zt\xd4\x98\xa4\x93\xeeh\x92\xd0\xa9\xd7#\x0e\xeb3*&aC\x99&\xb7e|Z\xc6U)Q\x8d\x98\x95\x15\xd7%\xa0\xcd\x98\xa0\x9d*%\xd5K+\xa8\xb1(\xabEF\xbdMy#\x82.\xcb\xea\xd6/g\xcd\x867u\xcbz]\xb9\xbb\xd2\x06\x9cP\xa1ZT\xde&\x0eJ\x1a\xf4\xb0\x1b\x1a\xd5\xfa\x8a\x1a\x05\xcb\xees]\x5c\xfe\x9e\xf9R\x9b\xf5B\xd7O\x7f\xe3\x9f\xb5\x056\xde*@>QT\xfa2\xc6\x10\xaf.<\xf9\x98\x8c\xab\x96\xd5Z\x92\xb0m\x97u\x7f\xc3]\x8b\x02v\xb9\xdfW\xad\x99R0`PVQ\xc1\xba%q\xb326\xb4)\xb3%)&\xa5 
dJ\x8fI\x09\xcd\xf2\xd2Rrv\x1a\x94\x905\xa5K\x9f)\x15\xc6\x05\x15}@L\xb3\xa7E\xect\xc9q\xd7U\x8b\x88Y\x92\xf7Y_\xd2h\xc0}&\xd5\x9a\x97V+oYF\xda\xa60\xe6\xcd\xa8\xd0mCN\x8fAQ\xd3N\xc8h\xd0\xe7]\xe6u\x99\x92\xd0\xe0\xb25\x07\x044\xcbj2\xaeF\xbf\xb8\x06%\x1f5\xe1>7L[Q\xee\xa6>\x11\x0b\x1e\xf6\xa2\xbf\xee\xa6v\x0bf4(Y1\x1f99\xf4\xc3\xebo\x19C>\x11\xf8Hi(\xf0?\x05~\x18\x98\xd2\xfd\xb9\xbe\x93\x97-\xab\x11\xb5(\xa9\xd1G\xcc;\xeb\xbfh\x93\x912\xe0\xaaSn\x18u\xd3\xb8\xb0\x82\xb8\xbc^u\xda<\xacS\x8f\xd7\x95\x84T\xca\xc8\x0ak\x11V/\xa9KRD\xc0a\xb7\x5c\xb1\xcb\xbc\x06Y!+r\xcae\xfc\xbc*\x87\x1d4`H\x9b:?\xe7\xbb\xea-KZQ\xab\xde\xef\xfb\xfb\xb6\xef\xbd\xa9\xdc\xd4\xe7\xfa=\xd7\xb6FA\x15rV\x05\xd5\xab\xb0*\xe8\x8c\xcbv\xe9\xb4\xa5ZL\x8f%i\xf5\xb2\xca\x8c*j\x93\xb6\xa8\xc2\x01\x19q'\x8d\xd9\xebu\x011_\xd0\xe9\xcf|\xe4\xde8\xd1\xa2f5\xae\xe9w\xd4\xef\xa8\x135\xaaU\xd2\x98\xea@y\xb1u\xe3\xf27\xff\xa2M\xea-\x00\xe4\xcb\xa5\x7f\x15\xd8\x11\xd9\x0an\x17_U\xbf7\xfb\xae&{\xfd@\xaf\xa8\xfd\x02bR\xd6m\xaa\x95\xb4b\xcb\x9a7\xcc\xebR\xee\x8c2\x1d\x86u\xd9\xe1\xb4U\xcb\x86\xe5\xf4\x09\xcb\xd9Vk]A\xda\x96f\xb7\x9d\xd1\xe9\xa2\x8f\xfa\x862\xfb-\x9b\x15\xb2e\xddCF\x8c\xf8\xcc\xbd^\x91\x0e\xfdV\xec3\xece\x8f\xaa\xd4e\xd9qo\xd8\xa1\xd6\xc3\xfe\xcc\xcf\xb9&%,l\xb7\xdb\x82\xb6\xd4\xb8+\x8f\xa0*\xdc\xc2\xb6\xbb\x1e\x15\xf2=U\xaek0\xa5\x071\x0d\xca\x9dr\xd3\xbcV!\xb3\xeej4\xa0\xd2\x86\xd7\xdd\xef9\xed\xeeZ\x904$/\xee%\x1f7\x22\xe7C\x96m\xfb\x80*\xd3\xf7\xba\xb7\x1e4k\xa9\xd0^W\xfe\xbb1\x97\x8bo\x91R\xff\x97\xf1\xffe\x1b\x1e\x0df\x1e\x0b>\x97\xb3\xa4K\xaf%q\xed\xee\xaa\xd4\xae\xe4\xfbf\xb5\x8aI\x98\xd4&\xa3\xca\xb0J3\x9e\x900\xaeVZ\x5c\xcc\xa8\x92:\xb3\xe6u\x98\x16G\xbb\x15\xd4\xeaQ\xe9\x82\xc7\xbdbF@L\xce\x96\x87\x8d\xb9\xed7,;j\x9f\xb0\x7f\xae\xcd}\x92\x22\x1a\xdc\xd6 
g\xd3m]&\xc5\xec\xf4>\xbf\xa9\x03\x7ff\xce\x8a\xf7zN\xafM\x94\xac\x8b\xdaP\xd2,+\xa3^T\xc4N\x0b\xbaMI\x18\xf1\xa8\x01\xcd\x02\x1a\xb4\xf8\xa6A\xef6\xa1\xc1\x9a\xcfy\xcd\x82'=\xeba\xb7=\xe6e\xdd\x86\xf5\xf9C{\xfdPH\x85I'\x8dX\xd2\xa0\xc3\xb0\xc3\x86,\xaar\x9f\xefk:\xb3~\xed\xa5\xed\xb7J\xa9\xb7\xf1\xaf\x02\xf4'\xfa\xa6W\xc4\x1c\xf0\xb3\x02\xe8q\xd4A\x01\xcfx\xfa^\xfb~PV\x5cZ\x8dM\x09\x0d\xfe\xb6\xb3\xe2\x22\xc6T\x18\x11\xd2\xaeNZB\xa3\xa0\xbcFk\x0a\xf2j\xac)8b\xd5\x97\xecP\x14\x95\xb0\xa4\xd2\x909\xff\xdc3\x1e\xd7m\xdb\xaf\xd8\xe1\xa4y\x19}&4+\xaa\xd1\xec\x13j\xd5X\xf7\xa2\xbf\xe1\x03\xcey\xca\xff\xe1Aa\xaf+3/mQZ^X\x99\x80Yk\xb6\x8c\x09i\xb4O@\xdce\xedR.)\xf3]S\xd2^\xf4)\x9d\xae{C\xbfa\xff\xc2!5\xf7\x0c\xc2WM\xf8=/y\xca\xd7,\x1a\xb2\xdf\x84\x0aUf\x959+%+\x22g\xdd\x11\x07\xddrL\xf1\x9d/m\x7f:\xf0\x161\xe4\xff\xbd:\x83-\xdf9\xf5\x9emY\x95\xa2\xa26T\x99\xf5\xbaV%\xf1{\x15\xd5\xb8\xdb\xca\xa5\xe5\xc5\x9dT0\xae\xca\x80^\xb3\x96\x94\xd4\xcb\xc9\xcb\x8b\x98Wvo-_\x9d\x19\xbb4\xd9\x90\xb3\xc3y\x11IeV\x1d7\xaa\xc6\x87\x0449\xe6\x1f{\xbfZ\x01\x11a\xebZ\xcc\x0a\x8aZ\xd0\xe3\x88I\xcf(z\xc6\xb2\xf7\xfb\x19\xff\xab'\xe4\xfc\xba\x1a\x09\x05\x9b\xce\xb8\x22\xabK\x9511\x09s\xa2\xf22vi\xd3eK\xda\xaa\x05\x95\x16\x14\xdc\xaf\x5c\xbb+\x1a\xf4\x1bR\x12\xf7\x01#\x0ey\xc1\x83\xc6\xd4\x9aP\xe5\xbc\xd3\xd2\xe6\xc4,y\x8f\xafz\xc2e\xcd\x164\x08*\x9a\xd2*\xa9(w\xf1\xa9\xfb\xde\x13|\xa6\xf8\x16\xe9\x90\xdf\x0b\x9c\x0f\x7f\xa2;\xf8\xcc\xe3\x8fT\x96f\x02\x09EE#\x22.ZS-\xa2\x1a7\xec6'oB\x99\xb8\x83\x8a\x96\xbd!gU\x9dq\xfdv\x08\xdb\x16\x91PiF\xbd}\xe6e\xe4m\xc9\xa9P'\xab\xc7\x05eBbvX\x14\x12\xf5!m\x1eT\xed\x8f\xe4\xb4\xdb\xb6\xa1I\xca\xa6\xa8\x98\x88\x8c\x06k\xbec\xa7'=+\xe8\xae\x84\xff\xe4\x90Y\xcf8`CP\x19\xf7\xbcD\xd7\xc5\xeei\xff\xbf\xd8\x10\xb1a\xdd\xa6Mq\xcd\xe6\xed\xbf\xb7\x91~\xc1\x1d\xbb\x04T\xe8\xd3/&oP\xc4\x01\x19{\xad\x09\xd9\xd0\xe4\xb6\x0e\x13\xaa-\xeb\xf25EI\xad\x06\xb5KI\xcb\xaa\xb0(f;W\xd3y\xfd_~\xa6\xf0\x96\x09\xc3o{_\xf7\xd2\xd0;\xda\x16e\x03\x0d6\xec\x13\x11\xb1\x846\x9b\xaa\x15m\xd
bm\xcb\x965E\xb5\xf6\xdar\xce\x8bN\xdbRa\xdc\x09\x1f\xb5\xe0\x01AA\x95\xb6m\x0a\x08\xd9\x16R\xa3\xdc\xba\x80\x80^7\xb4[Rn\xd2\x9c\xa3R\xf68\xa1\x5c\xbb\xa7\xd4\xeb\x10U\xaf\xc9\x84\xa8\x1aSb\xd6\x90pG\xa7*+^r\xc7^\xf3\xf6YqQTTP\xa7I\x1d\x8a\xeaU\x8a\xa9r\xc7\xbbMH\x98tHQ\xd1\xea_\xe8\x06\x87\x0d:*\xec\xa81-^\x104oE\x8b5K\xd6\xe4\xcc;\xe9\x8e\x0a9\xb7\x0dz\xd0\xb3\x0e\x8a\xabt\xd7neZ\xddr\x5c\xd2\xb6J\x0b\xe2j\x0ch\x0bd\x02\xffvb\xe2\xcd\xb7\x04\x90g\xe3\x99\xc0\xc1w\xaf\xbe\xf1\xae\xd2\xd7\x03\x8f\x98\x91\xb5\xe8\xac\x17-\xea\xb2eCF\xb3\x05;\xadk\x96\x107\xaf\xc7\xa2S\xfe\xd8;\xbc`S\xb7\x87\xe5\x5c\xb3`\xdd\xa0&\x93\xd6\xc4\xe5\x0d:\xe2~\xd7PPg\xb7QSv\xb8!\xa0\xd2~[vj\xb4\xd39\xdf\xb1n\xceq\x9d\x06\xcd9h\xd9\x96\x03\xee(\xd7d]\xd8\x09\xd5~\xdb\xb4\xbcv\xdd\xa2X\xd6$c\xc6~\xd3\xf2r\x16,HZ\xb6*\xa5$ `J\x97\x11;\xb4\xb8\xe4\x84[\x1e6\xe8g\xcc9b\x9f\x0ey\xfb\x8cx\xd4\x0d\xcd\x16l\xa94\xa7E\xd6\x15}R\xd6\xbc\xcf\xa8\x11\xdb\xf7\x14z\x8d\x1ew5\xb9c\xc4Acrv\x19\x0e\xc4\xb7\x1b\xaan\xfe\xc9p\xe0w~\xf4\x80|!\x7f\xe2P\xe6\x07\xe7\xb6g#\x07\x5cR\x86\xe3\xbej\xa7\x07L\x0a\xd9\xe9!\x0d\xcad\x9c\xf3}\xab\xa6\xbdO\x97\x88\x17tzJ\xc9\x01i\xd3\xaekWT\xa1\xd7\xb8Y\xfb\x0c\xa9W.\xede\xdd\xda\x8dJc\xc1\x11\xb3\xea\xc4\xa5\xa5|X\x99\xc7\xd5\x1a3\xa2Q\xbb*W\x9d\x110\xeb\xb89\x13z\x94d\xddp\xd6>\x9f7cN\xa5\xac\x0a\xaf 
&nL\xbb\x17|\xcc\xaaw;\xa3C\xb5\x8c\xa4v\x03:D\x85\x1d\x931k\xce^\xcfi\x95\xd6\xe5\x0f5zS\xd8\x8aN\xaf{\x97\x97\xec\xd7\xe1\x86\x0a\x13\xeam\xea\xd0\xa8\xd1\xaa\x1e\x17E\xf5\xd8\x92Wn\xdd\x92v\x19S>\xa3\xd6\xb8\x07\xad\xba\xa6VYnih\xf4O\xbb\x02\xcf\xfe\xa8\x019\x1a\xf9\xd7\xed\xe7\xaf\xe7d\xa2\x93\xd2Vu9\xe0\xb6\xa0\xf7\xbaj\xafQ\xfb,\x8bku\xc7\xb45UNY\x94\xb2b]H\xd4\xe7|OTQ\x85Z\xadz\xd5\xd8\xb6)\xa4\xdc\xbc\xbc\x0dMJ\xa6\x84\x85\xc4\xad\xaa\xb6&h\xc6\x96'\x94;\xe6\xa0\x82\xaf\x8b\xdb\xa9\xc7+>\xeey\xd5x\xcda\xbb\xdcUiA\xc8#\xfe\xb3\xdb\xd2\xda\x9d\xb0\xd7m\x1b\xaa\xacJ\x88\xa9\xf1\xab&\xfd\x9c\x0b\x0ey]\x85\xb8\xe3^\xf5nW\xd4)zS\xaf\x14J\xa2\xb2V\x5c\xd5\xe55\x8f\xf8\x92\x13\xbel\xbf\xa0J1w|\xc4\xbaG\xfdgG\xdc\xf4\x80\x1b\x16\xc5\x04\xb4(\xb3\xac\xcc\xa2\xfb\xad\x1a\x14\xb5\xc77m\xcb\x1b6!\x86\xbaB4y\xeb\x0f\x0f\x05_+\xfdH\x01\xe9\x0a\xdc)l\xfej\xf7C\x9d\xc1\xa0fU\xba\x0c\x9b\x11\xd5\xe6\x0d\xcbn\xf9\xac~9\xb3f\x9d6\xa4\xda\x07\xadH\xb9\xadF\xb9\x8c\x16/\xe8\x14\x95\x96\x13\x913 \xe0\x0d\xdbH\x0a\x8a\x8bj\x94\xe5\xden\xd2\xac\xa2\x94\xbc\x88w\xd8\xb4\xcba\x19_P\xe1\xb4%\x83\x1e\xf4\x82\xfbm\x8a9\xee\xa25\x9d\xde\xb0\xe6o\xf8\x9e\x17\xee\xbd\x7f\x7f\xdd\xb0!{\x15\x95T\xda\xa7\xde\x17\x0dyV\xd2k\xa2\xfa}\xd4M\xfb=\xa7\xc9\x94c\x1e3k\xd2A7ee\xa5\xd4\x88y\xdc\x94O\x9a\xf3\xd3\x16\x1d\xd5n\xcaY\x9b\x9a\xbd\xec#\x9e\xd3\xe5\xa2\x16\x1f\xb2\xe1\xc36\xf5(\xa8\x934\xaa\xcb\x0dGL\xeb\xd0\xacd\xcd~\xd5\x0e\x1a($\x92\xb7\xfe\xe0y?b@V\xd1\xfe\xa5\xe6\xb27=\xe2\x0e\x16\xb5I\x99\x960\xafZ\x8b\x97\x9d\xd4\xa6\xceC\xbemD\x9f\xbc\x0ds\xba\xad\xb8k\xb7\x90\x88\xa8\xdb\xdaT\x09k\xb1b^J\x8f\xa8\x80\xa8\x0a\x9b\x1a\x84\x05\x05u\xd8P\xad\xa0\xc1\xba1\x1fRgZ\x8f?\x96\xf7N\xab\xca\xb5\x1b\xd5i\xd3\xb4}\xee:lV\xce]\x7f\xd3\x9a\xdfT# 
!'!k\xda\x1e+\x9a5XU/)b\xaf:\x8b:m\xf9\xa13\x06\xf4\xb8\xab\xd2\x84\xb4\xb0\xc3\x16|\xcc\xb2Sn\xd9\x924\xacUNZD\xc9\xba\x195&u\x9a\xd3k]\xcd\x7f]\x15X\xe5\xbc\x87Lx\xc0\x8af;\x14\xfd\x9c\xef\x8a[\xb1\xa8\xc1\xba\x80\x84\xef\xeb*\xc62\x7f\xe7\xf73\xc5\xd1\xd2\x8fT\x87\xfc\xcf\x81\xdf\x0c=\x99\xdbS\x98\x0b\xe5$u\xc8\x9b\x13Q&j\xd3A\xc3>nX\xbfj\x13J\xda\x84m\x1bTgU\xc8\x03v\xf8\x92\x90\x98#\xf6\xbb\xadF\xd4\xb0qE3jd\xe4\x04T\x09\xdbR.\xa9B\xb9%\x19\x05\xbd\x1eWpN\x8b\xff,\xe3}\x86U\x08(\xda\x92UTkY\xbd[Br\xde\xaf\xd2\xaf\xd9g\xc0\xb2j\x01\xf3\x16\x95\xecR\xd4nF\x85q\xcd\xa2\xee\xa83%\xafW\x85\x98O\xfam{=\xadK\xd2/\xbb`N\x93\x9c\x8cE\x15&\xf4ZsL\x9d5\x0b\xfal\x1au\xcc\x0e\x97\x1d3\xa9\xde\xb6-\xc7|_\xab>\x8bV\xadZ\x16\x12\x12\xb5,e\xc8I\xb7\xc5E\x05%\xb5\x9a\xcd\x96\x86\xda\x8f-\x15\x9e*\xfcH\x95\xfao\x96>\xf5X\x5c:\xd0.\xacL\xd6\x82*\xbb\xec2,\xe1\x96\x1e\xdfV\x14\x93\xb3\xee\x90v\x1b\xae\xca\x0a\xe9u\xca5\x7fdK\xd8c\xea\xdc\xbd7\x09\xfb\x1dC\xf2J\x22\x96\xd5\xab5\xaaUHX\xaf\x16\x8bv\xa8\xb0\xa9$\xe4\x98.\xaf\x1a\xf2\x80\xa5{+.\x0b\xa2\x12*\xa4%\xa4m)xP\xb7?\x10\xf7us:\xc4U\x18W\xa1\xd6US&\x8c\x89KX2&m\xc5g\xed\xd6\x80\xa8g\xc5u\xe91#\xe8\xf3\xaa\xa4\xecsS\x9d\xbb\xc2\xb6\x8d*\xf9\x8a1\xb7\xb4\xdb\x16pD^\xd2\x11\x05\xd5:\x95\xe9D\x8d.\xdfS-\xafS^\x8d\x90A%\xe3\x1aL)\xa8\xb5)\xa4L\xbfw\x95\x96r\x7fT\xfcT\xe9G\x9e\xf6V\x9f\xab\xf8\xe0Z\xa9>0\xab\xd6\xb6\x90\x84Jc\xf6J\xeauQ\xb5aQ\x19\xa7\xcc+zS\xb9\x84\x9c\x06s\xc64\x0b\xdbv\xdc\x88vc\xe2\xae\x8b\xd8mJ\x9d.\x01\xf5\xc2\xda\xcc\xe8\x91W\xe5\xb6}V%\x04\x14\xa5\xfc\xa2\x9b\xae\xd8%\xa0\xa0\xce&\xa22\xe2\xc2B\xc8\x0b;\xe6\xb8?1$o\xa7\x16\x8bJ\xae(\x09\x88\xdd+\xcdtY\x10\xd5f\x87z\x95.\xab\xb3[F\xb3:\x8b*\x94\xcc\xabW\xe7\x1cr\x0a\xae\xe96)\xafLB\x8f\xd7\xedP.(\xae\xceuM\x02n+sS\xa5q\x0b\xe2n;\xe0\x8ez\x13\xcat\xc8\xe8R'\xa9\xc7\x9c\xdd\x86E\x94\xeb3\xea\x95\xdc\x87n=\xfe\xc7\xdfR\xfcQ\x03\xb2\xf7\x97\xb3\xc7\xd2\x81R\xa0\xda\xb8\x13\x1ep\x03W\xfc\xb2!\xe3\x9e\xb4e@\x8b\x8cu\xf3\x06\xecTn^\xafQ\xab\xe22\x1a<\xa6K\xd2~I'5\xbb(
f\x87\xac\xa0\x93.\xe9\xb6\xe5\xb0V\x8b\xca,Z1\xa9A\xab\x1aY\xaf{M\x97\xa8\xacz\xcb\xb2\x1a\x5c\xd3kV@\xd4\x96iuNH\x9as\xd9\xb4\x805\xeb\xee\xaa\xb0\xa1\xd6\xa6\x9cj#2\x826\x95\xdc\xb1\xd7\xb46#:\x8dZ\x15\xb0.k\xd2;=\xaf\xc2\x1d\xcch\xb6\xa1\xd3\x9c\xb0\xa4\xa2\x09E9\x11\x9b\xf6x\xcd\x09/Ys\xccm\xadn\xcaJX3)a\xce\x9c\xbce\xc3\x12\xee\xba\xa1\xdd\x1b\xda\xdc\xd1 $o^\xad\xfd\xb9\xa7G\x93_\xec\x0f|\xfeG\x0dH\xc7'v\x1c\x88\x06;\x02%\xf7\x99\xf4\xba\x1e[\xfa\xfc\xb9cN\xf8-i\x7f\xc5\xa2Z\x1bBZ\x8d\xfd?\xbc\xdd\xf9\x97\xe5\xf7}\xd7\xf9\xc7\xdd\xean\xb5\xef\xd5U\xd5]\xd5\xfb\xaa\xde\xd4\xad\xdd\x92\xb5X\x8ee\xcbv\xec$\xc4\x0eQ&0\xe3\x0c!0\x01\xc2\x0c\x13\xc2\x048\x81\x01\xcep\x86\xe4@\x06HH`\x1c\x93\x00\x89\x8d\x978\x96lI\x96\xb5\xb4\xa4^\xd4{WW\xd7\xbe/\xb7\xeeR\xf7\xd6\xad\xbb\xcf\x0f\xae\xc3\xe1'\x1f\x12\xda\xf3\xfd\x0f\xee}\x9e\xcf\xf6~\xbf_\xaf\x97N\x9fsIH\xd4\x96M\x0f\x1a7\xa3\xc9\x8a\x98\xef\xf8#\x0fK\x8b\xear\xc8\x9ac\x1ePUR\xb4\xaa\xa8I\xcd\x0bz=\xae\xdbS\x1e\xf0\xa0V3\xd6\x95\xb4\xe8\xb6\xeaAo\x19R7i\x9f\x92\xdd\xf6\xf9\xfb\xbe\xa5()\x81-!KZ$\xed\xd2\x10\xd4#\xe7G\x8ci\xb5O\xd5c\xbe\xe5\xd3\xae\xf9\xac\xb4&\x8b\xce\xd9\xed\x9e\x90U\x84=dNQ]\x9f\xb2n\x87e%\xccX\x91\xb6\xe8Q\xb3F\xd0\xa7\xa1O\x97\x1e\x03j\x9es\xcb\x90\xb3\xb2\x06tZ\xd1\xe1c\xbe\xe9\x9cU\xe7\xe4\xcc\xda#d\xd1di\xe8\xee\xe4\x1f\xfc\xf3\xfb\xffR\x8f\xbe\xf8\xd0\xc9\xb7\x03\x8f\x07\xe6\xbd\xacG\xa7\x0f\x9c\xf1\xa01%\xdf\xf5\x97\xed\xf2\x96Iui}\xee(\xdbm\xc9\x92\x88\x82\x98\xa7\xed\xf7cr\x86\x1cq\xdb']\xd5\xafdQV\xd6\x88\xb8N=\xe6\x1c4\xa6n\xafg\xfd91O:b\xc0\x80fC\x86\x1cvA\x87I\xa7\x5c\xf0\xa85<\xed\xdfx\xc6\x09\x7fECR\xd6\x87\x05\xecv\xc1\x84gm\x1bS\xf0s.\xa1\xe6]Y\x0fx[\xc2\xfb\x9e\xf7\x1d\x7f\xd3\xff\xe6'\xdd\x126\xe8\x9b\xfe'\xaf)*Y\xd5f\xca3^w\xc4\x0dm&\x84\xd4\xb5\xda\xd2%b\xd2s:<\xe8{NY\xb1\xdbu]\xd2\xae\xca\x8b\xb9\xa5G\xabw-j7fX\xc5\x86\x0b\x06\xb5Z\xb0\xaa\xd1(E\x1e\xfa\xea\xbb\xdf\xf9!T{\xff\xeao\xde\xf8\xc2^\xf5@Y@\xd0\x82\xe7\xbca\xda^AG\xad\xe9\x93\xb5\xa9(`\xc3\xb6\x17-\xab\xb9&/,\xe4A\x1
1kze\xb4\xa9\x8b\xb8b\xdd\x8b.xN\xd4-\x15\xddn\xe3u_\xb0\xa1\xd3\xcf\x09\xa9K\x0a\xa9\x0b\xa9\x88X\xd7a\xd3\x7f\xd0n\xda\x015\xbb\xacy\xc5oh\xf8OV\x5c@\x8be\xad\xbe\xe7G\x9d\xf7%]\xf6Z\x90u\xc6\x15mB\x9e\xf3eg\xfd\xa1\xe7\xbd\xee\xbc\x0b\x1e\xf1\x96\xc3\xe6\xf4;\xe3\xb6\xa4\x195!\x05\x075<\xe47\x1c\x12RqW\x8f\xb4v\x05\x19\xbd\xe6\xf4x\xd6\x8a0\x22\x9e\xf1-\xc7\x8d\x89\x1a\xd0\xe5\x82\x0emR\xea\xaeH*(;g\xce\xd2\xf7_B\xf5\xf9\xe0\xea\xdf\xfa\xe0\xff\xfc!\xd4\xb2\xf6\xfe\xe2v\x7f\xb5\xa9&\xa9\xd7uy\x1f\x88h\xf3\x19\xe7\xachsSV@\xd9\x98\x92\x8f\x08(\xda@\x9b\x94\xa7U,;\xed\xaa\x17\xb5X\x13\xd4\xa4\xa1j\x97u\xdbZ\xe5-i\xb7\xee3\xd2\xbe\xe0)EqM\x82\xa8\xba!\xe1\xdf\xaa\xb9*\xe0\xb3z\x1d\x14\x941)\xae\xdf\x03\xde\xf1m\xef\xa9\x98\x12\xd2i\x5c\xb7\xb0\xbc\x90\x01\x97\x5c\xf7\x90\x94\xa0\xab\x12\xbe\xe2a7}\xce\xb6M\x9d\xb8\xa1KtG\x00Z\xf1\xe3^UT\x974\x82\x091\xa3\xf2\x92\xaa6%\xd5-:iFB\x8bq\x9f\x16\xb6\xcf!\xf7\xbch\xc2\x03\x0e\xb9\xe2{\xfa\xc5\xcdJ\x9b1\xa0\xcf\x96\xfd\xb22Z,\xc9\xaa\xd4s\xc1\xdc?K\xdd\xbd\xcf@^h\x1ao\x8c\xfc\x1f\xcd-\x9b\x91\x13\x22\xc6\x0c);\xed\xbc\x0e9\xaf\xa9\x1a\x97\x156%!\xec%\xeb6,+\xaa\x98rL\xb3#z\xc5t\xbb,o^E\xd1\x86\x87\xac8\xa2\xc9\x8aC\x9e\xf7\xb2*\xcei\xd5\xa9\xa4\x82\xa8\xaa\x19_\xf7O\xb4\x08\xb8l\xd3\xa4~u\x17\x0d\xfb\x9ew\x0d\x1a\xf2\xfb\xee\x89\xdap\xde\x1eA\xed\xca\xd6\xb4;\x22\xe1\x01\x9f\xf2u\x83vyQ\xce\xa7|[\xd4\xaa\x0f\x84\xbd\xa9U\xbb\x88\x94\xaa\xc3.j\xb2m\xd4\x0d=6M\xaaz\xd4\x94\xbc\xd5\x9d\xcc\xf64\x8e\x9a\xd3&&\xa5\xe1\xba7\xb5\xfa\xa6mk2\xe6\xcc\xc88\xe1mC\xbe+\xaa\xcf\x86\x15#\xd6uj\xd2\xadS\xc0\xd9F!8\xf4\xafoL\xdfW 
\xff0\xfa\x9b\xe5\x9f\xf8\xa5\xb5\x1f\xeb\x8c4\xb3\xf3#\x0e:gY\xc5\x94\xa4\xb9\x9d\x86\xd1\x80+~\xd6eI\xaf\xd9+\xabU\xce\xcf\xcb\x08i1m\xc8\xbcqq\x8b\xf6jR7\xe4\x86\x1eq\xdb6\xbd\xa9\xd3Q\x07\xed\x91\xd1\xa7*.,\xa0\xcf#\x1e\xf4moY\xb3(\xa7\xc9\xcb\x0a.K*\xfa)\xff\xd0=\x01\xf9\x9d\x98\xa4-\x87\x1ds\xdb-9\xfb\xbceR\x93~C\xde\x93\xb3\x82\x87]\xd7iZ\xb3\x80\x8a\xb0\x0eQ\xf3;\xef\xed\x05\xbd\xe6\x05Q\xd0dXB^C\x8f\xaaNE\x1bj\xe2\x06\xac\x89\xcbk\xb2\x22\xa2]\xda\x1ewemX\x16s\xd9ai\x11UQkZT\xf5\x99\x16\x10\x13\xaeM\x87\xfe\xf3_V\xb9\xcfgH\xcf\xf1\x0f_\xef\xadm\x85&-yP\xbb\x80\xba\xaa\xaa\x80e\xcb\xda\xe5\xec1o\xcb\xa7\xdcv\xc8\x0d-\xaa\x96E\x8d*\x8a\xdbp\xde\xa4\x1e\xb7\xec\xf5\x8eAQ{\x05E\xac\xed\x14\x15\xbf\xee\x80\x9a/(\xdb\xab_ALEPRUM\xd2\x8a\x9f\x93W\xd4\xa7\xd9\xb6\x9a >\xe6\xdb\xfa\xdc\xc5\x9a\x84U\x0f\xd8-\xa5[HD\xd0\xb4\x01MZ5LH\x1a\xf2\xc7\xfa]q\xc2\xa6Qc\xe6\x0d*\x88\x0bI\x0a(9#\xac\xa0\xe4-\xa3V\x8d\x8a\xeaQT\xb3!\xe1}\xed\xaa\x02\x16\xec\xd3\x10\x16sC\xaf\xa2\xba\xe3\xda\x14U\x85\xcc\xe92\xe0\xba\x90m\xcd:\xac\x8bj\x92r@@\xcc\x8d\xcaf\xe4b[ w\x1f{\xea\x9f\x8a>\xdb\xfd\xc4\xab\xb1r4\xf4\xba\x98v\xfdj\xfaDM\xab9lI\x5c\x97^\xcb\xca\xf6Z\xb3W\xc9\xa6U\xf32\xba\x8d\xe8\xd2\xac\x84\xaa\x15%\xcbBBfl\xa8\xb9\xa8\xa8\xdbu\x93:\xb4{A\xc6Y+n\xeb\xd2P\x17\x97\x15\x16\x94S\xf4\xcb\x22\x12Z\xad\xd8\x90\xf6\x80\xdf\xf6\xaa\xa3&\x1c\xf6\xb0\x83\xaa\x0e\xb9\xe9=\xfdf\xac\x9b\xb3.\xe3\x82\xef\x18s\xcf\x90\xcf\xeb\xf4SV\xecSWQ\xb1\xee\x84\xac]\x1av\x0b\xda\x94\xf3\x86\x82[\xf6\x09\x8a\xab+\xa9\x09\xb8e\xcb\xbc\x823\x1a\x8eI\xebW\xd2\xd0\x8f\xe7\xcd\x09;\xe0\x96\xb7\x14L\x9br\xc2]S\xba5kV\x17\xd0\xaeS\xd6\x03\xba\xcd\xbb+\xd9h\x08\xe4\x0e\x04\xef\xe3\x96\x15\x88\xed>\xe6\x17\x0e\x872\xba\xed7 
o]\xd6\x80YW\xb4\x1a5\xaf\xa6\xe2\x80\xf3J&L\xda\xd40,\xef\x93\x96\x14\xb5\x1a4iI\xde\x88\x94\x84I\xa3\x1a\xdewF\xc6U\xdd\xae;i\xdaG-\x98W3$\xafENC\x02\x11aM\x12>\xe5\xbb\x1a\x82\xa2\x06\xfd\x84o\xd80\xe9\xa0\x197\xc4\xcc\xaa\xa2\xacE\xc2>M\x9a\x0c8$$\xa7G\xc8\xbf\xd2e\xccG\x9c\xb3d\xc1=]f\xf4\x992j\xde\xb6\x8a\x98.k\xce\xf9\xa6!7}\xc6\xfb\x9a\xac\xea\x95p\xd0\xa4\x0e\xdd\x96\xb5Y\xd7\x22/j\xc6=\x1fw\xdb\xac\xa4\xa0\xb2\x03viR\xd3\xac$!j\x5c\xdc\xa4\x8f\xb9cCI\xbf=\xae\xd6\x86Bw\xff\xee\xd3\xc1[\xf7\xabt\xf2#\x81w\xca}\xbfx\xfa\x91e[\x1e\xf6\x8e\xa7L\x99\xb6\xe41a\xabbv\xabY\xb4\xea#\x1e\xf5\x8a\x0d\x13\x02\x1a\xbaU\xbc\xe85\xfd2\xda\xa5\xb4{\xd7>E\x0b\x0e\xfb\x84\xbd\xda$t\xea\xf7a\xcb\xb2\x0ar\xce\xc8\x8a\x8b\xab\xban\xbf\xbc\x01\xaf\xabiCI\xab\x9a\x07\xfd\xae\xe36\xfc5K\xbea\xc6^\x93\x9a4\xcb\x0a\x08\xa9\x09\xda\xd2fBA]\xcc\x0d\xad~\x5cJ\x8f>\xdf\x13Rv\xc9\x17\x04\xfc\x887D\xe5=\xa2\xcbe\x05Cj\xca\xea\xdeqZ\xb3\xa2qu\xad\xca\xfam\xaaj\xb1$'\xa5o'\xf9j\xcda\x07-\xe8\xd7,\xad\xa4\xa2\xcd\x9c%sr\x92RJ\xbe\xe0\xa2N3\xd2(\xdb\xb2\xa0;\xb8]\x9c\xf8\x07\xbd\x81\xe9\xfb\x05\xe4D\xe0\x8e\xa3\x8f\xce>\xd9\xa1\xa0\xe4\x19\xff\xd0\xa05#\xde\xb2\xa4_\xab\xdb\xaa6\x1d\x92rA\x17;u\xa6\x92\x82uq_w\xc0\xa6\xeb\xd2zTM9\xea\xabR\xee\xd8\xd4\xe3\x03W\x5cU\xb2\xe8\x80sV\xb0\xcb\x8c=\xba\xfd\x81.w\x1c\xd6\x90\x10\xd0\x10V\x17\xd0\xe9\xaa\x8f\xfa\xba\xdf6\xa0dK\xcc\x8a\xa0\xba-I\x1d;\x03>\x8f:`\x9f\xb8#\x1a\xbe(!\xed:>#\xe2\x01\xaf\x18\xf4\x9f\xfc\x88\xbd\x16\xac\x9bv\xda~Wt\xc9\x0b\xfa\xa8E+RN\xfc\x97\xa1\xbd\x986{\x8c{QC\xb7f\x1d\x82\xaan\x996lRM\xdc\xb0\xb2q\xe7m\xeb\x17\xb3j@\xd5\xbfvH\xd5\x82'\xd5t \xe9\x9d@\xb4\xb0\xf0\x8f\x06\x03s\xf7\x0b\xc8\xa3\x81+\xc1\xdd}\xe1\xcff\x05q\xcf\x8a\x82\xc3\xa6= 
\xa7(#aZD\xb7\xb0\xb83\x82\xee\x08c\xdc\x19\x1b\x22R\x0ez\xc5\xb3\x96\x8cJZ\xb7\xedE{u8n\xc6A\xe7D\xdc\xd0\xe3\xb2^\x15}\x16\x1dpYL\x97\x94\x13\xfe\x9dy\x1dV\x854\x89\x08\x1bq\xd4\x8c\x0bF@\x8b\x9c^kz\xe4\x0c)Y\xd3\xa7\xdb\x88\x80E\x0b\xaa\xee\x18\xb4b\xd0-?\xe9\x82\xac\x9a\x155/\x88\xb8n\xb7y9\xdb64!\xa9\xdd\xb4nW\x94\xf4\xca\x08)\x09i\x135\xa0!`\x97\xebZ\xad\xca\xc8\xea\xd1\xaf(&\xeb\xac\x9b\x12v\x99\xd7mZN\x9b\x09Gu\xaahQ\xb4j\xc8\xaa5}R\xea\xaf\xad\xfc^B\xea~\x9d!WD\x22\x1f\xed\x9c}\xa9\xd7\x94\x94\xa26\xe7\xbc\xe3\x8cq-\x02j\xaa\xc2^p@\x8bU\xb3f\xd5uX\xf0\x11\xb3\x0e\x8a{\xce\xab^\xf2=\xcf[\x90\x91s\xd8\xb4\xa26\x93\x9aD\xdd\xd3\xe9\xa6~\x01\x055\xb7\xc5\xdd\x10\x91r\xd2\xbc\xa0Ns\xe6m\xab\x0b\xcb\xe8\xd0\xa2]\x5c\x15\xd3>aIP\xd8_r\xd2'\x15\xf48f\xd2A\x7flERY\x87\xc7}\xdbA\xdf\x12A\x8b\x1bN\xea\xdd\x09\x9a\xdc\xb0\xcf#\xba\x95\xad\x1aVV\xb1\xaab\xc2\xff\xa0\xcd\xac\x94\x1a\xdau\xb9i\xc2\x8a\xaa\x09\x1bR\xa2\xca\xf6\x9b\xd3dM\xdda\x13\x9a%-j\x96\x937h\xd9G]T\xd3j\xc6n)\x01y\x09\xd7\x0d\xda\x9cX\xfb\xbd\xe1\xc0\xf2\xfd;\xd4?\x1c\x9a^j\xfa\xd9BD\xb8\xcd\xb2C>p\xd0\xb2\x9c\x0e9yYg\x05\xbc\xae\xe6\x9efUIU%%}\x02\xee\xd9\xd2\xe5\x1d\x0fyW\xab\x9c\xaa\x16\xdd\xaa\x8e\xdb\xd4\xb0K\xd4\x92m%5\xab\x1e4\xab]\xd1\xa7\xdd1\xe6\x90\x97\x9d\xf2\xban\xd7\xa45\xfck\x01\xbf#\xeb\x0d\xeb\xb6\xf4\xb8$\xe3\x94\xff\xc5\x19\xfd\xbe&\xe6=\x15\x13\xb2\x9e\x92\xc4]e\x19\x0f\xeb\x92V2\xaf\xcf\xb6\x03\xaekUUrT\xc2\xba\xb8!W\x9c\x95\xd7\xa4Y\x5c\xdd\xbb\x86\xd4\x1c1\xadY\xdc\xb6\x13\x02\x9e\xb5\xe1Ii\xa7v\x02\xfc\x1a\x22\x1a\xect&c\xb2\x92\x82\x12R\x9a\xcc8l\xb7\xe5\x9d\x11\xbc\xbdZ\x055K\xe9\xbc;\xfd\xa5\xd1\xc0\xfc\xfd\x032U[\xa8\x8c\xf6n<\x99h\x84\x03\xed\xd2\x92\xb2\x1av\x09\x8a\x99\xd5+f\xdd~\x9b\x22\x06lY\xd6bS\xbb\x98\xac\x0eU\x9bv\xa9)\x08\x09\xf8\xb8\x05\x87\xe5-\x0b\xdbDV\xc5M\xa3\x9a\xd5]\xf7i\xb75\xbb\x22\xab\xc3\x94\xff\xd1\xef\xfa\xb8Ye\xad^\x11\xf45C\xe6\x94\xe5\xa4e=\xef\xef\xf8\x94\x98\x86\x86US\x065\x1c\x10\xd4i\xd2\x9a\xcfY\x96\xb6\xe65\xc7$\xac\xaa*\x1a\xf1\x15}\xda\xe5
P3\xa6fH\xd0\x98\x8a\x05{li\x13\xb3\xb6\xf3G\xb7H\x9b\xb7(o\xca\xb2\xa2\xb4f\x83\xe6\xedr\xd0\x9a\xbc\xb0\x84\x0dM\x92\x0aX7`[\xc8)\x936$\xf5\xb8\xe5\xbc\x15Y\xd9\xef;h\x8d\xcf}i40{\x7fK'}\xc5\x8e\x9f\xcd\xeb\x0eT\xad\x1b5\xa7\xcb\xbaAc\xf6\xe3o\xfb\x9e6Ei+\x02\x82\x16\xed\xdai\xccnIH\xd9\xb4,\xae*'$\xe3\x90yy\x03^\x93\x97\xb5\xa4`KF\x97Qo\xdb\xa5\xd5]U\xc3\xee\xfa\xa2\xc7LbK\x8f=\x0e(XtK\xd6\xba\xbd\xfe\xa9'\x04\xe5\x94\x04,\xab\x09\x1a7o\xd2\xc7\xbd\xa7Y\xc5\xa2S*\xd2*\xf6\xeeh\x0d\xe7\xdc\x12sL\xc8\x8053\xfa\x04\x844\xb9f\xd9\xa7\xbc/!\xaf_\xd4\xa0U-\xb6\x9d\xb0\xe8\x84\x15?jAERZP@\xcc\x1d]\x0arR\x1epONE\x8b\x98I\xa3J&\x8d(\xd9\xb0\xe2yS\xdaD\x94L\xd9\xad6>\xfb\xa5\x03\x81\xa9\xfb\x0a$\xb00\xdb\xf2\xf9\xa1\xaey5}\xf2Flh\xffX\x0f\xc1\x00\x00 \x00IDAT\x96\xb5*\xe1\xc3~S\xc4-]\x8a\xf6\x9b\xd5&+`\xbfn\xdd.;\xa6dHRI\xb3\xb0&\xab\x86\x94\x9c\xf4\xb2\xe3F\x84\xb1!l\xd5\xb0)\xdd\xb2.\xfb1\x13\xda%\xb4\xcb\xcb\x89\x1b\xd3\xeb\x90\xef\x19P\xb7\xc7\xa0\x8a\xdf\x14\x13\x11\x16\xd6$f\xd9E!mV\x04\x8c9kX\x97E\x97=\xacU\xb7\xb4VieQiO\xf8\xc0\x8c\x88A\x83\x96\xb5xW\xc5\x93\x1a\x96%\x85$\xe4,\x0b8j\xcbi\xbf\xe3\x10\xe6\xadJZ\xd7p\xd8\x90\x8cMMZ\xc4%\xad\xdb\xb0G@\x9f\x80\xa0=\xee\xe81bSY\xbf6\xdfV\x11\xc4\xb6\x84\x0d\xdb\xe3\xab_\xea\x0e,\xde\xd7\xe9\xf7\xc6\xae\xa6\x81\x17f\x0a\x03\xb5\xa8iE\xfb\xccJ\xb9\xeb\x05?\xe5\x82>\x15\xcd\xf2\xaa\xd2\xe2\xd6\xb5Jh3a\xca\xffl\xc9\x9c\x9au\x9fu\xd1^\xad\x8eh\xd1\xe5;\xda\xd4D\xfd\x05)\x15i\x0fy]\xc9~y\x9d~K\xc4\x07\xd6\xac\xa9\x08xE\xb7\xbb\xee\x0a\x9a7\xe4\xb0[\xfe\xa9\xa0\x98\xb2\x9a\xac\xba\x8a^\xad\x1e\xf1\xb6\x86~[\xd6El[\x13S\x941o\xd1\x1dyu\x87\x84M\xba\xedy[Z$pG\x97\x94u\xeb\xd2f\xe4,9\xeb!+\x1aZ\xa4\x8c\x8a\x1b\xd7\xael\xafQ\xcf(\xf9\xb6~]6\x84\xcd\xe9\xf5q\x01\x0b\xb6\x85\xdc\x956\xe9\xe3\x8a\xaek\x16\x945\xe5\x19\x8f\x9b\x12\xd0%\xa9\xa29@\xf2~\x0bv6k\x13\x1b\xa7G\x0bgZt\x05\x86}\xdd^A\x0f\x98\xb6\xady'\x9e~P\xc3=!e\xc3J\xaa\x86\xd4\xbc\xacS]A\xd6w=\xee\xaa5\x09\x97\xdd\xb4\xa8\xa1\xc7\x98/*h\xd2k\xde\x01\xdb\xde\xb1\xcf\x96SB\x8
6w\x9cP\x0e\x08\x9aQuU\xc0\x90\x8c\xab~\xcd\x80\xa4\x86uMB\x1a\xa2\xa6\x0d\xfa7>\xaa\xdd\x7f\xd4k\xc8\x1d\x03>\xe4MA)\x87D\xb5jR\xb5\xaaU\xd6\xc3\xfe\xd0a\xdf\x92\x15s\xc7\xe3\x16\xcc\x08\xear\xc0]\x1f\xf5\xdbvI\x9a\xb0 \xe3\xa3^UW\xb5\xed\x92N\xaf(\xe8W\xd6.$nT\xc1\x94\xba\xa2\x16!\x0fiRt\xc1n\x05\x15A\xdb>\xe6K\x9a\x8d\xa8\xbak\x9f\x8a\xf8\xad\xa9\xdf?\x18\x98\xb8\xdf\xfd\x90S\x817\xbf\xba\xeb\xcf\xa5{:M; \xa7K\xaf\x0e3b\xa6\x91\x93\xd1&&!\xe0\xb4\xd3\xea6t\xdakY}\xc7\xa6\xfem\xbbu\xbbi\xd8\xa2V\x15aQ\xcd\x96T\xe4<\xe651\xc3\xae\xe8wC\xd4\xb8'\x8d\x9b\xd0'%\xa8\xcf\xd3\xfe\xc8\xaa\xa0_s\xc0\xbay\xbf\xe1\x8cV5\x01M.i\xb1l\xc3\x82\x07MKhU\x93sDJ\xbbk\xfa\x94\xb4\xb8\xe6\xa7}\xd3G\x5c\xf1\x84k\x86\x1d\xf6]\xcf\xb9\xae\xcd\x90\x0di\xeb\x1ep\xdb\xe3>P\x10\xd3gY\x9f6\x87e\xd0\xae\xc9yoK\x9b\x15\xb3\xe4\x90uW\x9c\xb2\xae.\xa4\xae&-\xe9QE!\xcb\xea\x0a\xc6|RZ\xd8\xbc}\xa6\xd0\xb87\xfb\xef{\xee\xe7-\xeb\xfb\xdf\xb2\xbf\x1e\xd9\xbc\x11y\xa9\xd2\x88\x07\xd6\x15\x1dUvL\x9feYk\x0eZ\x95\x95\x95\x91\x967)bSENLC\xcd.\xb7}Z\xc8a?\xe5\xaa\x874{\xd8\xc3\xb6-\xa8\xe9\xd2\xeb\x8e\x11\x0d\x09Qi\xcd\xca\xaa\xa6\xf4\xe9\xf3]\x07\x144\xb9\xe3\x90\x83\xbe\xa8I\xdd\xd7\xbd\xea\x9fh\xec8\xfd|\xdf\xdd\xa7\xd5\xab\x1e\xf0ua\xed\xf6\xd86\xab\xa2]\xd6\xb6\x9a\x84\xbc\x9f\xf6\xeb\x1e\xf3\xaa>[Z\xb5\xa9y\xc1\xcbB\xea\x16\x1d\xb2$/+\xa0fD\x93\x98\x15\x83\xa6\xac\x1bu]U\x97\x15U\x9f\xc4yUU\x05\xbb\xed\xf2\x8e\xa3\x96\x05\x94m\xe9\xc5\x9a-y#\xce\x9a\xd9\x990\xbb\xeba\xef\x1aq\xcet\xcb\xdc\xff\xd5\x19\x5c\xb9\xffc@\xc7\xfc\x87\xa9\xc7?\x11\xed\xcc\x85+\xce\x99\xb2O\xc9k2\x0a>\xeb\xbb\xa8)\x08\xea\xb3\xedE\x17\x0d))Z\x17\xb6\xa2KAAY\xd8\xeb\xe6,\xd9c\xc3\x96\xb0\xdb\xca\xa2\xb2\xa2\x0a\x06\xada\xbf%C\xe6\xbe\x7fp\xe9\x96\x957\xaf\xae\xe8_*\xa8\xfb\x96\xaf\xf9\xbf\xd5\x85\x85\xd5\xac\xca{O\x8bK\x8e\x9bp\xd6\x1d\x11[\x06\x5c\xb0O\x93\x03f\xb5+9&%\xb13pQ\xb3m\xd1]\xa7\xe45\xdbR1\xa9SU\xd7\xce\xc6\xb6 
\xa1O\xc1\xb6\xe3\xe6\x1c1-\xa1,o\x5c\xb3^\x97Tt\xd82bR/\xaaJ\xca\xbae\x854\x84\x85\xbc\xe3\xc3\xaej\xb7)\xafd\x97\xeb\x1a\x8d\xf1P\xf6\x1fD\x02\xb9?\x15\x90\xffF\x05\xd5\xcf\x87\xfey\xed\x13[\xab\xb1\xde\xc0\xd3\xf2\xc6,\x8b\xc8);(bJ\xd2\xa6\xa2\xa3\x06\xcc\x1b2+\xacM\xd8\xa4MG\xac\xcaz\xcc\x84Q\xe32\xa2FQw\xd7\x8a\x92Va\x05!-\xaa*\xeajZ,\xea\x96\xd3\xa2\xd7\xb6\xa4\x01\x9f\xb4G\x9b?\xf0o\xfd\xba#j\x22\x82fL\x9aV2f\x8f\xd7\xbd\xe0u\x11\x9bNZ\xf7!\x974\xc9y\xd8W\xb4\x1aU\xd2!l\xd2\x8a\xac\xa0-\x1d\x0eI\x19@N\x9b\x92w\x10\xde\x99Q\xe9V\xd4anGK\xbf\xac]YfgB\xb9_\x9b\xa2\x09\xedzM\xe9\xb4 oD\xc1\xa8EY\xed\x12\xd6\x0c\xba\xe9\x80\x90\xb8E\xb4\xb9]\xf9\x5c\xe4W\x02\xdf\x08\xbc\xd0\xb8\x9f\xb7\xac\x9d\xef\x8f\xb1\xfd\x9dd\xe0q\xd7\xbcg]Y\x93\x88#\xd2\x96\x8dXCQT\x93\x8c\xa8\xba\x88m\x9b\xa6%M\xab\xc9\xaa\xd9g\xdd\x96a1\x93\xd6lX\xd6!nC\x9b^m\xb6t\xd8\xde\xf1\x9c\xeb\xd0bU\xd2\x98M\xff\xd2\x8b\xa6\x8dy\xcd\xfb\x1esTC\x5c\xc2\x9bJv\xcb;\xa9\xcb\x15\x8fxE\xa7]V,\x99\xf6\x96\xb2Yu\xaf8\xe6Y+\xe2\xee\x99Up\xcbA\x05\x0dS\xae(\xba$d\xc1^\xe3\x9e\xf0\xa2=\xba\xe5eE\xcc\xc8\x88(\xfa\x90\xa0\x9fp]YQ\xcd\x84\x0f\xb9%\xa3\xc3#\xf6ZS\xf5\xac\x0d\x11\xb3jn[\xb2).\xae\xcf]\x1d\xc6q\xcb\x86n[\x12\x91\xdf\xdf\xfe\xb1\xaf\xbe\xd0\xf8\xd9\xc0}\xdf\xb2>\x1c\xbaR\xfb\xf3\xbf\xb2\xf5\x85'jS\xc1\xa4\x86#\xdal\xa9Ki`KC\xd6~\x15\x19q[;\xa2\xcf\x8cnE4\x19P\xb2jU\xd5\x8a\x8a\xa7-\xe96\xa5\xa0hX\xd0\x82\x9a]\xee\x0a\xdboY\xbf\x8a\x80\xb0\xa0\x82\xb09\xb7$]wM\xd6\xcf\xeb\xb6\xa5d\xd6\xb2\x8ckZ}\xa0\xee\xa4Yg\xdc2\xaa(\xe2\x09[\xb2\x9e\xb1\xa4\xa2\xc7\x84Cf\xed\x17\xb6j\x9f\xc9\x9dbdP\xda~\x1fx\xd2\x97\x1c\xf2\xae\x05\xcb*\x82b\x0a\x0eI\xdbg\xddk\x86\xcc;aN\x0c{li\xd1i\xd2\xdbv\xebWt\xd7\x03\xb6w\xe4\xdc\x1d\xda\xe4\x5cr\xd4\xb2\x87\xdc\xb6\xa9\xc5)\x17\xb5j\x92m\xbc8\xf7\xf2\x17\xff~\xe0K\xf7\x17\xc8\xa7\x83\xdf\xac?\xfb\x8f7\xfe\xf6/\xd4\xde\x0e\x8d\xbac\xc9\xbauU\x9b~\xd2\xbc\x9aam\xf2Z\x9c\xd2\xa1_\xb7\xcb\x9e\x90s\xc0\xb6\x03>$\xa1O\xa7\x8c\xa8e\xcd\x98\xf7\x80\x05\xb7\x1d\x94\x160g\xc0\x1e\x1f\x
e83\xa2\xa0K\xd9\xbc\xb8\xbc\x88-EK\xb6\xbdaVZ\x9f\x97\xd4e\x94\xdc5\xaf\xdf\x1fKj8\xe1\xe2N\xa4JF@\xbba\xb7\x1c6`\xd8;\x8e\xda\x12\xf0\x09\xd7m\xab[\xd1dZB\xda\xb0Me{]\xf7\x9c\xa2A\x19\x15Ek>a\x06\x9dV\xc5T\xb5[\xb7O\x87\xa0\x84\x9ck>kA\x9fe1iA{\xcc\xaa)\xea\x12\xb7\xaal\xdd\x09\xab\x82\x96\x11\x90\x17R\x93\xf2\x98R)\xfa\x8b\x17\xee}\xc9}=C~&\xf8\xbb\xf5g\x7f\xbd\xfb\x17\x1e\xad\xbe\x19~@ZX\xce\xa6\xb2&3\xdae\x05\xe4e\xb4;$\xa3\xa1\x22\xa2Wj\xa7\x14\xb7.d\xb7eq\xc3&= m\xceyo\x9a\xf5\x90\x8b\x92v\x190e\xcc\xa7,\xb9i\xb7\x82y\x8fX\x96RS\x14\xd2m[\x8f5m~\xd1\xf3\xa6\x5c\xd40\xe1\xbc/;kL\xb79CR\xca\x1a&=\xed-a\xcf\xee\x8cy&]\xd4\xa2\xcd\x8c\xc7\xcd\xfa\x88\xf7-\x9b\xd0n\x97o\xdae\xcb!\x9b\xa6\xec\x92vV^I\xd8{\x9eq]\xbf\x8a\xa0\x11\xf7\x04t\x1aW\x15\x91\xf2\xac\x97\xed\xd7\xaf\xdf\xaa\xa4\xf7\x9c\xb6l\xdan[*\xdal9j\xca\xaa\xe3\x96\xa5e\xed\x137\xeb\x01\x7fT;\x13z=\x10\x0c\xd4\xef\xe7-\xeb/\x07\xffU\xfd/\xfe\xcd\xa6_\xde\xaa\xd6\xc3IiY7M\xc99&/\xe0\x90>w=mC\x97\x11Q\xc7\xc5\xec\x17\xd4aT\xc4-'\x8d\x0b8l\xd3!y\xef\x88\xfa\x84\xbcmu\xb3z\x14u\xbb \xe6\x90eA\x04\xb4\xcaYU\x14P\x11rP\xd4\x03\xd6\xfc\xb4g=)\xe7\xdf\xcajr\xc9M\x9b.;\xe7\x9ecZ\xb1\xa4O]^\xcdn\x83\xf6x\xcb\xd3\xee\xd9\xd0\xe5\xae\xdf\xf4\x8e\x90[\xe6\xc5\x0c\x9aW\xd5\xa1KQ\xc2\x13j~\xc2\xbc\x9f1%\xa9dX^\xbbc\xd6\xf5K\xeb\x96\xb0\xa4\xcf\xaa\xba\x0e\x8b\xf6\x1auKF\x9f\x92\x0d\x9d\xfa\xe4%\xcd\x8b\xa8\xa9c\xc1\xb0\xb2\x15{\xa5\xf4\x08\x08\x9a\xd7\x1e\x0c\xfe\xfb\xb9?|>x\xef\xfe\x01\x89\x05\xdf\xae?\xf37\x03\xff\xa8\xa5\xfe\xb1\xd0\xa6\x80\xb3^\xf7\x98\xba\xaa\x8ce\xad\xaa\xde\xd5\xed\x86\x0d\xed\xd6\x04\xddU\xb0\xa1`RC\xc6\xb0\x0e'\x9d\xb2\xe8\x8c\xd7\x85\xb5I\x99U\xb3 
#\xa1\x8c\x8c\xc7\x04\x5cUvJQFQ\xc0I9\x05\x01Q{\x0c\x8b\xf8gN9\xe8\x0d_u\xd0\xd7lI\x1b\xb4\xcbn\x17=n\xd5\x84\xfd\xa2f\x0c\xba\xa9\xe4\xa0)\x0b~\xd4\x9f\xa8hQ\xf33\xfe\x92S\xd6\xc4\x0dy_\xabI1E\xdb\x82RV\x95}\xd7q\xdfPt\xd8\xa2\xdbN\xb8b]\xc8\x82M\xdb\xca\x02\x9a\x9c\xb7,\xa3*\xa4U\x93V\x0b\xba\xc5\xddU\xd6\xae!/k\x97\x94\xa4!\xd7\x1d\xb6mR\xb3\x0d9C(\xe8\xfd\xd9\xde\x8d\xd7*\xf7\xb1tRm<\xff;\xc1_j\xab7\x82A\xef\xd9g\xde\x1da\x199\xfb\x94m\xc8\x88\xda0\xa0\xa1h]\xaf)\xcd\x96\x04\x05\x14\xac8-\xe8kj\x0e\xbb\xe5\x80\xb0\x82\xbd\x22vY\xd7$\x80\x88\x86\x88)!\xfb\xdc\xd6*\xa5G\xc5\x94Ak\xb6\xb4\x09\x0a\xfb{\xaa\xe2~\xcf\x0d\xb7\xbc- \xe3\xa3\x9a\xbc\xad\xaa\xcf\xa6m\x1d\xb6\xf5H)k\x97\xf0\xbe!\xab\x22\xe2\xbal\x09\xfb\xb6\x93\x02\x16\x14\x8d\xb8\xe5\x84\x0d\x09!\xadrZ\xf4\x9bp\xd0\x98\x93\x22\x92\x122\x16\x1d\x962\xa4E\x93\x88V5Yeg\xdc\xd3\xacbCM\xbfm\xab\xea\x0e(\x99\x95\x14\xd0!\xa7]TQ\x87\xb8sR:u\xe8\x924S;\xba\xf8\xed\xbf\x17.\xaf\xff)\xbdN~\xc0\xb5\xb7\x11\xf8\xd0\xafv\xffLO\xbd;X\xb2j\xc9\xb6o\x08\x99W\xd5\xec\x9e1\x9f2\xe8\xf4\x8e\xe3\xce\xe3^\x921\xe2\xb6\x07\x8d8o\xdb_\xb3\xae\xe4\x97\xf4\x9a6 
*\xa5hU\x0b*\xd2fu\xa8\x0b\x99\xb0[M\xd1I\x09M\xd64\x1c\x11PUwL\xde\xcfj\x08\xf9w\xa6l\x9a7\xe4\xef\x182\xe9\x966U\x1bZ\xe5\x9c\xf5\xbeq'\xc4\x1c\xb5-\xa1\xe8a\x0b\xb6\x95\xb0\xe8\xa0\x92^O8g\xc6g\x8c\x19\x95V\xdd\x11\xe2\xdd\xf0\xa0\xdbN\xa8\xd8\xebu+\xaa\xba\xa5\x1c6cS^RE\xbb\xfd\x16dt\xc9\x0a\xd8\xd4\xaaK\xd03\x9a\x5c\xd4\xec!!m\x16\x0dhR0\xad(\xe4M[bVU\x85\x85\x03W\xbe\x9b\xdd|\xe4O\xed)\xf7\x03V\xc8{\x0fu}\xb1\xb3\xbe'\xd8PR\xb7!nV\xaf\xa2&{\x14t\xbb!\x89\x84fu{\x8d\xdbrD\xab\x98\xb3n\x0a\x8bi\xd5g\xd5AYe\xb7\xd0++\xe2\xae\x0dum\x826%44\x14\x05uZ1b\xc3\x90\x05a[:l\x09\x19\x90\xf2=W\xcd\xb8\xee\xf3~\xc5\xef\xd8\xb4mR\xd8\x09\x0b\xda\x05%\xa5\x85t\x99\xb1\xe2\x86\x8c=\xd2\xce\x19\x13S\x13\xb2,\xee\xa6+\x1e7\xe7\xb8\xdb\xc2\x9epU\x18[\xc2\xc6\xf5\xa9\xb8\xe6\x94\xacS6\x05\x94\xd4\x1d\x13\xb7\xa8\xdb\xa2\x86NM\x9a\xd5M\xdbce\xa7)\xd6,\xbfc\xa2\x93FMJ\x87\xb4f\x0d\x0deUY\x09M&\xea\xf5`\xe6\x97\xb2\x13\x7f\xfaL\xef\x1f\x00\xa4k\xf7\x99\xbf\x10\xb2\x15X\x94\xd6'g\xce\xc8\x8e\xfbUHJR\xd4\x8c\xb0q\xab\x1eu\xdd\x82S~_\x9b\xb0/\xab\xebP\x90q\xc1\xba\xeb\xd6D\xf4\xd82oD\xaf{\xa2\x02\x922J\xc2z\xe5\x95\x0cXq\xc4\xf7CPV\x1d4\xadYU\xc3\xb4\xf7\xacXq\xd2o9\xe6\x7f5\xeb\xbc\xefiuP\x93\x90\x80\xb0\x0f\x9c\xb0\xa1[\xd4\x97u\xe8\xb4e\xc8\xdb\xce)\x08\xb8\xe3\xbc&\x01\x8f{\xdf.\xffA\xbbEi+NZ\x95\x90t\xc0\x94\xb4\xc3.;mL\x93\x80\x8d\x9dq\xd1m1K\xba\xb4)\x1a\x92\xd1kY\xd4!\xaf9\xe5\x8e\x84\x82\xc3\x82;\xc9\xd4A)\xe7u\xda\xb0-\xa2W\xcb\xf7{,\xf5\x13\x8d\x97_\xfaL\xe0\xf6\xfd\x04\xf2\xe4\x81\xe6\x97\x0e\x07\x96\xa44L\xa8\xef\x84G\x9c\x111\xee\x8c\xb2\xabv\xcb\x8b\x8aj\x16\xd5o\xc1\xa3r\xa2\x22\x9a\xa4u)H\x1arD\x8f\xbc[\x9a\xad~\xbf\xa0 
\xaca\xc3\xa0\xa2\x86\xd3n);\xabj\xaf\xa0\x9c\x94.aA\x9b\x92\x06m;-\xe3S~\xde\xbf\xf0'f\x8c\xb8`\xc8c\xca\xde\x10\x94U\x91\x10\xd0\xed[^sH\xd8\xaa\x19\x09\xfb}`\xc0\xb2\xa36\x14\xcd\xbba/F\xc5$M\xfa\x05\x1b\xc6\xecW5g\x9f\xa4u5\xe3\xda%\xc5\xac\xea\xb0,\xe4\xb8\x9c\x05'\xdc\x924%\xaa,(aE\xb7\x9b\xf6\x1a2)\xaa!\xa2M\x0eMf\xe5\xa5\x0d\xd9\xeb\x03\x8f\xbb\xadC&X\xfd\xe5[o>\x18\xb8v?\x81\xf4\xfe\x9d\xc0\xe9\xcb\xf6x\xc2\x1e\xef\xa9\x08j\xb3i\xda\x92\xa4\xa8\xb4\xc3\xd6\x1c1\xa7S\xdc\xbc\x19)\xeb\xdau\xa9\xe1\x93\xb2\x22\x0e\xbbnDF\xc4\x1e+F\x1dwOEJ\x97\x84I\xbb\xb4yO\xb3\x01s\x9eW\x14\xf1\x86\xd3\xeej\xb7\xae\xc7\x94\x90]\xaa\xfe\x96\x94\x7f%m\x5c\x93\xdb\xfa|\xda\xb8\x19\xeb\x92\x12\x9a4i\xf1\x1d!-V\x0d\xa9\x8aZRw\xce\x07>\xe5uus\xcez\xc6\x9b\x1er\xcd\x86\x03:|\xd3]\x1df\xd4\xed\xc2\x0c\xda\x9d6\xaeM\xd9\xa0\x8b>g\xd2\xac\x16{\xdcr\xc8\x9aGm\xe8w\xcd\x9a\xc3&=m\xca\xb8O\xd80`\x0c%\x1b\xf6\xdb\xadl@\x10!\xef\xd8o\xbc\x11\x0fL|\xfeT\xe8\xcb\xa5\xfbj\xf1w\xf6X\xe8\x99n\x97\xadYT\xd1)jA\xd9!+\x1a6w<E\x87\xa5u\xcbk\xc1Qa9\x0bNZ\xd6\xeb\xb8\x19G\x9c\xb2\xae 
\xe2\xaan=\xbe\xe2!\xef\xebSP\xd6aE\x1c\xc7\xac\xf8\x8c\xb7\xb5\x8a\x8bX\x911lM\xd1\xf3\xde\xd4\xa4\xe2[\xa6e\xac[\xb1\xdb\x19\x9f\xf2\xbe\xdbr\xfa\xdd\x13WS\xf5]Q\x0b\x96\xec3\xe4\x8e\x86\xa8\x05\xfb\x0d{\xdb\x19o\xc9\xebuU\xb3\xbc{\xc2\x1e\xf25\xedZ\xad\xa8\xab\xab\x1a\xb7\xcb\xa2\xa0\xbc!u[\xde7\xea\xbb*\x9e7k\xdb\x88\xcb\x86\x04\xd4-\x89\xa9(\x8b[\x95\xb3O\x8f\x92w\xed3\xe6q3\x12f\x0d\x9b6\xe0\x9e\x86\xb8\x80`9x\xf1\xc2\xaf\x7f\xa8\xfcg\x89\xbb\xff\x01@\xfa#g^\xdaj\xc4\x03\xabV\xf4\xee\x04\xdc\xf5\xb8\xe3Y%[\x22b2*6t\xe9\x113l^\xdd\x13:]\xf4\xb8E\x97\x0c\xb8\xea\x82%kn\x19V1\xedW}EA@V@\xc9\xa0\x82\x82\xa0}\xfe\xc4i\x0d9e\xcb~\xdc\xbb\xc2\xfa\x5cv\xd0\xacas65\xdc\xf3S\x12\xe6}Q\xc0\x8a\xb2\x82\xc7L(\xb9\xa6\xd5\xb66\xc7\xdd\xb4!\xa2&\xe8\xa0\x94\xaa^\x97|V\xab9)\xe7Lz\xd2~\xbf\xeb\xafz\xd9a\x9bF4kr\x5c\x9b\xfd\xcaVmh\xd3\xf0a\xd3FDl\xcb\x18rA\x9b\x90{\x06\xb4\x1b\xb0*\x22cS\xb3\x05a\xe3N\xea\xf6\x94?\xd6&\xad]@\xbb\xeb\x8e\x88j\x96i\xac\x84c\xbf6}\xf1\xcf\x82\xe3\x07\x02\xf9\xc8\xc2\xc5?\xbf\xb7y&T\x102!\xaaI\xc2\x966i1%\xc7,\x8a*\xe91\xa8\xcf\xb4Uq\xfbd\x85\xed\x92\xf6\xa8\x07Lh\xf6\x84e\x1b\x8e\xa8\x0az\xcc\x1d\x97t\x08\xa8\xdb\xa5\xa8*\xa9\xe0\xb8\x05\xa7\xa4\xb4\xed$\x80\xce\xeb\xb1\xe6\x90Eim\x96emz\xd8y7\xac\xdb\x166\xa9E\xafS\xfe\xc4\x86\xacf9\x01\xedV%ta[\xb7\x8c\xb8\x03V<\xe5\x926A\x01\x1b6\x0d\xb9\xea\x94\xdf\xb2KZ\xc5-}\xaa>\xd0lQ\xd6\xb6\xa8--ZU\xbd'(\xae\xea\x98y\x0d{\xed\xd50\xa1\xc3\x96\xb4\x8afI\x9d\x8e\xa8\x9a\xd4\xe6\xa2AyUM\xf2Z\xd4\x94-\xeb\x96Vi\x1c\x7f\xe9\xca\xb6\xfb\x0d\xe4R\xed\xc8\x87W\x0f\x0b\xc4\xb5z\xcc\x92\xaaE\x07\xcdK+\xea\x96\xb2$\xa1.h\x1dM\xfa\xdc\xd0\xacaM\x97\xbbr\x96\xb5\x09\x9a\x151`^\x19\xb7\xd4-+\x19\xb0%!`CL\xc2\xa2!q\xfbU\x84-\x98\xb2\xd7\x92~Y\x95\x9d\x8a\xf1\xf7\xdf\x1c\x8b\x02\xb6\xa5\xd1\xa4U\xd9M}\x1a\x82\xaa\x92b\xe6\xf5Y\xd8\x19\x7fh\x95\x17\xb3\xdb\x8a\xb4\xbd\xa6-;%\xa1jL\xc3M\x1dJ\xf6[u\xce\x15=(K\x8aK\x1avO\xc2\x9c>\x9f5\xaf[\xd1\x82&\xbdflK\xb8#b}g\xe5\xd5U,:a\xd2\x80m[r\x0e\xbb\xab\xac\xa1a\xd1I\xeb\xea\x81\
xf6\xd7\xff\xe3\xbf|!0~\xbf\x81\xb0\x7f_\xea\x99\x5c#\x1fhs\xc9\x1e\x09\xfdn\x8a[\xd5'\xae\xb8\xa3\xe4H:.cY\x97\x98\x82v\xed>\xf0\x98{\xd8\xaf\xcd\xb4\xb2&%\x159\xa7\xe4\xac\x8b\x0a+kH\x88\xe8\x93u\xc4\xa4n5\x97l\x8a\x08K\xdbc\xc2C;s\xbbe\x9b\xa2\x82bf\xd4w\xd6\xd0\x82\xa0\xacn\x8b\xa2Z\xd4lk\xb1\xe8Ey\x015\x0dQ\x19q\xe7-\x185j^AZB]B\xdd\xb2\xbc=r\x0eZ\x125\x22%\xa2I\x8fqUMz]\x97pS\x1d\xfb\xd4\xdc\xb1OV\xd6\x80\xba\xc3r\x1a\x02J:w\xe4H%%Iq\x9d2\x0a\xe2j\x1e\xf1\x96\x98\xbc#?vim\xbc~\xdfW\xc8\xbe\xe0\xfbo\x0c\xff\x95\xfd\xf1\xb6@\xafis\x0e\xda\xb6\xad\xc5\xa8A\xcd6%\xaczD\xcc\x92\xa4\xce\x1d+\xa6nw=b\xce3\xb2\xae\x1b3,\xac\xa8[\x9f>}\x92n\x88\xeb\xb5\xa1\xae\x8cy\x1dj\x86mZ\xf2\x11i)5\xadJV\x9d\xb5\xdb\xbc\xf5\x1d\x99YFJ\x87\x82Nu\x155e\x03\xd6\x84\xb1\xad\xa1IP\xbf\x97\xb5I\xd8\xd6o[\xb7\xac\x8c\xd3\xae\xc9\x0a\x8b\x98S\xf4\xa0\xaa\x84&\x1d.\xe82\xa5\xdf6\xf6\xb9i\xc4\x8c!\xa3\xc6\x1c\xb4\xe5U\xad\x1e\xd5\xe5{x\xda\xbbZt\xb9'\x22\xa0bV\xbf\x9a\x0e\x8b\x16\x8d\xda\xb2\xa4_Q\xc5\x8a\x16\x9bb\xcaZ\x05\xca\x8d\xb5\x9e_\x1dm\xdc\xaa\xddw \xe9\xc6\xe1\xc4\xe0\xa5\xfcOe]uP\xdc\xa2\x9c\xa4\xac\x88\x82\x9b;\x16\xc5\x0b\xb6\xe5PP\xb5\xa0\xc7%\x07} \xa0nB\x93\x8a\xb8uY\xdbB;\xfd\x8e;\xfa\xcd)\x19\x96\xb4%\xe4!\x0b\x8a\x92V}\xc8\x07\x0e\x19\xd3\xad\xa8\xc7\x96q\xfd\xb6<\xec5!\x9b\xf2\xfa\xa4\xe5ET\xd54\x99\xb3\x07\x15IAaY\x9b\x8e\xd8'\xb53\xd1\xd8f\xd1~q\x07\xbd\xef\xb0\x0d\x1f\xb1\xe5\x1da\xa7\xd4<fLLP\xaf9%\x8b\xf6x\xdf\x19\xbdnIH\xa9yH\xca\x96e\x87L9\xa4\x22\xab.\xe6\xc7\xbc\xeds.\xd9\x14\xd0\xacW\xcdi\x17\x1d\xb2\xcb\x15q\xad6mj1\xa7Ug\xbd\xe7\xf7\xfe\xcd\x97o\xfd\x99\xe3\x89\x7f\x00\x90\xdf\x8f\x7f\xbb^+l\xfdBw\xbd-\x14\x16\xb3jK\x97\x80\xacf\x11\x83\x8e\xb8\xa1K\x9bS\xca&\xecvZ\xd5\xc7\x8d{T\x9be\x8f\x19\xb6\xcb\x8c\xfdZ\xb5[\xb5n\xd9\xbf3 
h\xaf\x9a{\xe2\xd6\x04\xcd\x1b\x96\x93\x97P\xb0-\xa3.\xac\x22\xee#>\x22k\xde5\xfbDD\x8d\xca\xd84\xb4\xa3B\xcc;\xa5aAc\xc7!\xa8\xc5\x09\xef[V\x17P\xd4kU\xb7m+n\xfaq\xdfP0\xe6\xb0S\xaa\x82\xae\xbb\xa8\xdb\x82fW}\xd4\xb2&]\x92\xea\xde\xd1,\x22\xa3E@\xa7\x9a~o:aI\xdan\xd3\x82\xde7\xe8\x0f\x0c\xdb\xd6*gL\xc9yc6]\xf6\x9c5\xe5\x1d\xc1wC\xb9\xb6\x14\xee\xfd\xc9ki~\x08@\xfeSu\xa3\xb6\x98;}m\xfe\xf3\xfd\x8dT`\xc9#*f\xd5t\x09\x9a\xd0\xef-\xcd\x9esAQY\xb7C\xde0\xea\x9e\xa4M9\x0f\x9a\xb1e\xc9\x901-\xc2j\x86\xc4l\x89\xe83&\x22\xad\xa8E\xc9SV<\xea\x86}\xcexUTJ\x8b\x82\x84\xb2\xb7t\xea\x93\x93V\x94\x95\x17\xb3%\xef\x80ia\xedX\xd3oYX\x8b\x03\xe6\xdc\xb3[\x97vuuEu\x11\x15g\x15M9,aI\xbf7\xb4ZwN\xab\x16ei\x0f\x9b\xb0eSV\xd6>e\xcf\x99\x16\x93W\xb6f\xb7MC6U\x154i\xc89k\xce\xa8\xe3\xc6d\xc4\xb4\xf8\x98o\xe9\xb0\xe49\xb3*6\x9dt[CL5\xd8\xff\xfe\x1f\xfe\xe3\xbd\x81\xf4\x0f\x03\x08\xff8\xd0\x13\xf8\xa3;\xc7,~8\xa6\xd5\x92^[\x06\x8c\x09;\xe5\x8ea=\xde\xb1\xdbq\x01\x1br\xdaLZ\xd1\xe3\xaa\xa2\x9ciU+\xae\x89X2/l\x8fW\xe4\x1d\xf5\x86\xae\x9dt\x9d\xb8\x9a=\x96]\xf3\x09akZ\xc4\xa54+\x1a\xd6\xe5\x8c\xb7\xd5\x8c\x0b\x8a+`\xc0\xa2\xdd\xa6\xb5bKU\xabU\xed\xce\xbb&-\xe1\x09i\xbd\x96\xa5u\x9b\xf2\x19\x9b\x8a&\x9d\x10u\xc8\x86\x86%\x83\x1a\x9e\xf0-q\x17\xd4\xf4\xbb\xea\xe0\xce\xce\x7f\xc2\xb4^\x17\xec3.i\xafeeI\x93\x0eZ\x90D\xd4\x11c\xb2H\xcb\xea\xd0f\xcd\xbbz\xf5\xe8wM\xc6n\x5cu\xca\xa4\x9c\x11\xa9\xbf\xb5\xf0\xdf\xb3@~0\x90W\x5c\xf7T\xe4\xd5\xef\xfd?\x7f\xb1;T\x8d\xa4\xad8aCR\xc8\x8aQ\xab\xba\xf4\xd8F\xc3\xb6\xf3\x8aR\x9er\xcb\x83Zl\xe9\x97Ss\xd4\x8cQ\xfdv\x09\xb9&$\xa5W\x5c\xd0\x8cA\x1bz\xcc8\xea\x84wt\xdb%##dK\x97\x09\xbdz\xd4\x9c\xf6\x8e>ia\xfd\xe6\x84\x15\xb5\x09\x8a\xd9\xb2\xcb\x8a\xd6\x1d\xb7\x86C\xb6\xac\xeaTU\x13u\xcf\xcf\xb8\xac\xcb\x923\xe6\xb5Z\xd2iU\x8b\xcb\x86\x5c\xd7k^DFI\x8fI\xcb>\xef\xb2Mi]\x9e0&\xa4\xddM\xfd\xb2\xe8\x16\xd4\xd0kLB\xce\x96\xb8\x84\x16\x05\xa3V\xc4\xb4+j\xb5\xe4\xa85ge\x84,\x0ai\xafl\xac\xff\xda\x17f\x83\xb3\xb5\x1f\x0a\x90_\x0c\xff\xe8\xd9W\x16\xa7\xeb\xff\xa2\xf1+_\xfe\x
93\xbf\xd1Z\x0b\x05\x0f\xb8#\xeb\x94\x94\xbc\xaa6\xef\x19U\xb1dV\x9f\xafz\xc2!\xf7<\xe8\x8aa]\xaaZ\xc5,\x89JZU1\xae\x22aC\xaf)\xd4vf\x15\xcfX6\xe9\x90\x88&o\xcb)\xdaV\xd3g\xd0\x86\x80\x94\x84YIG\xbc\xeacVtj\xb1\xa9&,dS\xd4\x9a\xbd\xa2\xa8 n[\xc2y1\xcb\x1a\x9aUl;\xacfS\xbb\x88\x9b>\xef\x9d\x1d\xc5`X\xbb\xb2\x80vqs^P\xda\x91\x19D$\x94u\x99\xf6a\xabB64\xf4\x88\x8a\xcaI*j\xc8\x88\xb8'\xe8G\xbd\xac\xdd\xe2\x8e\xcd\xd4\x9aU-V\xc4\xeb\x9b\xe1\xee\x9f\xff\xb5K\xff=8~ \x90\x0b\xf5W\x16\xff^\xf0\x89@\xd5?M\x1f} u,\xde\xb8\x17\xd8\xa3jM\xbf\x15qU-\x0a\xba5\xec\xd7\xacI\xbb5\x8bj\xea\xca\x86]\x94p\xd9\xdfvYA\xc89m.\xeb\xd2b\xd6!\x15]6\x0c\x1a4\xa1WI\x97\x80\xda\x8e\xdcr\xdd\x80\xb26M*\xc2n\x8a\x09\xda\x12\xb4n[L\xce\xa6Q3:\x95\xc4D\x95\xb5K\x89\x8bks\x5c\xd6\xbaA\xcd\x8avY\x16V\x96p\xd6\xb7\xedr]P\x9fvkR\x9a\xcc\x0b\x89\xc8\x0b+\x88\xda\xb4\xa5\xc9\x86\xb0\x8c\x8a.q\xcbV\x1d\x90\xb7(\xa1*m\xd85\x7f\xc9\xcb\x8e[S\x15\xb6!$\xa0\xcb^\x8bZ\xad\x1b\xb5\xaa\xcfR`\xe0\x0f_\xff\xe5?\xeb\x83\xf0\xbfi\xcb\xfa\xd5@ 
\x14\x0c\xfcv]`\xea?~\xed\xe4\xc6\x81\xde@*\xd0\xaa\xae\xa1U\xb3\xa8Vm6wf\xa9\x06\xbd\x87\xa45{\x84\xa4%M:\xee\xdf{\xca\xdaN\xdc\xfc=\xcd\xd6\x8d\xee(J\x82\x96\x05\xfd\x88I\xbb\xdds\x5cLF\x11%\xcd\xaa\xfaE\x05\xed6mK\xe4\xbf\xbcO\x02b\xc8h\xe8\xb3\xa4CM^\xd8\xb0\xba\xa2n[\xd6\x8d\x18w\xc0\xb8\xbcn\x19U\xa7\xbd\xe9\x13\xe6=\xeb\x82\x9261A\x1bF,+\xe8\x955\xa2\xe8\xb0\x0d\x09A9q1%K\xcaFl:oVT\x8b\x8a5\xc7\xfd\x0b\x8f\x18\xd7\x22\xa5[ED\xcd\x88\x82\x19\xc3\x0azl\xdb\xae'\xf2\xa5G\xcf\x85\xbfZ\xf5\xc3\x03\xf2\xba\xd7\xea\xaf\xd5\xe1\xef6\x9e\xf9Jq\xd9\xc7\xb7Tl(\x89\xc8\x9bT\xd3&iKJ\xd5\x9a\xf3\xc6\xc5\x1dpS\x9b\x1e\x1fx\xca5m6m\xeaQqOYT\xc4\x923&\x84\xa4\x9d\xd4\xd0\xad\xe4\x0d?gFD\xc3\x98\x94\x9aNm\xfe\xba1\x15\x1b&\xc5\xa5\xf5\x8bIhWU\x95\xf2\xb4\x05+N\x99\xd7&a\xc0\xa2]>\xe2s2\xc6T\xf4K\xd8\xf4\xd3\xde\xb6\x82U\x8f[3\xec\xcb~\xda\x861M\x82\x1a\x16U\xf4i\x08\xc9(\x0b\xca\x8a[s\xc0\xaa\xbc\x88:\x0a\x9e\xf1\xb2\x94\x11\x15A\xbbL\x19\x95\xf2\xb8\x9c\x02\x96\xb5\xd9\xb0b\xcdY\x1f8mCJ\xad\x16\xbe\xfc7~\xebf\xedV\xe3\x87\x08\xe4\xbf\xfe\x0eE\xeb\xd9\xcc\x17\x0a\x82\x81SZ0\xa8\xc5n\x01k\xb6\xe5E\x04L\x1b\x94tC\xde\xb6\x19E\xdf\xd6/*\xa7OA\xc2m\x11!\x0du)]*\xc2\x82\xce\xbb\xa5\xcf\xa8\xf7|N\xc9\xac\x8c\xdd\x0a\x82\xc2\xfe\xb3\x94\x15C>\xef\x0f\x1c4+\xa3\xe6\x8e\x83\xf2\xa2\xde\xd3d\xb7y\x03Z\x94l;\xe2\xb8\x9b\xbe\xe2\xb6\x1f\x15\xb2\xa5\xc5\x98\x0b\xba<\xee\x1d\x1fu\xc7=U\x11c\xb2\xcej\xb1\xa5K^^VE\xda\xee\x9d6\xc2\x82\x03\x0a\x22:\xcdhqT\xc6[\xf6\x1b\xb0GF\xca\x8cC2z\xdc\xd2\xad&\xa8SV\xc8\x09\xedf\x1d\xb4l\xc6\xe7\xddk\xac\xdc\xf8\x8d\xdf\xfb\xd3\xda\x04\xfcw\x00y\xaa\xfa\xff\xae=\xf7\xe1\xf4\xeehp\xd1\xba\xa89y=\xee\xe8P\x93tH\xda'\xac[\xf2\x94\x15#\x22r>\xael\xde\x19GdL[\xb4_\x095!y\xcd\xd6=\xae\xe6\x80\x0b2\xfe\xaa/\xabY\xd2\xacd\xce\xb0V\xc7u\x1b\xb3e]MX^D\x9f\x87L+\x8bH\xa8)\xeex\x99\x16\xed\xf5!\x03&\xf4\x827\x9d\x16R\xf5\xa8YA!\xab\xe2BNk\x15\xd2c\xd3\x92\x94\x92VeaA\xa3\x1a\x8a\xc2b\xd6<i\xdc\x01\xebN\xba\xb9\xf3f\xdfP\xd5.b[R\x97\xb0\x155\xbb-i\x93\x16T\xb7mN\xca)\x
19\x0dc\x8d\xfd\x81\xe9o\xae\xff\xc9\xff\xeek\xfe\x7f\x00r=\xf8\x9b\x8d\xf7\xd0|p\xf8\xc9|c8\x10\x11\x17w\xc4\xcb^Rp[\xd4\x98.\x97\xc5%\xdd\xd3nR\xc6\x11\xef(z\xca\xb8y\x11\xeb\x8a\x1a\x8a\xa26u\x0bY\xd1\x22o\xc4\x7f\xf6\xa26\xbf\xe3%\xdb\xd2\xb2f\x9c\xb6))h\xde\x1e\x9d\x06\xbd\xa9nSCA\xd8\xf0N\x22u\xc1^\xdb\xb64{\xc9\xabf}\xc3\xc3\xce\x9a\x93t\xc8\xd7\x0d\x99uW\xc1\xba\x94\xbd\x22\xda\x94\xad\xebR2\xeb\xb0\xa8m1U!\xff\x1fs\xf7\x19di~^\x87\xfdws\xe8\xdb9\x87\xe989\xcf\xce\xe6\x00`w\xb1\x00\x16 \x16 \x09\xc3\x04)\x91ER\x94D\x8ad\x81\x12K\xc1E\x95\xcb%\xabJ\xb6\xa9dKP\xb4)\xcb\x06\x05F\x11\x02\x11v\x01l\xc2\x86\x99\x0d3;agzf\xbag:\xe7\xbe\xdd\xf7\xf6\xcd\xd1\x1f\xd8\xe5\xcf\x82\x04\xed\xf2\xff\xed~\xbd\xe7=\xff\xf3>o=\xe7\x9c\xbc\x05\xe7\xac9\xe8\xb6S^6\xe55\x93\xee*\xea3\xa8_\x87\xa6\xb7\xf7C\x9b\x9f\x170\xa4\xc7u\x0dEEE\xa7\x14\xac\xfae\xaf\x18V5\xe8\xb2\xdew\xe6\xbe\xdd\x19\xb8\xf2A\x00\xf2\xd7\x02_i\xc2\xa9\xf5\xdd_\xa97\x03\x81?KOX2h\xd0]\xfdv\x1cp@\x8f\xa0v\x1bZ\xcc\xfb\xb4\xcb\x9a\x8e\x985\xa0\xdb\xa2U\x019\x09a)M\x0dMu\x9dN\xeb\xf2]\xbb\x9e\xf3\xae\x15\xb7\x84\xe5\x04\x1dTP6!\xe7\x8c-y5E\xfdV\xb5iH\xef\xff\xaa\xca{\xd8\x96e=\x9e\xd5#gCU\xa7\x9aye\x05\x03F\xb4\xc9\x9a5i]\xc1\x19\xd7\x8d\x19\xb0\xee\x86\xa4\xaa\x9c\x82Q\xf9}\x8bN\x87\xa6\xaa\x9a\x01\xefK\xa2\x84%\x94\x8d\x88KX\xf5\x80-\xf7\xb4\xea\xd4\xae\xaaC\xd6\x8e\xb0Nw\xcc\xfa\xb8]%9\xa9\xef\xcd\xbe\xf8_\x0b\xc7\x7f\xa6\x1d\xe1d\xe3\x7fO@\xb0\x9e5\xde\xcc\xeb\xd7fQ\xa7\xb2\x05Ym\x12\xaa\xee\x0aX\xb6\xe7\x11\x1b~\xc3\xab\x06\x0c*xV\x8f\x92q\x03\x92z\x0c\xab\xda\x96\xdco\x93\xae\xbb\xe9\xae1#^s\xc8c:\x11\xd40m\xd8\xafY\xb5d\xd6\x8e\x1e\x19\x07\xadz\x5c\xce\x9e\x82\xba1\xcbJ\xda\xd5\x85m\xe15%g\xcc\xb9\xe1\x8a%\xbf\xa0\xdb\x19Y\xef\xa9\x89b\xc6\x981_\xf7\x9cW\x14\xadxT\xd3\xa8\xa8QA;\x0e\xdb\xd6eSL\xa7\x01\xed\xfe\xba\x0d\x1dv\xdcu\xdc\xac\x1dk\xa6m8\xeb\x86\xac\x8fX1j\xc3\xb69qUu\x01y_\xf4=\xcb\xc6D\x03\x1b\x9b~\x04\xe7?SC\xbe]\x83[\x99\x7f\xfew\x83\xcdb0,\xa7U\xc6\x11\xd7\xec\xe8\xd6\xa9\xa4\xaa\x0dkJF}\xcf\
x01MCJ\xba\x14d\xd4\xccK\xa9\xb9k\xd2\xae3ft\x0a\xc9zz\xff\xb3G^E\xdc+\xdam\x0b\x18V\x97\xb6#%\xa0\xdb\x96\xb8\x1e\x19\x1bN\x8a;j\xd7\x86\x01){\x0e\xb9\xe8\xa3^w\xda\xac6\x0b\x86\x04\x84\xe4tY\x10v\x5c\xd6\xa8\x05-V\x9c\xd2\xe3\x86\xc7,k\xb3*\xa2ic\x7fh\xedV\xd2#f\xc1a\x97\xf4\xba\xab &\x22&*\x22( !\xe8\xae=aY\x07\xec\xeaVF\xd4\x96a\xb3Z\x0c\x88h\xd8\xd0\x11\x88<\x7f\xf7\x82\x0f\x86!\xff\xffi\xd4\xd4\x85L\xeb\x95uZ\xd5\x19\x07\x0c\x1auJ\x97\x0b\x8a\x06\xedZ\x12\xd1c\xc7\x9a\xa4\xeb\xee\xa8\xb8&\xab \xa2\xc7\x15-\xa6E\x14m\xfa%{\x16%|MB\xb7=\x83*\xba\x95\xec\xe8\x90t\xd4\x86m\x87\xcc\xdb2\xad b\xdd)o\xc9kX\xd1\x22\xe5\x9a\xcf\xc8:e\xd2\x09s\xda4\xed\x187\xe3%\xcfj\x95WF\xb7]_pG\x02\xef;\xab\xa1\xa4*\xa8b[L\xc2\xb2%7\xadK\xd9\xb1+'(\xe2\x84\x9a6\x19y\x09e\x19U\x93\xa64\x8c\xb9,\xaf\xac(f\xdb\xa7\xccI\xaa\xc9Z0n\xc8\xa2\x07\x7f\x14\x04\xf9a\x009\x19\x0c\xd4S\xa2\xcd\xacQ9\xc3\xa6\xf5\x9b\x96p\xd1\xb6[\x8e:\xa3\xaa\xa8!\xa3\xcbm=2\x1a\x1e\xd0\xef\x98\x82v\x01\x05M\xc7\x84\x05U\xb5H\xf8\xa6-\xad\x1a\xbe`\xc0\x92\x0d\x15M\xed\x1aB\xfa\xdc\x92U6e\xd6Cj\x9a\x8eH\xd8\xf2\xaa\x8a>1\x93\xda\xa4D\xcc\xe8\xb3h\xca\xae\x0e\xc7m\xe9p\xc1\xe7\x8c\xf9\x97\xf6\xd4\x95\xcd\x1a\x96\xf3-IM}\xcez[\xb7\xa7\xcd\xaa\xeb\x96\x15\xb6#\xb9\xbfC\xb9\xac\xac\xdd\xa0]\x05\xb7\x8c\x0bYS\xb6'\xaaC\xa7i\xb7\xfd\xb4\xcb\xda\xd4\x15\xf67\xf5\xdf2 \xa0i\xd5\xd3\xfb;-/7?@@.D\xdf\x0ct4\xa9\xda\x08\x1c\xd1\xab\xac`\xdc\x15)\x19m^\x10\xf4\xb2\xba\x9cVQ$\x95\xed\x8a9\xefe\xa7\xbcgP\x5c\xabqy\xcb\xd6\xb5\x89\xd9\xd10\xe2#\xce\x88y\xd3u\xcf\x1a6&\xaf\xd7i)\x01\x8f\x9aQ\xb2\xa0nP\xd3\x80\xb7\xe5t;\xe6\xa0Y\xa7\x5c\x95\xd1\xe2\xa0\xb8\x8cI\xffX\xddc6uh\xd3\xe7\x8f\xb5zZ\xab\x8c\x07\xb5\xb8\xe7\xa09\xc3\xf2\xfa\xbc\xa6[\xd1\x0f\xdc/\x8dQ\xef\x99\xb4\xea\xa8aIG\xcd\x8b\x1b\xd4\xed\xf32\x8a\xfa\x0d9)d\xd7\xa2#:=\xeb\xefy@\xca\x8011Q\x13Z\x907a\xd7e\xeb\x9e\xb0\xab\xf2A\x02\xf2p\xe5\x91f3@R\x96\xe4\x19R\x00\x00 
\x00IDAT\xd8\x01i!iA\xad\x1a\xa2\xf6\x0c(k\xb3\xa8\xdd\xaa\x90\x8a\x1d\x9f\xb6\xae\xe4\x15\x07\xbd\xafb\xcb\xa6MkZ\x04\x0cZ\x110lW\xb7%\xeb\x12b\xce{I\x9f\x19\xd4\xdc\xb4a\xdb\x9b\x0e\x88*9\xa4\xaa.\xe3\x9cVA\xdf6\xa8\xdb{Z\xd4\xb4\x99QsK\xc3\xe3>\xebOl\x8a\x09Y1n\xd6u\x15O\xb8\xe4~\x93vE\xdc\x16\xf3{~\xc2\xbcE\x1f\x93\xdaO\xed\x19qO\xd9\x15qa7<\xac\xcf[\xe2\xae*\x9a2'iVC\xd9\xb8\xefk\xf1{\x1e\x90\x15WP\xb3k\xc7\xae]kRf\x9c2+\xe8\xfb>i\xae\xf5\x03\x14u8\x1d\xb8\xd3<\xf5w\xf7L\x07\x9b*\x1a\x92\xdeq@TB\x9f\xc6\xfe\xe4\x9e\x92\xf0\xa8u/\xf9\x19U\xa7=#\xa5\xcbk\xfb\xeb\xa5\xbd2B2zm\xb8\xcfe\xcf\xf8\x96\x84~\xeb\x92\xae{\xc6\x86M1\x83\xba\x04\x85\xb5(\xbb\xee\x1dg\xedXR3a\xd8\x0d]\xb6\xf4\xd9\xb4\xe7\x88\xbc\x0e\xdd\xb6\xbd\xaa\xcf\x97\xfc'{\x8e\xb9\xe3S*jJ\x82\xee\x993&kS\xd0_\xf3\xef\x9cq\xd27\xa4%p\xca\x15\xbd\x9a\xce\xb9\xa7\xee\xa7\xbc,/b\xcbyuE;\xb6\x0d\xba_\xd4=I\x05\xbf\xe2}\x15euK\xceZ\x10\xb7\xe3\xacUa\xbb>&$n[\xf1\xcd\xed\x97>P@\x06\x02\x0b\xcd\xbe\xbf\xda\xdf\xda\x16X1\xae\xcb\xa6^k\x02\x12\xe6\xf4\xb8\xa7$\xefm\x0do)x\xd0\xd7\x1c\xd4\xeb\xcbR\xbe\xa6[HH]\x87-\x83\x92n;d\xc3\xb8\xbb\xce\xee\xaf\x0e\x84\xf4\xf8C\x8b\x1e\xf4\xbeM\x8fX\x11\xb6\xa4l\xc4/\xf8\xba\xd3vd\xb4\xa9\xe9\xb2eC\xc11\x11=\xf2\xb6ELJ\x0a[\xb2\xe9~[\xda\xbcaH\xc6\x9e.U=VD4u\x99u\xc2%Y\xa7\xbdlP\xd4\x9b\xe2\xaa\x12Bj\x82^\xf0\xf3\xde\xd5\xab\xc5-\xe3\xf2J\xea\xea\xde\xdeO\x11\x0e+\xbb\xa7W@FLDF\xce\xa0\x1b&\xadK\xd9TGV\xf6\xb5\xf4\x8b\x1f( \x07\xdds\xe4\x93\xf9\xa9m\x9d\x12\xda\xad(\x89\x0aK8a\xde\xe3\xee\x19\xf7\x8b\xe8\x96\xb0\xec\x19\xbb\xd6\xfd\x82\xabJ\xaa\x12b\xba\xad\x8a\x0bYs\xbf]\x15\xfd\x1e\xf2]E\x9fv\xc9\x9a\xa0\x01\x05wD<-\xa1[\xd6\xba_0\xe0w\x14$\xdc\xd9o\xe8\x181\xabO^N@A\x5cJ\xd9\x82\x87l\xbb\xeb#^\xb0\xa1\xd3A\xb3Flh(\xd8\x14\x92S\xda_\x81n\x93P\xf0\xb1\xfd\x9a\x8d\x05Y\x01q1+\x8e{\xc1\x19\xab\x0aJ\xf6\xacH\x08i\x0a\xe83nI\x5c^\x8b\xb2-\xe3\x86\xbd+\xa8\xa6\xa0$\xa4\x22%\xa4E4\xb0\xd0<\xb68\xfd\x1f\xffE\xe0\x9b\x1f\x1c 
\xf7\xc4\xa3\x03\xb3\xe9\x9f\xef\x91\x175/%\xa9\xc7\xbanw\x9c\xb7&jC\xde\xa6N\x03v\xdd\xf4\x975\xd4\x15M\x8b\x89\x1a\x96\x15\x10\xb3'\xa5`\xc5au#j\xf2\xf6\x84\xa4\xb4yS\xdc\xb8\xac\x0d#\x86\xcd\xeb3'\xe8\xa49\x84\xaciW\x92T\x91\xd5\xb4\xeb\xb8A\x1dj\xb6\x9d\xf2\xbcG\xbca\xd3\xa86\x8b\xea\xc2\xa2\xb6\x05e\x94\xb5)\xa8\x986\xa1f\xcb\xb8\xf7\xddQ6d\xd3!9\xc9\xfd\x0a\xb2\xa6I\x0bz\xf4\xd8\xf2\x90\xbb\x22Fm\xeaTpM\x87\x0d]\x16\xb4)\xaak:\x846\xeb\xc6e\xf6\x1f\xc8\x86\x1d\x9a\xf9[\xcb\x7f\x18\x0dL\x7fp\x80\xf0\x89dO\xa5\xf4\xe5\xddf3\x10\xd3\xa3\xac\xa4&lX\x1d\x1b\x0e\x99\xb4$&o\xd5\x90\x01\xdf\x12\xf2\xb6M\x19\xa3\x96l\x9b\xb0&\xa9.\x22\xaa\xachHN\x8b~s\x1e\xf5\x9e~\x1b\xd6\x8c\x98\xf5\xa0]-2r\xaa\x16\xe5d\xdd\xd3\xd4.\xa9hX\xd4\x8a\x90\x16\x11W\x9cV\x964n\xd45\x0fZ\xb2\xe6\x9c\x90A/\x19\x15tRA\xa7uMA\x051e]\x16=\xa9 \xa9M\x9b\xdb\xaaR\xd6u)kU\xd2\xd4\xd0aF\xab\xe3\x82\x96\xb5\x0b+\x99\xd8w\x14\xc7\x15\x0d\x1b\x93U\x92\xb0\x22!\x87\xa3\xaa\xea\x22z\xed5\x22\xb97~\xe7\xc5\xc0\xf5\xe6\x078\x18~k\xefk\xf3\xd1\x7fX\x0b4\x9aykV\x05\xd45\x94\x1d1\xaf,)\xab\xd7\xc7\xadI\x9aw\xd7Q\xb7M\xc9\x09j\x8a\x1a\xb4f\xc2\x96\x16\xbbZ\x95\x0dY\xf4\x8c5kz|G\xaf\x16Ag\xac\xe9\xb1\xa4\xd3\xacV\x09\x13~N\xd8\x9c#Z4\x1dt@\xab\x19\x1dB\x22b\xc6\xccX\xb2f\xd6\x8e\xb85]\x1e\xb4(\xe1]\xcf\xb9\xee~\xbf+\xa6CL\xc2\x88\xb8\x05-&\xa4\xad\x0b\xcb(\xdbQ\xf3sJR\xd6d-\xab\x09\xeb1\xa7a\xc1\x8cMCV\xd5\xe54T\x04\x95\x14\x04e]\xd6\xa3dVYV\x87\x8aY{\xd6\x9ds\xcb\xa7e\x93\x9b\xa1\xce\x80\x0f\x92!p\xea\xe4\xde'\xa3N\x06\x8a\x86m\xfa\x84\xb0\x8a\x8b\x0e:\xeeO\xd4\xc4\xfc;\xe7m9\xe8\x09M#\xb8\xe2\xa3.\xea\xd7\xa7\xb2o\xdb\x8f\xd8\xd1\xaaS\xa77M\xa9\x08z\xca\x9c9+\xd2\xfa\xcc;\xa4\xc3\x8fy\xc1\x01?0\xef9\x9d^\x14\xb7gF\x97\x98\x8d}\xe8\xfb\xb5\x88i\x97\x12\x96\xd6t\x0f\xb7L\xba\xe9\x09\xaf\x18u\xcf\xa3\xaa\xfb\xa3j]N\xb7\x98-\xcfy\xde\x09a\x87M;\xef\x15q\xed\xc6M{\xdc\xae=A\xbb\xf2>\xa9\xc3,\xc2ZtX\xd6\xa3hR\x9b)W\x8c\xd8\xd4\xa1OCY\xd6s\xdeSt\xcc\x8a\x84+\xcdJ\xfa\xef\xff\xeb\xd5\xe6^\xf3\x03\x06\xe4\xe0\xdd\xd0\x
df\xd8\x12\x0aL\x9b\x12\xb4hA]\xaf\xdb\xe2\xda\x1c\xb6\xa9\xdf\x821\xdd\x96\xfd\x84;\xa2\x16\xadIJI\x1br\xdd\x84%Q\x15)5\x8f\x88\xcb\x9bu\xd2\x8b\x9a\xdad\x9d\xf3\xae><\xe9M1Wt\xfbi\xbf\xe7\x05G\x85\x04<#\xef\xb6O\x9a\xd6\xa1\xe8\x84kB\x92:\x1c\x12\xc4m\xf7\xeb\xf2\x03'\x5c\xf6\x93\x22\xde\x17\xb3\xa7\xc3\x82.5!!E\xad:\x95\xe5\xf4\xb8%\xec\xae\xb3\xea\xc2\xae\xea\xb3\xabED]JL]^\xc5'\xbd&\xa4,\xa9)(dYHU\xc9\xd3\xbe\xe7A\xd7$mK\xab:\xe5}#6\x8c53\xe9\xa7\xffe\xac9\xf7A\x03\x92\xa8%\xffFG4\xa3\xc7\x9c\x836\x1cT7bLCVT\xda\x01\xdbJ^u\xda\xff-\xee\x0f\xf4\xdaQ\xd5a\xd8%#\xb6\xb5\x18\xc3\x86\xcf\xa8k\xb3\xec\xb07M\x88\xabK\x9b\x11pB\x87\x15\x13.\x18r\xd7\x86\x9c!\xf7\x84,#\xe9g|E\x87=yi?i\xcc{\xb2\x0e\xa9\xb8\xe2\xa4\xb7l\xf8M\xdf\xf7\x84\x7f/\xe6\xb4\x0d\xbf!oE\x87\x1e\xdbB\xb6\x9c\xb4\xa9[\xcc\xa6\x13\xd2NX\xb3bTIF\xa7yAq\xa5}s\xd1\xa0W=eQ]\xd3\x88i\x01\xa3\xb6\x9d\xb4\xe0]Sf\x15\xa4\x94\x1c\x94pU\x8fmg\xd5\x9a\x81\xadk\xff*\xdc\x5c\xf9\xa0\x01Y\x8b|\xe5\xef\x1c\x08\xbf+\xe7\x98e=\xd6\xc5tX2\xef\x9c\x8b\xda\x14MYtV\xde}\xd26%\xc4\x04\xf5Z\xd0\xaf,\xae`V\x9f\x94\xcb\xce\xc8I\xba\xe3\xb8-QY\x05\x13v\xac:\xa1]\xbf\xa2\x9an\xc7e\xdcvN\xc5\xa6(r\x0e\x0aJ\xeb\x14\xb3k\xd5}z]Vv\xd2;\xeew\xd0k\xce\xb9\xed\x90!w\x04\x5c\xb5\xa0U\xc3m-\x22\x06|\xdb\xe32B\x86\xdd2a\xc5\xac\xff\xce\x1bZUT\xc4l9hM\xd3\xa0\x1655Y\xbbJ\xfa\xd5\xe5Lx[\x97\xabr\xce\xe9\xb4\xe0\x90[\xba\xdcQ\xd50\xa2\xae\xe4\x5csy\xe3\xd2\xbfjon}\x90\xa2\x0eW\x1a\xf1\xfaMO{N\xd5a\xfdN\xea\xd0\xae\xe9o\xba\xeb#\xa6\x1c\xb6\xec\x90\xb2\x92\xa8\xd7L\xbag\xcc\x96\xa01C\xb2\x22\xa2&\x95\xcd8gK\xde%O\xb8\xae)lB\xaf\xb2\x92\xa3\xde\x11q\xc9\x8eyl9\xa7\xdb\xdb\xd6\xf5\xe9\xd6\xe2\xb6\x8ci\xa3vT\xf5\xe9v\xc1]_4\xe2[\x8eXs\xd9\xb3\xbe\xa3\xc3\x8a\xabhs\xde9w\xdcpB@\x0c\x1f\xf7\x1dQ{\x88{^\x87S\xbei\xd81y%Q\x03\xee\x09\x08\xabYP\xb3\xa7\xcb\xa8\x07\xe5\xdc\xd6/\xe3\xaf\xba\xa3\xdbG\xbd\xeb\x8e\x03\x96\x95\xf4\x089aT\xd6\x9e\x90\xef\xa86\x7f\xc5\x94\x0f\x5c\xd4;Z\xae\xffV\xcaJ 
o\xc2\x86\xaa\x8c!\x05)\xefHZ\xc7\x05u\x1b6\xf5\xfa\x9e\x16\xad2\xb6%\x91v\xd3AM\xd9\xfd?\xa4M\x9fM\x9dr\x06\xb4X\x11vMBC\xce\x19yS\xca\x86\xdd\xd0.f\xc1\x88\x92\xb2\xa2\x90G\xf0\xbe\x0e\x1b\xc6mZ\xf6\x94\xa6\xa8+z,9`\xcc+\x1e\xb6'$\xa9W\xc8\x0dQ\x05\xc3v\xd5%e\xcd\x8aIhsI\xd0\xc7\xec(!&\xaf\x22kLJ\xce\x94{z\x95u[3\xa8(\xa3\xed\xcf,\xe2^3iUEf?\xe7dOF\x5c\xca-}\x889\xd8L\xaf\x1c\xf87;n\x7f\xd0\x0c\xf9\x07\x85\xe3\x85f\xa0,j\xcb\x1d\xc7\x14\x5c\xb7a\xd5\xba\xa4\x88\xf7\x9d4\xa1\xcdO\xe9\xf4\x05+\xde\xd3\xe6S\x82\x96\xa5\x8c\xdb\xd0\x10\x10\xb1'hG\xd3s\xf2\x9a\xf6\xbcm\xdc\x82\xa3B\xf6\xb4Y\x17W\xb7\xe7}#\x1e\xf5u\x1b\x22\xfa%5\xf4{\xdf-\x87-bN\x5c\x8f\x98\x88\x86\xbc\xaa\xa7\xec\x98\xf1\xa4+j\xf6\x8cY\xb7fT\x9b6\xf3\xf2\xfaL\xab9\xaefY\xb7\x83v\xcc\xa9\xb9\xea\x94\xac\x15O\x0a*\xee\x1b\x0az\xad\x08\xaa\xfa\xa8\xebVE\xa5\xd5\xb4\xefgZ7\x84t\xdaS\x16Q5,\xe4\xaea;6E\xc5\xec&\xfe(4\xfb_\xcd\x90\x1f\x1a\x10\xa9\x95\x94fT\x97m\x0f\xbb\xa8\xea\x01!i\xbdvd\xf4\x0b\x88:\xe0-\xbd\xfe\x9d6\xf7k\xf7\x9a\x801\x05i-\xba5\xd4Li\x17\xf3\x92\x97\xc4\xe5\xdc\xf1Eu5\x0f\xbb\x22)'\x22)\xef\x80\x84\xa6\x0b~\xc6\xb8E\xd7\x04$\xc4uJ\xdaAU\xbf\x01m\xe6mJ\xdb\xd2\xe7\xff\xb4\xadM\xc9\x94a-.\x0b\x19\xf7\x0do\xa9\x09x\xc2\xa2^yO\x09\xda\xf4M}\xb2\xceY6\xac(e\xd0\xb6\xf3\xa8\xda5\xae\xae\xcf\xba\xac\x87\x8c\xaa\xaa:\xa4\xd3\xb6\x98N\x19SZ\xd45\x94\x94$\xac\xab8bD\x9b\xa4]\x11-\xcd\x96\x1f:\xa5\xf7Gpe\xc5\x12\x87\xfeV^O\x80\x09Aa\xa3V\xc5E\x9d\xb6nB\xab\xa4\x05\x8b:|]\xd2\x80YM\xed\xea6E\xc5\x15\xecjS\xdb\xdf=\xfc)\x0b\x16\x0c\x1a\xb4\xae[\xbfoH\xca\x18PrL\xbbU\x15\xeb\xbe\xe4\x07\xde\xd2\xa5[\xc1\x96\x82\xe7\xa4\xe4eu\xeb\xb1$\xa6[\xb7\xa0A\xef\x9bt\xc6\xdb\xeeY\x93\x15w\xc4\x86-\xbf\xee\xackz}\xcb\xa0v!\xbf\xef\xbc\x15)e\x1f\xf5U\xe3\x02VM\x18\xb3\xea\xd6\xbe\xfd`\xc7AT\xa5\xad\xebt\xc6\xb2\x19\xc3bB*v\x9dwo\xbf\xaf$\xa0!!\xe9\x96Mqcv-5S\x8b\xdb\xff\xa6\xc5\xf2\x07\xfd\x96u\xb8#\xfe\x9b\xa5f{ $\xe8\x96aEU\x19{\x96\xed`CC^\xbf4*\xf2\xc6\xe5\xb4X5\xa4\xa0 
\xa5($mX\xc2\xae\x80qiUe\x8f\xb9\xe1\x9e\xc7\x5c2j]\xd2c^\x91\xd6)\xea5\x13\x86l\xef\xfbk\x9f\xf3\x92[\xd6\x0cJ[0e\xc89{\xb2^\xf7i\xef\x98\xd6\xe39Ec^\x13Uq\xbf\xb7\xf5\xd8\xf5\x03\x87\xb4\xb8dH\xd3\xb8%\xad*\x12\xc2\x1a\xfa\x94,\xdb\xd1\xaelZ\xc2\xa6\xa3\x96\xecJ \xaa\xe9k>\xab\xe0\xae\xba\x90e\xc7\xbch[\x8b\x86~w\xad\xeb\x94u\xdcI+\xda\xcd\xf9\xd9\xe6\xb7\xde^\xfc\xda\x13\xff\xd5\x9fN~h@\xc6\xbbz\x7fc\xa5\x19\x0d\xf4\x99vF\xcd]A\xabz\xf5)I\xf9\x8c\xac!\x0b\x92\x16\x054\x0c\x0aXu\xc6-\xed\x22Z\xc5\xd4\xb5H\xaa9\xe2\x98\xef9\xa6O\xab\xd7\x1d7`N\xc9\x92G\xc5\xbd\xe3\x19\xe3^T\xf7\xdbn\xbahOI\x87\xaa\x05\x15?\xe3=[\x92Z\x1cq\xcf\x92\x15?\x89?r\xce\xe7\xc5\xfd\xa1\x94\x19?\xeb]?\xe6?\xaak\xf5\xa7\x1e\x15v\xc136d\xac\x18\xb7\xa3\xa1\xe2\xb4u\x1b\x06\x1c\x11\x96\xd14\xa0\xa2nN\x8b\x11EQ;b>\xe1\x8fM\xaah\xd7p\xc0\xeb\xee7\xa8nSS\xde\x136\x84-{\xdf\x80%\xcf4\xbf\x11\xf8\xcc\xef_|\xf9z\xf3\x03\xbf\xb2\x0e\x0d\xf6\xfe\xaaf9p\xd0\x92\xa4-'\xec:`K\xcd}\xba|G\x87\x16\xdbn\x1b\xb2a\xd2\xba\xb4'\xbd\xacS\xabc\xd2\xb6\x85l\xabhSTv\xd8-1W}\xcau\xc3\xa6\x91\xb4(\xe9\xd7\x5c\x97\xd6\xad\xcb\x1b\xce*Z0l\xc7\x86/JI\xbbl@\xd9\xba\x1e\xed6\xfc\xb2\xe7\xfd\xa1O\xa9\xd80cHT\xdc\x05\xc7|\xc3\xc3>\xe6\x9aIoZu@YHI\xc9y\xab\x82b\xca\xee\xear\xc8w\xb5\x18StAV\x9f\x826\xf7\x8c\xbb\xe3qk\xaey\xd2\x96\x02X7!\xe1\x05\xf7\x9b3$\xec\xa6\x84\xb6\xfd\xaa\xca\xb2t3\x1al\xfd\xdd\xf7.\xf9\xe05\xe4\xc1\xd4\xc2\x97\xc7\x9aG\x027\xa5\xf4\xc8\xc99\xea\x8a\x94\xbc\xb2u\x87\xcdJ+(*\xba_AH\xc2\x926q{\x8a\x8e\x08\x0a\xabiS\xc1\x09]\xeez\xcf/xU\xde\x8a\x9c\x9a-G\x0d\xfb\x86\x09\xfd.y\xda\xaa\x9bn:\xed\xa6\x8c\xa4-\x1d\xba\xdcU\x97\xf1\x84\x80\x92\xb8o\x19\xf2yW\xdc\x14\xd3%\xa7\xcf\x1b\xfe{\x7f\xe0\x7f\xf6G\x96uX\xb3\xe41\xb3vm\x09h5m\xd4\xba\x90]\xe75]\xf0\x17]6l\xd5a9u\xad\xde7&\xadMF\xd2\xfdn\xe8\xb5\xa9\xd5\x86=\x05%c\xaeK\xd9EUCYA\xdd\x80Y\xc7\x1aG\x82\xff\xfa\x7f\xb4\xe6\x83\x7f\xcbZ=4a\xc1\x9c\x82\xa4\xba\x16!+\x8a\xfa|\xc2\xa0s\x96\x0d\x0aY\x10\x126#'\xa9\x84\xb0\xa8\x11O\x99\xd7c\xcdiK2\xa2\x8a.\x0b{\xd0\
xef\x8a[p\xca\x01\x81\xfd\xca\xbb\xbf\xachK\xb7wty\xc0\xc7\xbc\xe7\x88\xa8N\x0f\xb8\xa7 \xa0E\xcc\x9bZm\xcb\xfb\x92\x0de9\x87\x15\x8d\xdaS\xf3\x84\x9b\x9e\xf3\x8fL\xfai1\x1b\xees\xdb\x80n\x1d\x06\x85\xc5\x0d\x0a*\xe9\xf1\xae\x86qW\xf4\x995\xe9\x07\xf2b6\x1c@\xbf\x92\x0e\xef\x0b\xd9\xb3bB\xd1\xd8\xbe\x19tCJH\xbf=\x01]\x06D\x0d\x99\xd3\xed\xfd\xfa\xbb\x5c\xe6C\x00\xe4h\xfb\xaap \xe6\xac\x19\x9b\x9ab\xde\x97\xb2\xed\xb2n\xb3FL\x18\x15\xd4\xa2\xe4~e\xf7\x1cP\x13\x174\xa2\xe4\xb8\x17uyK?\xe6Tu)\xef\x07\x91}\xce\xcb\xe6T\x85\x04M\xbb\xe1\xb8\xb2\xa8G\x94l\x99wL\xce\x90\x0d{~\xdae'\xcd\xebVQ1\xe6\xb8\x92\x0e\x17d\x1d\xd6\xe7=\x0d+Vd]\xf4\xf3\xb6\xfc\x89\xac)\xf3\xda\xd5\x84U\xf6C\xa3Ve\xec\xb9\xaaGZ\xca;\xdaT\xbc\xe6oZ\xb1\xea\x84\x84\xack\xdam\x1a\xf7mc\x06,\xe1\xaeA{6\xf7\x1b>k\x9aF\xad\xc9iq\xd9\xc7\x1cr$\x1a\xfc\xa7|*\xf0!\x5cY\xd9\xc7\xa6>\x9b\x92\x0d\xcc\xba\xcf\xa6\xb4\x8aV!]\x8e[\x90\x90\xb5\xecU\x0d\xa3J\x1a\x8a\xa6\xecjQ\x96T\xf3\xa8a\x97\x0dH\xe94*\xae\xe0q\x97\xf4\xea\xb5\xacl\xc7\x09W\x94\x0d\x190',oMUT\xb7;\x96\xb5\xd9\x11\x14uZ\xc25E\x05\x8f\xab\xc8\xc9\xc8k\x95\xd5nO\xd5\xf0\xbe\x00\x1fS\xf1\xbcQ\xedF\xac\xdbu\x9f]\xddv\xf4Y\xd2-*\xa5f\xd2\x8a\xa4\x01\x01!)[\x0eH\xab#-h\xcc\x96vi\x13\xea\xba\xe4\x15\xd5\x1c\xb6\xa5!.&d[\xd4\x8a\x84\x8a^Ew\xd5\xd4\x1d\xfd\x5c0\xf0j\xe5C`\xc8\xf9\x03K6-\x0a\xb9'd\xd4\x94\x9c\xaa\x82\x845%)%I)M\xddnhqG\xab&\x12~\xc2\xb7\xbd\xe0\xb0\x82\x87\xec\x9a\x13\xb6\xe9{\x1eW\x94U\x96\xf6\xac\xabz\xc5\xac;\xe4)w$\xac\xcb\x08\xdb\xd3c\xd4\x1d\xfd\xc2\x9e\xf5uK\x1e\x13P\xf3\x8an\xedv\xb4\xd8\x95TtA\xbfu\x1fW0h^\xd6/\xea\xf6\x80\xefY7\xe9\x829\x8f\xa1\xa6\xa1\xc3\xa2\x03\xda]5%&\xab\xa6\xa2\xc5\xa3\xfeD\xab\x94Mq=\xde\xd2\xa9C\xca\xb4\x82Y\x11\xdd\xfa\xbcf\x5c\xab\xa2\xb2\xaaVQA\xbd\x02\xee\x18\xd3\xe2'*\xb3\x17W\xd2\xbb\xc5\x0fx\x0d\xe8\xcf\xce\xd8\x17w\xce\x0fi\x06:\xb5\x0b\xa8\xaa+\x18\xf5\xb0[\x1e1\xad\xc7MU-r6\x9c\xb1$\xacCU\xc1\xfd\x1a\x22\x06\xd1\xe5\xab\xc6\x8dj\xd3/%)oQ\x97\xc7\xbd\xed\x84\xef\xe8\xd5\xabnMD\x8f\x0dm\x9e\xb6\xeb\x0
fU\xf5\xa8\xe8\x92\xf1\xb0\xb7\x95d\xb5\x89\xb9\xab\xd5\xfd\x8aV\x0c\xba\xeb\xa49\x01\x09\xf7\xf9\x13\xa7%\x15m\x9bw\xda]1\xed\xfa\xbc$\xa8\x0bM;\xa2\xcar\xaavt\xaaX\xf4\x09\xff\xd6\x17\x94\x5csH\xdd\x9eC*r\x1a\xa22N\x89\xbbdJ\x8f\x1bj\xce)\x1a\xb3,\x8e\x82\xba\xa4u\x07\x9bK\xc1\xe4\x7fx\xfe\x85t\xf3\x03\xd7\x90\xdf\x0eP\xb9\xd5\x13H{\xc0\x9a5Kz,\x8b\xc8\xfa\x03\xa7}\xd5\xa4o#\xea\xbc\xa2\x88\xb7=\xa0\xc5\x9c\xc3\xce\xca\x08\x0a\x98\xd0\xf4\xbe\xbfa\xce\x15ox\xc8\x83~WP\x9f!\x8b\xa2\xbe\xe5\x01\x9b*\x16%\x84\xcd\xd8u\xd8?\xf0\xa6\x7foEU\xd6M{\x82\xbe\xe4\x1dQ\x8bj\x22\xeesS\xd9\xfd\xe6\x1c\xb1\xeaq\xe7\xbd\xe1+\xfe\x0a\x16\xbd\x8f\x9f7#%\xadCD\xd0\x01\x11l\xe8\x926$*g@\xaf\x9a\x8f\xfa\xc7>oZ\xde'\xec\x88\xe81\xafb@LL\xd9\x9a5\xe7\xdds\xcd\xe7U\xbd\xa5\xe1\x8e\x11q9\xed\x22\x22\x12\xdek,\x07\xdf\xfe\xbf\xfc\x88\xce\x0f\xc5\x90\x8f\x05\xbf\xe7\x8f?\x9b~\x22i3\x10\xf6\x80m\xdb\xda\x8c{H\xd8\xbc\xf3\xee\xda\x15SQWW3lYH\xca\xb6\x92\x09E\x07\xfd\x03\xbf\xa4\xe4\xb7=\xa5\xe4>\xb3\xf2\x8e\xab\x89\xbb\x83M\x09\x9bZ\x14\x9cP\xb5\xa4[\xc1U\xbfl\xc7\x05)!$T\xec\xca\x09\xd9\xd0pP\xab\x17}|?\xb02*\xe2\x9a\x15\x0f\x19\xf4/\x8d\x1b\xd7\xa9\xd3\xb2.\xf3\xca\xaa\xd6\xf5\xd9\xb1\x8d\x98\x84\xb4n\xf9\xfdR\xbc\x1e]\x12\xeeJ(\xa9\xd8\x16WP4\xea\x86\xa0N%\xc7-*xL\xc5\x0fL\x19\xb7-fGT]H\xdc\xaea\xe1`b\xf1\xf6o\xfd\xa8\x00\xf9\xa1\x18\xf2?4\x02\xcdhW\xb9\x19\xb2\xa3\xcd\xf3\x12&\x15\x04,J\xdb\xf3\xbe\x8c\x82\x9c\x80\x84\x9a\x94\x0d\x05\x1f\x91\xd4\xe1\x8cw4\xbd\xe5\xa3~\xcf\x9f\xfam\xab\xf2^wG\xaf;6\x85L\x0a;!bG\xc0\x88m5?\xe6\x92\x1e\xedn9k\xcd\xba\xa45[N\x0bZw\xd7\x84n\x0bR\xce\x89Z\xb3\xe2\xa8Mu]F}\xdf\x15\xbf\xe3\xba\x8aW5\x9c\x154,(m\xc9Q\x9b\x8e\x88\xab\xca\xa8\xca\xebqT\xd9-\x93.\xd8\x13\x15\x11\x93\xd1/\xac\xcf\x8fY\x16u\x9f\x1dg\xac\x0a\xebt]\xd3\xd3V]3(\xabUAA\xdd\xac\x98E\x15\x9b\xff\x0b\x1f\x0aC\xa0%<\xf8\xa5\xf1f,\xd0\xa3\xa1!hW\x5c^]\xb7.\xcb:l\xfa\xd4\xbe\xdb5\xa6\xcf;\x06\xadJ\x9b\xd0\xa6h\xd7q\x13\xbe!\xe2\x88\x84!'}S\x97)\x97\x0c\x996\xa0\xa8\x22`\xd29\xabR\xf6\x14\x8dzW\x
97\xb0\xa0\xba\x8c\xc9?{\x22\xcd\xc88\xa7)oU\x9faW\x9c\xd3e\xd9\x94aM\x17\xf7s\xdf&\xa5\x84]\xd0\xab\xe1A\x7f\xea\xbc-\xab\xc6\x04\xa5-x\xc4;\x86\x94\xddt\xc8\xa8\xb0\xec~\xb0\xcc\x92\x9c%U[\x22\xda-y\xc8\x0a\x12\x16]uNP\x5c\xb7e!1m\xc2\xca\xba\xed\x9a\xfaK\x81\xdav\xedC`\x08\xa4\xba\x12\xee\x06\xfe\xac|\xae\xc7\xa2\x8aei\xe32\xee\x9a\x953hNESN\xbf\xb4\x8f\xc8\x89\xe8\xd6\xaa\xdb\xb8\x87\xd5\xa5\x8dJ\xb8mJ\xbb\x17}VMF\x87\x86\xaa=\x9b\x9a\xfa}\xcb\xa2\xb4\xe3\xb2:\xb4K\xc9\xdbp\xd5\xb8\x01\xb3J\xba\xadKhu[J\xc8\xaf\xcb\xd9Q\xb7 #a\xde[\x0eH9\xa9\xa2h\xda\xeb.:k\xd1\xa6\x1fx\xd0\x0cF\xdd\xb3\xe4#\xa2r\xc2\xa2J\xc2\xea.\x88h\xd5mE\xd7\xbe!t\x5c\x9b\x9c4^\x16\xd1a\xdeIg\xac\x0a \xa3K\x9b\x86nuI\xf5Z\xf7N1;\xe0C\xb9\xb2\xa0\xbd}\xda\xb2=\xfd\xae\x9b3\xe4\x88~\xad\xaeZ\x11\xd6bHLH\xab-\xad\xd2\xfa\xbc*,\xa6K@\xc0\x86\xb4AAk2\x06}\xdd\x92\x9c)U\xd7$\xacy\xc4\xae\x09U;&\xa5d\xbdj@\xc1\xb2\xbc\x1b\xfb\xc9\x87\xeb\xa2\x1e\xb2`JU\x1f\xd6\x9c3kB\xc9S\xee\xd9T\xd7\xf0\x985\xbdn)\xf9\x9c\xaa6=\xe2\xba\xb5iw[TMX\x5c\xcc\x1d\x9d\xder\xc6\xb2)53\xba\xac[\xd30e\xd7)\x09\x05[\x0e\x0b\xd8\xd4\xb0gYVB\xd6\x9cA\x19\x9d\xf2:m*\x9b\xd1\x10R\xa8t}\xff\xa5\xfc\xb3\xe5\x0f\x0d\x90\xa9?\x1dq\xa4Y\x93\xd5\xea\xb8\x05\xd3\x02\xdaD=\xec\xa2\x8a\x82^\x19\xfdb\xa6\xecZ\xf4\xe3rB\xaei\x93\xd3pA\xde\xa4\x84\xfb,\x1a\x96\xd0\xe7O\x0d\xa9{X\xdc+\x8e[\xd4!)f\xcb\xa0-!\x0de\xcf\xf9\x98\xef*\xda\x15\x14\x120)!\xe9\x9e\x80#\xa6\xfdO\xa6u\xbb\xe4\xb3:U\xb5\xba\xac\xd3\x9a\xa49/\xa89\xa8\xcd\x7f\xb0\xa2_\x8f\xac!sv\xa5\x9c2\xac\xa1\xc5\x03\x9a6\x04\x05\x1d\x90\xb7.gA\x8b]\xed\x1a\x82\xee*\x190d\xdbqq\xed\xd6\x0c\xe9\xd4\xe2\x8a\x11w\xf4(zJY\xadY\x8b\xef\xdc\xe6\xef4?4\x0d\x99\x18Y\xff\xe5t3\x15\xa8\xed\x07\xf3?bZ\xd8\x80y\x05\x07\x05\x0cZ0\xef\x94{R\x12\xe6\x8ciu\xd6\x92\x84\x98\xa6\x16\xdf1\xe6\xaa\x03\xf2\x06\xad\xe8\xd3bMA\x97N\xefi\xb5a\xc8\x80\xac\x82/\xf9OZ=\xe9\xfb\xbe\xeb\xa8\x80\xb8\xb4)\xa4\x5c\xb7n\xd4\x86VIg\xb1\xa4_Z\xdc\xa0\xdbr>\xa7\xc7\xac\x88\x93\xf2\xe8\xd5!&\xabK\xc9\xb2\x11]\x22\x16\xed\
xed'\xfc\x04\xf63O\xceZ\xd2\xa6\xea\x19\xf7\xe4\xf4\xda\x10\xd1t\xc2\xbb\xfa\xb5Z\x11\xd7fI\x87\x11o8\xa2\xa2 \xedAoK)\xd5\x1b\xa1\xa3\xbft}\xfbC\x14\xf5\xe1Oo}\xaeMO`]R\xc3\xfdb\x0e\xa9\xd8tC@Z\xc6\x9eV'\xbc\xe9\x13\x8a\x92r\xae;\xe2E}\xa2\xf2\xee9b\xc2w\x8d\xd9\xb4\xa5\xcd\xd3f]1\xa5\xd3%\xc3\xcav$\x945\xf5\x1b\xf1/\x9cw\xd0\x0f\xb4\x1b\x13\xb5\xa0$\x22\xe7\x98\xb4\x9b:\xad\xe9\xdf\xcfr\xff\xa2\x13\xde6\xea5q\xc3\xbe\xe8\xef\xbb\xe4\xa4Q/hwX\x9fkZ,\xc8j\xd7-h\xc1\x9ea5\x11uw\x8dK\xcb\x08{\xc5\xc3\xee8\xec\x8f\xa4<\xee]Y%C\xde2\xea\x8e\x86\x88=k\xce\xda1\xe3\x17\xfd\xa9\xb8)\xdd^\x15\x130\xd2\xac\x06\x9f\xff5>D@\xbe\xb8\xbd\xf0\x1b\xd5f$\x10\xd0\x14\x96\xd0.\xad!$\xad&\xe9c\xf6t\xbb\xa1[i\xff\x83x\xd0\x9e#:\xa5\xcc8*\xe0\xa2Cvq\xd0\xa8\xff\xd7ox\xc7\x1d\xa7\xb5\xd8\xb4\xaa)\xeb\x88\xa2n\x8b\x1e\xb7\xe9\x96\xd3\x96\xac*\x8aI\xaa\x1a\xb2##(mW\x9f1\xc7\xd5\xdds\xd6m#n8\xaf\xec+\xce:\xa9\xa1MM\xc4\xbc~!\x97-k\xd5\xe6}\x93\xf6\x04\xe5\xb1'(\xa7\x22#\xa1KR\xbb\x84m\xc7$\xb5\x1a\xf4\xae\x01E\x09e)\xa4\x0cjs\xc1!O\xf9\x8a\xa3\xd8\xb6\xe6\x84\x16\xb7t\x063\xdf\xde\xf8\xea\x8f\x12\x90\x1fZC\xbe\xd2\x0cy\xc6=\x07mz\xc8\xb2[\x96T\xbccJ\x0e7UmJh\xc8h\x17pK\xb7\x88\xb2A\xc3\x1ewG\xc4_rGD\xc8\x1d\xaf\xf9\x15\x7f,\xe0\xd7}\xdf\xaeU\xcfH;\xe9\xaa\x8a\x1e?\xee\xf7\xe5$\xfd\x8cC\xd2F\xed\x19\xb5g\xdb\xe7\xfc\xac9G\x9c\xb5#\xed%\xcfx\xd2\xd7\xecX\xf4%\xdfV\xf5[\x8e\xd8\xb4\xea\x9b\x0a\x06\x9c\xf5ui[\xbe(\xe3\xae\xbfh\xc3\x96\x8aVi\x87\xa4\x1d\xd3\xf4e\x09+\xf2\x16\x94Mj\xb3\xe55\xdb&\xb4\x0a\x99\xd4cs\x7fKf\xce/\x9a\xf5\xffxVX]A\x8f\xdb\xd6\xfd\x8aM-\x7f\xc4\x87\xca\x90\x8e\xae\x91/_l\x1e\x0f\xcc\xe8\xb5\xec\x01w\xb5[\x94\xdf\xf7\xf8M\x89\xc8\x89*\x0a\xc9\xc98fK\x5cPU\xca\x15\x03\x86\xbc\x83]m\xc2\x06\xdc\xd2\xa1\xd5\x86\x84UG]\xd1a\xcdQ\xeb\x1e\xf0\xbc\x832\xc6\xac\xfb\x9e\x80\x90\x0ekh\xb3\xa9\xd7m\x9b\xb6\xf6\x9f\xeb\xb8\x0ds\x1e\xb0.cH\xd6\x1bN\xd8\x14\xd0\xb7\xbfQ2\xa4[\xc9\x15C\xca\xde\xd6jX\xd8u\x8f\xdb\x94\xb4\xa6\xc3u\xe7\x94\x15\xb4(Y\x90\xd0\xa2O\x8b\x8b&]\x95\xd8\x9f\xebW\
xf4\xe8\xf4}\x8fk\xcai\xc8)H\x88J\x8a7W\x02\x9f\xf8\xe9\xf9@\xb6\xfe!\x02\x92\xab\x1c\xf9\xcd\xf1\xc8m\xad6|\xc4=#\xa2\xb6Et\x08\x1b\xb5\xed\xb8MkbFl\xed\xdb\xefw\x8d{\xce\x8c\xaa\xb2nUYu\xeb\xb2\xba=fURU\x17\xae\xdb\xb5)\xae\x7f\xdf\x00tI\x87u\x17\x9d\x13qGP\x97e\x1fwY\xd4\xba\xac\x8a\x9a\x93Z\xbd-\xe87\xfd\xbev\xef\x992\xacdC\xcd\x97\xfd\xbe\xb0\x0e\xdd\x0a\xeeY\x16\x17Su\xd0\x16R\x9e\xf5\xaa\x92\xa2\xf0~\xfc\xf1\xd6~\x83\xfb\x8fyO]\xd0\xaa3Vt\x08\x08\x09\xeb\x10\xd6\xb0\xad\xa1\xae\xa4\xa6KP\xbb\xbc\x9a\xa6\x0b\xf5\x13\xd3\xe7\xfeyK\xf3R\xe3C\x04D5\xf1\xab\xbb\xa9\x0e9\xf7\xdb\xb3\xa3`\xdc\xbbJrz\xd4\x95\x15d$e\xc4%\x91\x137$iJ\x0c\x1b\xea\xa6\xecZ\xd0!, \xe6\xb6\x1ew\x8c\xdbT2f\xd1#\xb6\xb5:\xef\x15IQ\xad\xeewS\xc0\x96NiM=\xf6\x8cK\xefO\xc8\x8b\x82\xce\x18\xf0\xba1/y\xc8\x9cI\xeb\xb2\xc2nhu\xd4\xac#r\xba\xdd\x15\xb1!\xa1.\xa2.\x8f\xa2\xa4\x90\x8c\xb8\xb4\xfb\xd4m\xa8\x88x\xc7 \x86\xad\x09\x18qC\xdd\xb6\x1eW\xf4\xcb\x9aB\x975M!KZ\xacz\xd8\x96\x96\xd2\xd6\x0b\xbf\xf3\xc7?J8~h\x0d\xf9R\x80\xe3W\xba\x84\x84\xf4\xa8;`\xc4W5}\xc6Ya=\x06D\x85u\x1b\x107+d@\xd1\x8a\x0e\xdf\xb7g[\xbb\xa8\x97-\x19\xb5c\xcbG\xdd\xf2\x8c\xa6]5\x1b\x86\xd4\x1dpY\x04\xdb~F\xd3\x8a\xbav%\xef\xb9OUP\xd2\xb2\xa0\xd3\xba-\xabI:\x22j\xc7\x9e\xcbn;++\xe2\xa2u\x1fwPI\xafYOz^\xda\xae\x836\x9dS\xb5\xa0\xea\x94=\xef\x1bS\x95\x15\x955\xe1\x15\x1b>/#\xa3fT\xa7\xab\x8e\xbam\xda\xa7\x05\xb4\xbb\xb3_\xcat\xc9)\xef\xe95!\xacKQ\xc4w\x1c\xae\x87\x92\x13\x7f\xc0\x87\xaa!\xd71\xdc\xbd\xfd\xa9\x1eM\x0d\x9bNXS\x95\x92\x91\xb5\xa5]VZD\xc5\xa8m\xedR2R:T\x8dI\xc8\xeb\x91\xf5\x05\x057\x14\x84\xb5\xea\xf5\x80W\x1dU\xb0\xa2\xa0b\xc6\x13JZuY\xb0d\xd4\x17-\xbb#%.'\xa4\x88\xaaA\xdf\x95@\xc9y\x17$<\xe1'\xbdl\x03\x07TE\x94-JH()\xca\x0a\x0b\xb9\xa3\xc3\x9c\xaaIk.:\xaf_V\x8f\x05A\xadH\x19\xd3jE\xbf\x90\xf7u\x22g\x92\xfd\x86\x94veY-\xa2V\x8d\xc8Z6iCMRQge>\xfc\xfa_8\x10\xc8~\xb8\xa2\xfeH<\xb9\x1e\xfb\xf5\x15M\x0da\xb7L\xdbQW\x94vNHNCZ\x9b\xdb\xdam\xd8\xf6\xa0y]BZ\x04|\xdc[\xa2\xae\xb9c\xd2\x01)wT-\x09\xc9\x9b\xd4\xb0aY\xbb\xa2!7\
x95\xa5\x1c3\xe7\x1b\xce\x1a6oO\x9f\x19\x9d\x1e\xb7nEC@F\xd4\x05\x7f\xdb\xb3\xfe\x83o\xfa\xa4\x01\xefZ\xf3\x90!\xbf\xe3\x13n\xb8\xabl\xdd\xff*dG\xcd]\xfd\xd2\x16\x0c;\xe7\xa2\xacvkZu\xbbc\xcc5\xf7$\xe4T4E\xad\x1a\xb3\xa7\xdf\x92Oz\xd7\x9a\xa8\x8a\xa6!9t\x99v\xca\x8a\x90\xa4w}\xc2\xb5p\xdf\xef\xcc}=\xebC~\xcb\xea\x8c\xac\x04\x1a\xbf\x9e2\x1e\xecTu\xca\x8c\xaa~{\xc2v\x15$\xad\x19\xd4fG\xbf\xaaA\x0d3>-aP\xd6;\x8eX\xf6W\xadZ\x92\xb6\x87\x93\xda\xf5[\xb0n\xd9!\xa36$-yV\xd9)\x175\x9c\x16rO\xd4\x9cv!I\x051O\xef;\x02'\x1c5\xef\xb2\xd3\x0eyS\xc4A\x1d\xbe\xe3\xb6\xbf\xe8\xa2!m\x0e\xea\xf1}/\xf8\x84iE\xdb\x1e\x15\x94\xb1k\x5cNY\x8b\x16\xb7\x8dX\xd6\xab\xee)\xab\x06\xe5\xb4(\xfa\xa4{\x82\x1e\xf6]\x03\x0a\xfa\x95tY7lW\x873f\xc5\x8c\xd90\x84\x86\xdd\xcf\xb5\x17\xb7\x1b\x1f2 \xc7\x22\xad\x91\xb6//\x04\xeb\xc1.\x9d.J\xeb\x91\xd7\xa7CDJA\x8f\xb0i\xe3v\xd4\xc4\xdd\x133\xe0\xb6\xb0!\x0f\xdb\xf6\x19\x7f[\xc8m\x8fx\xcb\x97\xddrC\xd5\x09Sv\xe4\xad\xca*\xf8\x82\xff\xe8q7L\x89\xca\xbb\xae\xac\xe0\x93\xcav\xad\x1aT\xf1\xa0\x17\x05\x15\x0c\x8a\x99\xd1\xa6\xc7]]^0/\xec\xe3J\xb6Li\x0a\x9bw\xd7/\xfb\x9c\x7f\xe4\x86\x9f\x92\xf6\x12\x1e\xb6gMJ\xa7\x1d\xeb\xfal\xf8\xa8]\xdc\xf2y\xdf\xd3m\xc7\x01\xdf\xd1g\xdc\x1f\x19\x15\xb7\xa0\xa2U\xc2\x8a\x03Vm\xdb\x13\xd2\xe9\xba\xb0\xa2\xc5F\xe3b\xed\x9f$\xad6?d@\x9e\x0cO\xa56\xffVUg\x80\xb4\x80\xb0\x92n\x0b\xba\xed\x0a\x8a))jQ\x92R\x964\xa0[\xc2\xa4\x11a7\xad\xbb\xe4\x17\xbd\xa5\xcf\x9c\x84\x909Q\xe3\x22^P\xd5!,\xa4KE\xa7\x9aNw\x95\x04\x1d\x10\x103\xab\xa4\xa8S\xb7C^0lN\xd2\xb6\xa8Q5\x0b\xc2n\x99\xf2\xb0\x15ya\xc3f5\x95L\x98t\xc5\xf7\xfd\x15)_S\xf7\x90UA3\x1e2\xe7\x9c]u\x9bF4\x14\xed\xaa\x99\xf7\xb8YmJJ\x8ey\xdd',h\xd9\xbf\xac\xa2\xda\xad\xecWM\x96U\xb5k\xb3\xd7l\x0d4\xff\x8f\xab\xaf\xff\xa8\xe1\xf8/\x98\xd4\xe7\x83\xef\x86Cb\xc1\xbc.Kz\xec\x09\x9aw\xc2\xb6\xa3\xf2vTl\xab\x8a\xeb\xb5bK]\x9b\xbc^3\xeeX\xc6\x19\xff\xd6\x80Muu[\xbaMxC]\x97\xa8\x19\xc7\x04\x94m\xeat\xd2IC\xd2f\x85u\x0a+\x9a\xd7k\xd7\x8c\x1b\x1e\x96\xd5*+\xeac\xa6\x1d7\xac\xc3)!\xef\xe9\xb0iKT\x97
\xb2]\xdd\xee:\xa4\xcd\xf3\xee\xfaq!7\x0ci8\xef\xba^\xd7\xf4\xab\xa8\xa9[\xd2\xa3S\xbb%Ia;\xe2\xe2\xdev\xd0\x05\x07\xcd\xaaZ\xd0T\xb6\xe1\xb8\x8aV\xed\xf2rj\xd2R\x95F\xe0\xd2\xff\xe6\xbf\xc1\xf9/\xd0\x90Fl\xfb\xaf\xf76\x06\x03AU\x8b\x82\x18\xb3\xb8o\x0e\x0e\xdb\xd5/,\xe2\x8e\x87\xe5\xf7\xe3\xc62\xcehS\xb7\xa8\xd3\xa49\x0dqea\x01\x83\xf6\xe4\xcc\x19\x93\xf0\x96\x16\xfd\xba\xed\xd8\x125\xad\xe08\xee\x18s\xc5\xa4eI\x11\xcf\xf8\xae\x8f{K\xbb\xa2\x90\xa0QyC\xee\xa8k\xaa\xe9\x97\x94\xdeo$\xbfn\xc4!e\x13\x1a\xdeP\xf4\x88\xf5}\xe7m\xc1\xba9){\xea\x82\x92\x16e\xc4\xcd\xeb\xb2\xab\xa1SF\xd8I\xef8\xa0\xac\xc5=S\x22\x96\x14\x0c\xaaj\xb1+&D\xb8\xed\xef\xcd\xbc\xf4\xdf\x02\x90\x1f\x9a!\x93\xc1\xc3\x91\x1e\xf1\xe0\x907\x04\x14\xd1a\xd9!Y\x0b&e\x0c\x9bWR\x97\xf4\xba\x86v\x1d\x9e\x94r@C\xbb\xa3\xbegAA\x8bi\xed2B\xae\x194\xeaqo[uRZ\x5c\xc5\x86N\x1b\x12>m\xcdm\x93\xfe\xc0\x01\xef*\xb9\xeda\x1b\xda\xcc:\xa4\xa0\xachTANQ\xcc\xb0M\xeb\x12\xb6\xb5(9-\xef\xb8\xbc\xb7\xd5\x05,:\xe4Q\xaf\x8bIh1\xaffD\xbb\xa8\xb2\x83\xd6Tti\x98T\xb5eT\x97k\xba-y\xc9O\xd9\x164\xeb\x8c\xb0\x8a\x94\xb8\x05\x9b\x0a>\xa9\xa6\xd6Hf\xf3\x7f\xffs\x89?\x17\x0cY\x09\xb6wW\x7f\xb5d\xd5\xb8%\x0d\xd4\xc4%\xc5\x14m9bM\x87\x8cV\xdb\x8eh*\xc8\x88kz\xc5\xb8\xb0\xa6'Qt\xc3!\x8b\x8a\xfa%D\x5c\xb3,\xa7_R^DBAR\xcc\xb4\x19\xe7\x1d7cOL\x9f\xa8\x826Em\xfa\xbc\xa9CQ\xddG\xd5\xed\x98\xf1\xb4\xd7\x1c\xf3\x94\xe7\xc5M\xd9\xb2\xab]YR\xc8\x9eny\x9b\xde\xc9\xd3\xed\x11\x00\x00 
\x00IDAT3&hK\x97\xe3\xeejS\xb6\xa7iWT\xd5\xa6\x88\x165\xed:]\xd3\xaf\xa8h\xd2\x15\x0f\xb8\xa7)\xaa\xa4\xac\xdf\x86v}\x02\xee);\xddx\xe1\x9f\xdcz\xe1V\xed\xcf\x05C*\x8dZ\xe8\xb0\x82\x01\x11MU\x1d\xfamY2\xef\x90\xac\xcb\xc2*F\xecHZ\xd3cJ\xd0\x15\x1d~K\xceM7]\xb5b\xc0\x94\xac\x0e%=\xae\x0a\xaa\x0a{DQFF\xc5\x8c\x82\xba\x1b\x9e\x92r\xd1-%G\xd4-J:\xe8\xae\xa2\x80.-\xb2:\xfc\xac\x7f\xa1\xe0o{\xc4W=\xeb\x96\x7f\xe6\xa4\xc7\xfd'!\xad\xc6\xadz\xd7\x84c\xb2\x8e\x88:e\xd5\x9c\x01\xcb\xde\xd2\xd4\x90\x11\x12\x95\x10\xd6\xe6\x11e\xab\xaaR\x96L\xe8\xd3\xaa\xd7\xaa\x13^\xf7\x11]v\xed:*\xe3i\x0b\x96\x0dIi\xad_\x0c\xbd\xfc\x0f\xfd7:?4Cj\xf5g>\xbe\xf7\x93\xfd\xd6\xdd\x93\xf1\x94e\x8b\xce\xe8\xdb/\xb1\xeb\xd7*\xa7n\xd2\xb2!\xf7D\x1d\x10\xf3\xa0\x17M(:lO\xd5\xac\xb4>U\x0b\xba\x0d\xba,f\xc8\x8c-\x1d\xf6D\x1cr\xcc)Us\xd6<b\xcc\xae7\x8d\x08)#)\xa4\xcb\xacuC\xee\xd9\xf0c\xda=\xef\x88\xd3\xfe\xa9Ig\xd5\xbdj\xc2aW\xd5%\x8c\xd8\xb6b\xc0\x15\x9d\xb6l\x99\xd0\xd0g\xdd\x0e\x0a\xda\x85D\x15\xc4]\xd6)\xa0*\xedc\x22\xe6T\x15\x04\xa5=\xe1uq\x03\xaa\xder\xbf\xcb\xa6Te$4\x82\xcd\xaf\xfe\xd6W\xff\xdc\x00B\xe4\x89\xf2gB:\xb5\x0a\x9aq\xde\x1d)\x1fuY\xc3\xb6\x8ca!\xadn\xe9\xd2!\xa5h^\xb7i\x7f\xc5\xbf\xd1e\xdb\x93\xc2:\x84\xd4\xad\xab\xcb\x888iN^\x9b\x03r\xf6<d\xce\x8c\xb4\xa3v\xdc\xef\x05\x9d\xd2NzWN\xa7\xb4^I;\xe6\xd5m\x8a\x18Q\xb6\xa2\xe2\x82\x1b~N\xc8%\x8b~\xdc\x0dozX\xd8\xa2U\xe7u\x89\xaaz\xdf=\x0f\xe8s\xd9\x8c\xcf\x9a\xf2\x9e\x88\x90ee\x93\xb6<lZ\x97\x1c\xf2&\xdd\xd6\x94RP\x101i\xd9M\x1fu\xc6\xebj\xfb\xcbH\x11K\x9exb6P\xac\xfd\xb9\x00\xe4\x0b\x81\x1b\xce\xfeT\xfc\xd1\xba\xb0+\x86\xad[sL\x9b\xef\x18\xd0g\xd7\x84m\xbbj\xa2\x826\xb5*;\xe0=\x0fy\xd7sZ\x0cZpSQ\xc9\xa2\x01\xeb\x8a~\xd2?\xf3\xb8M\x9d\xb6LH\xba\xea\x9cE\x7f\xc1\xf7\x1d\xf6m\x07\xe5%\x5c\xd4k\xd2\x8c\xa8uQ\x87\x0d\x9a6\xae(jT\xdc#\x92\xd2\xa6M\x1b\xf7\xb0\xef8lXE\xc6\xa8Qay5\x05\x0dq\xebvt;\xe4mY-Bn\xe9\xd2.\xbd\xbf\x8f\xd2\xa3.&\xa8\xee\xa7]\xd0oOD\xc3\xa0\x92\xc8\xffG\xdd}FY\x9e\xdfg\x81\xff\xdc\x9c*\xe7\xd4\x15\xba:\xe7\x9e\x9e\xd8\xa3\x09\
x9a\xa0dKV\xb2,#\xc0\x06c,l\xedr\x0e\x07\x0c\x0b\xec\xd9\x05\xce\xb2\xcb\x01L\x5c\x1f\x036\x06\xe4\xb5l\x09\xc9\xb6$#i4\x9a\xa0\xd1\xe4\x9e\xe9\xdc\xd5\xa1\xaa+\xe7\xaa[U\xb7n\x8e\xfbBux\xc3+8\x98S\xdc\xf7\xf7\xd5s\x9e\xdf\xf3M\xff\xe7\x91\xb1dX\xb3\xba\x16\xdb\x0a\xd5\xce\xc9\x91\x7f>\xearm_\x00\xf2@\xe0\x9a\xd3_\x5c8\x1e7c\xd0\xa6\x16q\xcfJ\x9bw\xdcM-Br\x82\x8a\x0e\xc9\x89I;i\xc7\xff\xeew\x0c\x9a\xd1\xd0kLD\x8b7D\xec\x88\x08\xb8\xebYe\xad\xa6\xf4\xbb\xaa\xee\x88\xaa\x03\xde\xf2\x98IGET,8az\xcf\xa6\xb9GIB\xc2\xa4U\x15\xe3\x1a\xda\xbc\xec\xa0\x8f\xb9\xacC\x9bw\x8d*\xbao\xc0\x13\x96\xbc\xa3C\xbfUy\xcd\xaek\x95\x11\xb5\xa4\xcf\xba6\xd7<\xaddW\xc9\x08\x0a\x22v4d\xd4\xa4%\xcd\x1a\xb2\xadYCPN\xd9\x86\xaa\xa8ME\xcd\xd6\xab\xd5\xaf\xcc\x7f\xef\x8f\xab\x7fRO\xd6\x7f\xa5\xa8\x97Q\x0f\x06\x8c\x0b\xca\x09\xca*\xf8\x86\x09'QYo\xcc\xec6(\xa8\xb8k[\xcc\xa8\xab:\xfd3\xe7\xb4k\xb5\xed\x8e?\x10\xb5\xe1\xac\xa2\xd3n\xe8\xd7\xe1\x8e\xbb\xb2b\xb2\xba\xad\x1b\xb6l\xc1c\xde\xd0\xed\x8f\xcd9\xa2\xd9\xa4\xa8\x8c^5\xb75I\xf8\xf16\xbfYEF\xce!w\xfc\xa6S\x82r\x86\xe4T\x1cr\xdf\xdb\xde\xf3\xb0q%#N\x9b7jY\xafMI\xab\x9a\xdc\xf6\xb8\xf7\xa5\x1d\xd1dZ\x01\x11M\x9aEm\xe9\x97@X\xcb^\x22]\xaf]\xad6m\x19\x95\x12\xab\xd5\xa2O\xfc\xe1\x13\x0d\xf6\x09 \x11\x84z{\xcc\x0b\xcb\x09I\x19\xd4)`\xc5\xe9\xda\xf9\xd5\xf2\xd5Z=\x22i\x5cB\x0duu\xb3B\x82\x86\x0c\x186\xa7\xa6\xa2.`\xd4\xba\x8f\xb8\xaf\xa4I\xab\xb4\xacv}\x8a^\xd0#\xa7\xe0\x19[N\x88\xb8fLVT\xbf+\x06\x8d\x89\x8b\x0a\xe8qP\xc35q\xc7\x84}X\xc6\xac>\xd3r\x8e+\xd82n\xc2\xdfw\xd0\x1f\xb9k\xd3\xb2\x87\x94<\xad,\xab[B\x93.U\x83\x1a\xd6\xd4\x14\xb4\x09h\xd7\xaa\xa6\xa8$\xef\x80\xac\xbc\x826\xf7D\xackWr\xd1\x96\xbae\x9b\x8d\x8e\xc6\xaf\xbf\xf2O\xeb\xfb\x06\x90\x06\x1a7V\x05\x14\x84\xa4\xb0bC\xc9\xe3\x8d\xf9\xd0\xcf\xfc\xbd\xc5\xd5J 
,jNDQ\x9f\x1d=\xba5\xeb\xb6bZ\x87Q\x87\xad\x09\xdb2cQA\xc0\xba\xb0\xa8V\x9d.Y\xd1i\x8c\xbd|\xf4v%S\xa2\xd6\xd4$\xdd1\xa0h\xd3\xa6m\x1d\x0a\xca\x0a\xc6T-\xeb\xf2\x86\xa8]3Z5\x99\xd6&h\xdd\x19\xbf\xe2\xcb\x9e\xd7mB\x87wm{U\x8b\x84\xa2>]\x8eYA^\x8b\x9a\xa451\x1b*\x02:\xb4\x99V\xf6\x94\x90\xba\x88\x11wt\xd8p\xca\x15\x09\x05\xa3B\xe1\xee_\xf7'\xfa\xfb\xaf\x04d\x17\x99R\x93\xb8\x9a\xa8\x05mR\x8e;j\xc9\x86O|5\xd5<b\xcb\xb2\x80SZ\xccXTR\x12p\xc6\xb8\x9fu_\x8by\xd3fU\x1c\xd1\xa4E\x8f\x985EEU\x1f\xd5$,k\x1d\x1f\xb7.m\xde\x989-Z\xdd\xf1\x98M\xebb\x22N\xf8\x96a\x1b:\x8d\xba\xe5\x92\xa0)G<\xa1\xb4\xe7\xb6\xdb\xe6\x98k\x16}H\xafkj\xfe\x86\x17\xc4\xec\xfa\xdf\x5c\xf6\x9c\x90\xbceW\x0d\xd9v@M\xc4\xa8.\x19)\xebZ\x0d\x0aZ\xb3\xa1_HL\xd5\xb2N'\x85el9\xa8\xa6d\xd03\x7f\xeb\xe1\xc8\x9f$ \xff\x95\xa2~\x97\xf0\xa9\xf4\xc2/\xc4\xd5qPJ\xda\x92\x88G\x02\xf7V&\xff\xf1\xd3\x9f\x9d=9\x1a\xa8\x89\xd9\xb0c\xc8AE\xbd\xce\xdaQ\xf5\xa6\x88\xab~Z\xab\x07\xe5\xbd/\xeb\x9e\x8a\x01\xf4X\x170\xab]XL\xc2q\xbfa\x5c\xcc\x80Y\xdd\xd8r\xdaU\x07\x9c3)%/&, 
o\xdaq\xcfy\x15)o\x89y\xc0\xa6^]\x965\xa4l\xba\xaf\xe1\xa2\xef\xba\x80\xb0?t\xc8-U\xbd\xb2\xc6\x15D\xdcuB\xd8-4k\x97Q\xb6%\xac\xe4\x90\x8fy\xd7\xb6\xa4~S\xea\x96\x84\x84\xa5lQ\xce\xfe\xce\xd7\xbe\xf6\xf9\xfa\x8f\xf6\x0fC\x10\xaco\x1f0\xaf\xa6d\xc7M;>\xe9\x03^\xd1\xff/\x1b\x81L\xa5lAH\xd5\xc7%D\x15\xe5M\xf9\x9e\x97U\x15\x1d6\xed\x0d/\xf8\x03;j\x1e0\x8e\xb8\xa2-\xadv\x9d\xb0c\xd3\x9a-\xd7\xfdc\x05k\xee\xeb\xd4#iK\xdeC\x02^QC\x93_4aN\xc1\x90!\xdf\xd7\xe5O\xf9\xba^\x0f\x9b\x90T\xf0\xba\xeb\xfe\x86[B\x1e\xd2\xe5{v\x94|\xdf\xaa\xa7\xed\xe8Q2\xe3\xac\xaa^eG\xac\x997\xa2\xae`\xc6\xb0\x1e1%u?\xf2\x7fi\x16V\x91\xd7-\xeb\xb0mc?\xde'\xd6\xceo~6\xf0\x7f7\xf6\x11Cx:\xb6Pm\xfa\xcb\x04\x82\x11]\x8e\x8a\xbbm\xb0\xde\x15\xc8\xfe\xda\xa7&\x7f\xeb\x13\x91\xb3\xd1\xc0\xb6>\xb3\xca*\x22\xaa\x8e\xbb\xeb\xa2\xab\x06}G\x9f\xc7=\xa7hR\xc7\x7f>;\x0d\x8bH(\xda\xb0\xe0\x90\xa0.1W\xf1\x80)Yi]z\xdcQ\xb1\xe3\xac\x1dm\xd2\xb6,k\x91U\xd0$\xac\xc5\x9aQw\x94D\x0c\xe9\xb0\xebc\xfe\x81\x13\xba\xe5\x84\x9d\xd4c\xd3G\xd5\xdc\xd5\xadj\xc7iW\xd4\xad\xca\xda2$eKU\xc0y\xef\x19T\x16@\xd4\x87\x84\xad\xaak\xb7$\xac,\xaa!\xac(\xda\xa8\xcc}\xf5?~2x\xbb\xb1\x8f\x18\xd2Y?[\x8b\x16\xfb\x1b\xab\x86\xe4\xdd\x94\xf7\x98{\xd5\xc9\x9d\xbf\xf0b\xa0^rRQJ\x87]i\x0ds\x1e\xf4\xbe\x8f\x19\xf0\xb7eD\xb4\xfa\xb2w\xec\x98\xb7+*cH\xc2\xaa\x05\x0d\xab~\xd2\xd3\xee\x0b\xc8zPN\xcc\xcb\xda\xf5\x89\xdb4\xeb\xb0\x80fWm\x8bzV^\x93\xaa.\x03\xeei\xd6\xed\xfb\x9a\x1c\xb5\xae\xcfM\xef\xeb\xf7C\xbfdMCQ]\x93\x86\x92_\xd7\xaa\xdb%%\x9f\xf6#]\xca\xfa\x8d\x08\xd8\x92\xd6\xabM\xc9[F\x5c\xf3\xb8,\xaa^\x142\xac*#h\xc9)Qe\xeb\x0e\xca\x05\xb3'\x9f\x0bm\x05\xf7\x15CF=X\xba\xf6\xa7\x0a\xad\xc1HI\xbb\x8c\x9c\xb2T\xa8\xe3\xdb_\xfa\x0a\xd1\xcf8\x1d\x0c\x14\xb5Kk\x12\x102c\xd0\x06\xfe\x8d\x9a\x8a9\x8f\xc8\x18\xb4c\xdb\x96.w\x0c\xecy\x22T]\xb1\xadY]\xbf;\x9a\xa5\x0dZ\x93\xd5\xa1\xa8OZ\xc6\xaaqk\xea\x82:-\x09\xdaT0n\xd0\xbc\x8f\xbb\x22\xaafM\x87\x11\xad\x96\xa4\xf7ln\xc6\xd4\x0d\xc9:\xed\x8f\xc5\x8c\xcb\x9b\xd4e\xc1\x90%m\xb6t\x0a\xd9TUq\xd0\xae\x84m!u=\xba\xc
4\xf7\xbc\xef\xba\x844l\x1a\x12\xb0!\x15($\x9b\xfeQ!0_\xdbG\x0cy\xa6\xf6\xb7\xaa\xebk\x9b\xb5^M\xe22\x1e\xb6i\xd5\xd6\x1f\xc0\x91\xe6e5]\x12\x0e\xcaj\x16\x124\xe7\x86m\x17u;\xa5I\xafI\xbfiG\x5c\x87m\x093\xce\x89ZR\xd4\xa9Y\xc5\x80\x1d\xeb\x9a\x1d\xb1nD\xab\xa0\x01w5+\xebT\xd5a\xd1\x15[:\xe5U\x8dI\xca\x09\xbb\xebAU\x03\xba\xf5\x88{\xcfq\xab\xf2\xfa\x05L\xe9q\xdf\xe3\x86]0gI\x8f\x9a\x82a\xb3*\x96\xb4*\x8a(8`WY\xcea\x0bj\x122:\xac\xcb\xab\x09\x0a\x8b\x9b\xd2$ \xa3.\x19\xc8\xb7l\x07\x8a\xfb\xa9\xec\xe5\xaf5\x18\xd8\xee\x0aN\xd9u\xdd\xa7\xdc7f\xfc\xeaW\x7f\xf7\x17\xa3\x97\x02\xb9\xa6\xc1FE\xde\xae\x8c\x82mae\x9c\x93\x14\x11\x95\xd3\xe2\xdbN\xf8\xb8\x86\xa2\x92\x92n\x09A\x19a]J2\xb2\x1a\xf2\x0e\x88\x0b\x1av\xd5\xb2\xa4\x1dM\xea:UlY\x97p\xca}eU\x9d\xee\xc8hsZ\x9b\xff\xe4\x80;6E\xbd\xe6\x90I\xa3\xd2\x16\x8di\x97S\xf6;\xbe%\xa8I\xb7u#\xba\xa54\x0ck\x92\xb1lY\xd4u\xa3\x92\x86d\x0d\xa8)\xeb\xf4}\xed\xda\x9d\x12\xd0\xd0'\xa2$+n\xc8\x9a\xb8\xde\xa7\xdf/\xef+@\xe0\xb1\xdf.$\xa3\xf5\x13:\xbc#\x5c}M\xf1K\x0f&&\xaa?\x1b\x0c\xb4l\xcb\x89\xaa\x9b\xd0\xaaaS\x8f\x11\x1b6\xac\x98tG\xd0G\xb4\xc8\xa9\x0b\xb8\xe7\x90k\x86\xfc\xb1\xe7\x15,\xab\x0b\xeaqKJ\x5c\xc8\xdb^\xf2\x11-\xde\x95t\xcce]:\xa5\x85\xc5\xf1\x9ci\x11\xbb\x8a\x1e\xb6\xe4\x05\xa7\x1d\xf7}g\x94\xdc\xf6\x94M\x07\xcd\xf9e\x19\xef+Y\x90\xf0\x8bZ]\xd1k\xc2\x03*vL\xe9\xb6aV\xabq\x01\x9d\xe2\xf2r\x0av-h\x95R\xd6\xe6\x15\xed\xe6\xb4X\xb1!\xa5j\xcb\xa6M\x05I\xe1\x0f\xfe\xc96\x86\xff\x8dv)O5\xb2\xf5R\xb0W\xb4\xb1S\xef\xffO_\xff\x04\xcf\x87\x8f\x06_\xba|\xe4\xd8V0\xaf_\xd0\x0d'\xecX1f\xcd\x93\xb6u\x98\x942\xed\xa25\xd3\xea\xc6\xcc\x0aI\xc8(k7dS\xda\xae\x11\x19\xa7\xc4]\xf0\x82\xcb\x1e\xd6\xac\xd9\xf7\x0d\x98\xd0\xad\xd9,>\xa8\xe0=5aqU\x1f\xd4\xef\xbaU\x1f\xf5\x92\x92\xc7\xbdg\xdbS\xeez\xdfO\xaaZ\xf7\x9c\x19\xf3J\xa2n\x19r\xdd\xa0\x80Vs2\xfa\xcd\x8b\x89\xea\xb0*\xe0\x90]i\x87ee\xa5%U\xd1mJ\xbb\xba\xb0\xb0\x8a6\x01\x09\x13:.\xfd\xe8\xa1}\xc7\x10\x86\xfe\xeenp\x5c\xdaB\xa0T\xf8\xf9\xcf\x0b\xb0\xd4x\xa1\x11iLh\xd1\xef\x88m\x11\x1b2\x0e\x9a\xf
1%\xad>\xe6\xb6\xb8U\x0fJ\xf93>`\xcc]\x05%\x93.\x8aX\xb5c\xd2\xaeGd\x9cvS\xd0\xbf\x13\xf0Y\x15\xaf\xfb\x81/Y\xf2\xa8U\xb7\xb5*{\xc5#N8\xad`\xc5\xa3nx\xcb/8\xe1\xd7$=\xe3+\xe6|\xd1\xb7\xc4\xfd\x0do\xf8\xa1\x87|[\xc1\xe7u\xba\xab\xc35?%\x22\xe0\x9en\x1f\x90\x93B\xca\x8a\x80\xba\x19\xeb\x22\x96\xac+;'\xa0Y\xd9\xa4\xcf\xca\xaaIi\xd7o^\xc4}\x91J\xfb\x83\xfb\x90!\xa7\x82\xe7F\xe6_\xdc=\xd8n\xe0\x85/\x7f\xf8\x99\xf0KU\x9e\x0b\x1f\x08\xbfu\xbb\xeb\x80\xe0\xb2\x8bn\x8aI\x08(\x19\xf4\xa2\x9fp\xcf\x11\xd7<\xe3\x87>a\xc7w\x8c\xa8\x08J+\xe8\x94T\x11\x97Ps\xcd\x115m\x82\x8e\xbae\xd1\x80\x98n_\xf5\x9cYU\x01\x1bjN\xca\xcb(\x09\xc9{\xd8\xb03~O\x976\x97\xad\xf8\x8c\x9a\xb7\xfd\x8c\xaf\x88:&j\xcb\xa8\x1e\xbb\xbe\xe6\xac\x97<cZC\xc6\x057l\x08\xaa(\xebT\x92\xd1$*&,(\xa4a\x1d\x9d\xd2\xca:\xb4\x9b4hGQ\x8b\x0e\xacT\xeb\xe1\xbe\x07\xbf\xf9\xde>c\xc8\x8d\xfa\xefL\x8f\x1f?864\xfe\xe5\x0f\xffJ\xe0\xa5\x1f\xef\x06\x02\xa5PS<k\xd7\xc7,8gV\xab\x8c\x94\x9b>/\xee\xef\xdb\xf1\xf7\xfcP\xc4\xcb\xd6\x5c\x144`\xc3auA\x91\xbd\x80\x98\x90\x1e\x15\xcdV\x9cs\xdd\x0e\xa2V\x84\xfc\xc0k\xd2\xe2Z\xb4!\xe2s\x9a\x84\x14tJ\xd8\xf1u\x9f\xd6\xec%\x1d.z\xd5\xab~\xc2?\xf3\xa4\xb3\xd2\xe6u\x89I\xd8r\xc1\x15\xa7\xdc6\xab\xa6\xe2\xba\xa4sv4\x8c\xa8+\xea1\xa0\xdf\x86\x9c\x90fe\xe7\x85U\x0c\x19\xb7n\xd5s\x1aV\xb4\x08\x0a\xc8\x0a\x05\xc7\x0c=\xfa\xf3\x7f\x82\x9dH\xe8\xbf\xf5\x8f\xad\xaee\xa6\xb7\x7f&\xf8\x1b{\xa3\xe8\xf6\xd0l8\xf9\xbffS\xa3\x81w=dZX\xdeY\xd3\x1eq\xcb9\xdf\x95\xf4\xb2\x98'\x8c\xd9\x91S\x96U6\xa3i\xef\xd8`IM\xc4\x16:\xe5\x0cj\x16\x15Q1\xe7\xb0\x7f\xe9\x94f\xa5=>\xad\xba#\xa0\xa8nW\x8f\x94a\xab\xbekH\xca\xae\x13\x0ex\xdd\xa7|M@U\xdc\x01%E\x9b\x86e\xccK\x09\x89#bKQr\xefV\x7f\xd4\xa6\x82\xb0\x86Q9I\x8b\xe6E\xf5YT\xd6\xae\xcf+\xca\x8e\xdbFM\x8br 
_\x9c\x9a\xb9\xf7\xdf\xc9\xf9\xe7\xbf\xa3\x86\xf0Jm\xa1\xb6P\xff7\xff\xb9E\xda\xf4g\x9aJ\xad\xa9\xc0M\x1fsWH\xdaA\xf3\x06\xcd{^\xc5y+\x0e\xd85\xa3_\xc1\x88\x98\x92V#\xb2\x06LI\xea\xd1o]RBP\xc0\xba+\xeaNJ\xfa\x92y?%\xed\xb6\xe3bzlH:nVUHR\xcd\xb6\x98m\xe3\xbaLh\xb6\xe9\xaaC\xde\xf5q\xbd~l\x8c\xbc\xa5\x82\x0dyM\xb6de\x8ci\xd6\x22\x8c\xac\xbc\x16\xeb\x0abF\x15\x1c\x13\xb3e\xd4y%\x19\x05EeW\x1dSvGP\xc4\xae\x82\x9cR\xa8\xe5\xe1\x7f\xdf\xf8\x8b\xe1}\xc7\x90\xffB\xe6C\xb7\x9aK\x7f%\x14\x0c\x07\xf2zUU\x95u\xca\x0b\xeb5\xa3I\xc6\x8c\xb8~\xcb\xba\xdcP\xd3bS\xfa\xc7\xfb\x15\xbd\xe6\x84m\x0a:g\xd1a\xcdN{\xc7\xae^\xb7%\x95\xf7\x22#b\xd6\x8cI\xca\xea\xd0.+\xae\xc3\xa0y\x07,[qv\xaf\x10\xbea\x5cPZX\xd9i%\x09\xab&\x14m\x19\x97\x92\x93\xb4\xa5IE]DHB\xaf\x922\xb2n\xea\x91\x905\xa4`\xc7!u\x015U5\xa3*\xb6\xf5\xdaV\x91h\x14R\xe9\x7f\xb2\x16\x9c\xa8\xed3\x86\xfc\x17\xe7A:\x02\xc3:\xb4\xab9\xa2\x22,dGE\xd1\xeb\x06\xdc\x90\xd0)\xa0,\xe0\xb6S\xea6t\x08\xa98*\xec\x1dC\xfa\x0dh\xf7\x96\xb0\x09\xddN8\xee\x80\x1b\xae)k\x95rWY\xbb\xfe\xbd\x04\xe9Mk:\x14\xd5u\xea\x13\x11\xb2\xe1\x9e-9\x0b\x86\x14\xdc\x12\x16\xd5eW\xc4\x11=\xce\x8a\x1b3/.*eG\x9f\xbc\xa2\x82AiY!=r\xa2:m(;\xef\xfbZ\x1dw]Q\xd6\xb0\xb8G\xdc\x10\x173\xeba)\xb5p\x7f\xe7\x9b\xe1\xd5\xf0\xbe{\xb2\xfe\xcb\xe5n-\x9a\x0eG\xd4%\xbc\xa1\xc3\xae>\xbb\x0a\x06\xfd\x9c\x9a\x0b\xe6\xd5\xecJ\x088m^\x13\xe6\xb4i2i\xcd\xb8\x907\xb5\xb8\xe7\xb8\xa2\xa3\xb6\xfd\x96V\xf7\xb5\xfa\x15\xeb\x8e{O\xbb\x83\x16DU\x8c9\xa9\xcf\x9a\x9cem.\xcbjuJ\x97\xbc!9\xd75\xb9\xe6I\x19C\xea\xee\xcaZrW\x8f\x16\x15\xc3\xb6\x0c\xe8\xd2\xeb\xb6\x01]\xaaf\x1c\xd1\xa2\xc9\x92^\x0d\xed\x06}\xd87=-\xa9\xa2SD\x97\x15}ni\xd3\xa6\xd9\x01\xd7T\xd5m8?\xfazq\xdf\x03\x92u0\x5c\x12\xb5+\x82\x0d\x01\xf7%\xf5\xab\x9a4\xef{\xca\xb6<.\xed)\xaf\xa9\xaai1\x22\xa7\xac\xea\x82\x1dY-\xae\x1a\x15v\xc0=\xdf\xf2\x11E\x8f\xaa\xfbC\x1d\xfe\xb9\xbfkTQ\x9fuMr^q\xdbI\x19]v\x8ck\xf7\x92\xabB\x8e\xban\xcc\x07L;\xedu\x9d&mI(\xba\xe5\xaf\xc9([0*%\xe4\x05\x87}\xce-S\x02\xc6M\x992j\xd8\x0d\xcd\xc6d\xfd\x9e\xbf\xaa\x
ecUW\xfd\xe2^b\xfc\x1bJ\x1erI]\xb3N%AA\xdd\x8f\xaa\xed{\x0d9\x15Z\x19\x88\x7f\xb1'\x90pP\xdd\x82\xc3v\xec\x0a\xd9\xd4\x22\xaaU\xc3\x80\xf7\xf5\x98w\xc6\xb6\xaa\xb2\x0e\x15K\x92V\xf5\x99wJ\xd4\x9a\xa2f\x09\x1fqS\xc2=!3\x9a<\xe7\x8f\xac\xca\xa9\xeaU\x14\xd5i\xc8\xac\x1e!y\x075\x8c9\xec\x9e\xbb\xda-\x9b5\xe6{N\xe8\x17\xb7m\xddg\xb4\xb9\xae \xa6h\xdb\x94\xc3\x06\x94|\xcbE\x1d\xa6$\xf5\xed\xf5KUEU\x17\xc5|M\xd0S\xba\xfcG\xe7m\x0b\xeb\xd7\xec\x9a\x8b\xb6\xf72I\xe2\xa2&\x83s_\xd9\xf7\x80$B\xf5\xce\xc1\xbf\xb4\xaenNB\xdd\xfc\xde\xec\xf6\x19)\xc3\xfeH\xc8\xac\x07\x0dx\xdc7\xb4(\xdb4lZR\xbb\xd3\x16U\xe5\xed8*c\xdb\x03\x9a\x1d\xf0mc\xea\xbe \xe4\xb2^\xeb\x22\x16\xec\xaa\xe8\xd6\xa6\x22#c\xc6g\xfc\x10\x1d\xa6Eem8g\xc5\x8a_u\xdf\x9a\xaas~\xda\xaf\x99\xd6)eG\xc2\x92V\xf7l+\x88\x99\x11\x90\xb2\xa8.f\xc7\xac&m\xc2.\x8b\xf8\xac\xb7\xacY\x15\xd1.a\xc9\xacS\xf26\x8d\x9aUCUD 4\xf5/\xf6= \x1f\x09U\xfa\x0b\xbf\xd4e\xc5\xb3\x16\x10\xd3cG\xca\x86\xba\x92\x03\x0azM\xa8x\xc7\xc7ed\xa5\xdc\xd5d^D]QDB\xd6\x1d\x0f\xe0\x845\xf7\x9dt\xd9)?\xb2\xe5qU\x03&5\x19\x95\xb4,\xab\x80f\xc3\xd2\x9e\xb3\x89!iEQ\xed\x82>\xe0\xf7=\xee)\x05\x93\xb6\xfd\x9cY\x11\x8b\xba]2bS\x87^\xddf\x1dV\x14W\x13\x175\xa3\xd9\xaa\xa7\xad:\x22\xe8\xba\xaaG\xdd\xd2e\xce\xba\x9a\xd3f\xb4\xc8\xea\xd6,fU^\xa2\xb1a\xf1\x1f\xfe\x9d\xfd\x0eH586\xd2\xff\x0b\xef\x19\xb4i\xd0\xb4]I\xcbz\x85}\xce-\x19;Z\x9c\xd0\xe6\x11\xffV\xcc\x94Qm\x0a\xfa\xdc\x15\xd5.`AJ\xd8\xa0\xba\x97\x8ch\x15UsI\xaf&C\x16\xdc\x16T\x96tUD\xd4\x83nk\x92QpF\xcd\x96y\xf3ZU4\xa4\x1dp\xd4-o\x08\xe9\xf4\x92u\xddzD\xcc\xc8\x0a\xaa\x1aWv\xdf\x88Y\xc3\xee\xdbF\x5c\xa7i\xe7\xdd\xd7P\x12\x96\xd5\xb0\xe1\xb4%c\x22B\xd6\xf4J\xeb\x97UT\xb0\xab.^?\x9c\xfa\xb3\xbfSJ\xefs@\xda\x03\x81\xf1\xb5\x9f\x1b\x967\xa2\xdb\xb2\xa4\x15\xc7mHz\xd7!\x11-\xb6\x85l\xa9\xfbI9\x91=\xcf\x85E\xe3v5\xa9\xc8\x88+)\x89xZ\xdd\x88\xab\xaa{\x1f\xe6\x14\x8d\xc9X\xd1c\xca\x19\x8b:\x84\xec*\xe8\xd0\xa5\xd5M\xc7\xb59\xe6\x865\x9d\x0e\xb8l\xc5Q=\xeek\xd5\xa1\xdb-g\xdd\xb3\x8d*\xb6e4\x14\x0d((\xd9\x95\x94\xb6\xeci
\xd74i\x12UwP\xab&\xf7\x84,)\xab\xea\xb0\xa5Y\xdd\xba\x0e\x9bR\xb6\x84\x94\x02\xdd\xdf\x99\x99\xdc\xe7U\xd6\xbdZ\xff\xd0\x80Ug\xc5,9`\xc1aS\x22\x8e\xfb\xab\xe2\xbe#\xa3\xea\xbaG\xb4\x88\x99Q\x96WQr\xda\x9a\xa0Me\x17\x84u\xa9\xe8\x914\xe0\xa66c\x1a\x92*\x9e\x90\xb5%\x22\xa7\xd7\x86\x83\xea\xd2\x0a\xaa\x9erP\xc3\xae\xd7\x1c\xf6#u}VM\x1a7f\xd9\xba#\xe2\x0a\x1ev\xc4\x15E_\xb4\xee\x80\xba\xbc\xb8\x16\xa7\xc4\xac\xab\x09\x89\x1a\xd7\xe7u\x1d\xb6= \xa1\xe4\xb25k\x22N9g\xd8!\x9dV%\x15\x8c)\x88Kh\x92\x0b\xec4F\x1e\xdd\xf7\x1a\xc2\xd8G\xb7\x9f\xcbY7`FXI^LJV\xc4\xbc\x94\x8c\x8a\x0f\xeaT\xf7\xef\x9d\xb1\xa0GR\xcc\xb4\xb0&\x8b\x9aT\x14T\xa4\x84\x8d\xc8;n]Q\xd0\xa8)\xbb\xca\x9adE4\x84\xad;mAX\xc3\xac\x82\xb2\xd3\xc6TD\xcd(\xeaq\xce[\x86\xcc\xea5i\xc7\x19\xb7\xac\xfa\xa0\x86\xdf\xdb[\xd8\xc6\xc4t\xf8\x81\xa0\x83\x16\xd45$\xf6\xbc\xaf[\xbdhD\xc2\xae\x84\xaaa?\x10\x13\xb3bG\x97\xab\xc6\xadY5dQH#\x10n\xe4\x96f\xbe\xf1s\x81\xab\xfb\x97!?\x13\xa0v \xa3\xd9\xa7\xdcu\xd0-\x0c\xeb0k\xc4\x8b\xd6\xb5\xfa\x9c\xba\x1b\xbe\xab\xec\xb7\xbd\xed\xb8-\x0b\x96\x9dW\xd4\xe6ak*\xea*\xceJ\xda\x90\xf1\xaf\xecZ\xb6\xe9u\x8f\xfb\xa0\x97M\x8a\xdapH\x87-%G\x15\x95\xd5\x8c\xeb\xf4\x09\xef\xbb\xa4\xa4,n\xc9k\xba\xbc\xe3\xaf{\xcb\xa3\x02^7 
\xe1?\xb8\xe9\xff\xb3hE\xbff+\x16\xfd\xaa\x9cK\xc6\x15\x14,8\xa8n\xd6\x82/\xc8\xb8\xad\xd7\xae)\x13~ZP\xd2\x88>\x9b\x9eqC\xc3\xcf\x8a{^ZP]\xb8\xf8\xdf\xbc\xdb\xfb\x1f\xc3\x90s\x81\xeb\x1e\xfc\xe5\xc6\x91\xbc\xab\xce\xdaQST\x94q@\xde\x07\xad\xa9\xfa\xb6\x9fPv@\xceW}\xd4=\xc32\xda\xd5LH\x99\xf2\x847\x0d\x08\xbak\xd8\x0d]>\xa5\xea\x90\xdb\x96\xb4\xf8\xa6\x7f\xe0\x87\xb6\xf4H\x0b\xa89\xe5\xf7\x0d\x08h\xd2$\xec\xaa\x90\xc7,\xd8\xd0\xa1\xea\xb8v\x0d\x7f\xe8A\xafj\x15w\xc0\x8c_\xb2\xea7\x1d\x142i\xd9A\xed\xfe\xd0O\xa9Y\xd5\xa6Y\xb3m)'\x05\xbd\xadG\x9b\xfb\x1a\x0e\xc8k\x88\xb8\xa5OE\xc8\xab\x9e0\xe6\xf7\xb5\xb8o@\xa1\x91\x0b&\x7fo\xfa\x8d+\xfb\x19\x90C\x81\xdb\x86>\xd7|\xac\xe2\xd3\x96\x15\x14-\x08K\x1a\xf7\xac\x0d#\xae8i]\xbf\x0f\x89\x9b4kY\x97c*&u\xa8\x08\x9a\xf1\x88\x15\xcd\xc6e|\xc8\xf3\xbek\xd7\xf7\xfc\xac\x0f\xfbCG\xddQ\x13\x91\xb0\xa8\xa8!\xad\xa6lWH\xb3\x8c\x1d\xb4\xee\x99\x1fo\xe94e\xc9\x11%\xcf\xe8\xf3\xfb\x9a\x8c\xcb+:\xe0eI\xf7\x1d7c\xc7E\x7f b\xdc\x84\x88m]6\xd4\x15]t\xd7\xaasb\xd6U\x15\xdd\xf3Y\xdfT\x95\xf3\x80K\x0a\xfa%\xcd)\x0a6\xca\x81\xa6\xdf\x98\x9e\xd8\xd7\x1a\xf2X\xe0\x8a'?\xef\xd8\xac\xfb\x1eDV\xc5\x011\x1bf\x8c\xb9\xafMI\xcd\x8c~K\xe2\x92\xda\xdc\xb5kM\x97\x05\xad\xc2\xa2v\xf5khx\xc8\xac\xf7\x9d\x15\x141\xe7e\xcfJ\xb9`\xc6\xaa6=\xb2\xc6,\x89k\x92\x92\xc6\xd3\xd2Xw\xd0-\x07\xc4l\x19wVQ\xdc\xfb\x02\x9eq\xc2\x1b:4\xbbb\xd8u\x17\xa5\x8d\x08\xa8\xe9\x915\xaf\x15IUC\x06\x15\x05\x0dj\x91\x16\xd0#/\xae\xc3;>f\xd7\x92\x15\x17\xac\xdb\x92\xd2\xac\xac\xaf\x91\x0d\x0c\x7f\xe5\xe6\x9d}]e\xdd\xc1=\xefj\x97\xf4\x96uT\xe5-:hG\xdcQ\xab\xbalH\xf9\xba\x19i\xed\x0e\xef\xe5\xf1\x94\xf4\xc9iW4f[\xc21\xd7=\xe2\x82\xaaMk:\xd5]\x11t_AQ\xcd\x8e\x94E\xcdJz\xack5`E\xcaM\x19\xdf\xd5m]N\x8b\xb2[6d\x14\x94]w\xdbG\x95\xdc\xd0\xef\xaa.74Tm\x9a\x94\xd5\xad\xcd=\x0d\xc3\x1a\xb2\xbe\xe3C\xb2V\xa5\x0d\x09\xda1$f\xc8Qy\xb3\xceIZt\x5c\x15[\x92\xd2\x8d\xa0\xf8\xe6>\xaf\xb2\x1e\x08\xdcq\xee\xf3}\xc7\xe6\x1d\xd5+o\xc3\x8e\xb0^\xb3\xfe\xbc)iA9'\xd4\x5c\xd4c\xd5\xba\xef)z\xda\x869UUQ\x9d\x82Bf\
xf72\x06\xdf\xd6a]\x9b\x9a\x84\xb06\x1b\x9a\xac\x0a\xaa\xd8\x15\xb1\x82\xa2\xac\x86C\xb2\x8eZ\xf29-f\x15E\x855\x0b\x08\xb9\xed\x01\x9bNXS\xb5\xe8\xb45\x07-\xea\xd5\xaa\xacY\xa7&KR\xbaqCH\xd8\x80\xb4\xaaSV\x0d\xba\xaf\xd3}\x1d\xeeKK{\xde\x82\x8a\x86\x8c\x84#\xd2Bb\x8d\xd5\xe0\xd4\xaf\xef,\xefk\x86\xfc\xd8\x5cl^\xbfI\xdcw\xc2I\x87\xad\xfa\x82u\x1b\xae\xeb5`[Z\xd1\x1b\xae\x09xV\xdc%\xabj:\xfc8\xf1l\xc1\xa4\x16;x\xc5g%\x1dP\xd2\xae\xe0\x88\xaaU\xab\x0e)+9cS\x5cm/\xe0\xf1\x86\x11y\xdd\x0avTU\xb5I\xab\x09\xc8;!d\xcb\xa2\x90\xb2\x13\x8a\x12&\x9c\xb2a\xc3\x92a\x8b\xb2\xdaU\x14\x14\x9d\xb0%\x89i'\xdc\xd4\xe4\xa6\x90\xbccb\x8e;+jR\xd0q\xf4\x0byKC\xb3\x0d\xad\x1e\x0b\xfcI1\xe4\xbf\x13 %\xe4\xa5\x14\x0cH9\xe7\xaa\x1dW\x8c\xb9\xe4\xa6\x0f\xe9\x90\x15wG\xc3\xbcc~\xc1\x8e{\x06\xc4\x0d:\xe5\xb6\x16\x9bjRZU\x84\x1cv\xc6\xebrB\xd6\xec\x18\xf7\xa2\x94#b\x82B\x8a\xe6\x1dT\x12\xd2!\xa2\xe4\x80\x1fxW\xd8+\x9a\xf6z\xea\xa3n\xaak\x88YpD\xdc\x88\xb2C\x164\xb4\xbbjL\xbbnoi\x13\xb0\xa4U\xd6\x8a\xeb\x9aD,\xe9\xf5\xb2\x1d\x05)\x87lYr\xcb\x0d\x01Q\x19A\x13\x126\xa5\x1dSt\xc7\xe1\xbd\xbb\xad}\x0dH\x0bz\x1d\xd3a\xcd\xb4\x86\x86uIU1\x7f\xde-\x03V\xbc\xa2\x1f\xcfhs]QP\xdd\xbaN\xf7\x1d1+%&\xa1f\xdb\x8c\x0d'\x9c\xf0C\xef\xf9\x92\x1bn;\xedm\x9f\xd4\xe4\xa6m\x03B64\x94\x95dD\xdd\xf4\x97\x1c\xb5b\xc3\x03R\x9a\x5ct\xcfIOhxC\xab\x01)3\xc6\xfcCE\x7f\xc7\xa2\x90U\x05m\xe2\xa6\x0d:\xe3\x8a\x98O\x89i\xb1,)\xe1)=\xda\xb4Y\x162\xe2\x19A\x0bR6E=\xb9w\xd04\xe1\x90vw\xd4\x5c\xb7\xcf5\xe4\xa9\xc0\xfb.|\xf6\xed\x13a\x1bRj\xd2\xda\xe5\xc4t{\xcd\xa7m;'\xa9S\x875S\x12\x0e\xdbVs\xc6\x9b\xc6m\xdb\xd2\x22m^\x18gmjr\xc5\x17\xcc\xfa\x9egm\x8b8\xe6\x05gl\xc8\xc9\x18T\xb4\xab]@@H\x97Y3\xba\x05,\xdb\xb1\xa9I\xc2\xae\xff\xe41aY9e=\xfe\xc0/k\xf1\x97uhWFP^\x97-W=\xe6\xb6\x8c\xb0\xaa\x8c\xa3B\x8eX\x95wK\x97\x93n\xbbj\xdc\xa87\x9c4\xeb]\x07m\xc9\xedyK\x9ek\xcc\x07\x1f\xfe\xd7W\x96\xf75 
\xef#2\xde\xf1l\xb7\x88\x8f\x0a;`\xc3\xb0\x01G\x0dyG\xd9{r\xe2j>h]\xd1\xa2M#\xbe\xef\xac7\xb5\x1a\xb1hDA\xd9\x09\xc7d\xb4\xea\xf5\xaen\xefj\x11R\x17\xd0)m\xc7\xa0M!\xd7}\xf8\xc76\xf82Hx\xda\xa4\xc7<\xe6\xaeMQ5\x1b\xc6\xddSw\xdc\xaa%\x9d~\xc3\xbfSp\xd4\x88k:\xcc\xab9\xef\x072\x9es\xc9\xf3n\xab\xc8\x1a\xb3!\xad\xa8&\xe7aw\xdd\x17\xd7\xeb\x8a%\xcf\x9ap@\xd6\xa6\x88\xa4\xb4^%7\x1a\x07\x83\xbd\xbf\xf5\xee\xe2\xbe\x9fe\xf5_\xdc|>\xac\xe2\xaeUWD\xa5\xc5\xec\xe2\x01\xb7t\x08\xe9Vv\xdb\xb4fe\xf3\xa2\xea\x02FD\xdc\x17W4 n\xcb\xae>U\x0d\x87lx\xca\xbc\x90\xb4\x03\xfa%\xcc\xd9\xd4o\xd5)\xef\xe9\xb6\xa3[\xb3\x82\x93\xc2\xa6\x5cwY\xa7E\x0d\xab\xc2j\x1a\x0ey\xc9EG\xcc\xf9m\x17\x04$\xbdh@\xcaAy\xcb\xfat\xbam\xd0{Rv\xb5\xda\xd0\xe7)\xef\xca;\xe4\xb6\x8f\xd8\x14\xb7\xe5\x11E\x93\x02R6\x1d\xb1,'\xa9*h\xb01\x13\xbc\xf6\x7f\xecf\xf7\xf9\xb4\x97\x5c\xe3\x90eI\xc3z]\xb0!a\xd2\x11y1\xc7\xd0\xed[z\x1d\xf5!9m\xbaMk\x160eS\xdd\xb6\xa49\x19!5\xd3\x8ei8\xec\xb2\xd7\x8d\xc9\x89\xa9{\xcf\xae\x90]\xab\xaa\xb6\xf6N\x9f\x8b&\xfd9\xd3v\x8cx\xc0\x97\xac\x1a6\xe0C\xaa\xc2\xe8\xd4\xeaM\xef\xcb\xfb\x94\x9a\xa4\x82\xaa\x16\x01wm9dF\xd2\x80\x09-\xb6\x8c+\xfe\xd8\xa5\xdac:4\xd4\xbd(b@R\xd9\xa6\xf3\xb6L\x19V\x17\x17\xb4&(\x8e\xb0\xe3\xc1}\xae!p\xee\xf1\xf5\xe7\x87\x15\xf7\xaa\x96\x8a\x86\xb3\x06\xf4\xbbn\xc3\x9a%\x8f`\xc5\x8cE\x11S\x06\xc4\xcdHH\xe9RT\x94@\x8b\xb03\xf2JV\xc4\x9d\xf2\xa6s\x164\xeb5g\xdeA9\x03&\xb5\xdbQ3\xa6\xe6=O\x8a\xc8Z\xb0\xee\x84I\xeb\xd2\xa2\xe6<\xec\xb23\x16<cJ\xafu]:m\xc99\xa4\xc3aW\x0c)[\xf7\xa8+\x0e\xd8\xb4\xeb 
Z\xec\xa8\xa9I*9\xe2MM\x96\x0d\xb9\xe1\xa0\x92\xb4m\x01\xedb\x8a\xaar\x9a\xab_\xfc\x7f\xbe^\xd8\xf7\x0c\x99\x12tGHA]]@\x87\x09\xb3^\xf3\x03\x19\xc3\x0e\xc8\x8aX\xf7I?-\xe9\x84\x82i\xcdRJ\xeej\xe8\x134n]\xc2\x94f\x1b\xa6m{G\x9b\x9cS64\xeb\xd4'\xach\xc6\xb0\x15'\xf4X\x14\xd3%e\xdb\xebN\xeb\xb0\xacbX\xd2a\x9f\xf4=-\x9a\x1c\xb7\xaa\xc5\xebvuz]\xe7\xdeC:\xa5\xa2\xa1\xa1\xcb\xb4~\x9bvt\xc8\x08\xabhR\x11\x102\xea\xeb\x8e\xdb\xf6\x88\x8c\xa8\x11eI\x87\x14m\x89);\x22\xd8\xa8\x06g\xf7{\x1f\x02\xc3\x8a\x1e\x16\x10\x13\x15\xd5d\xd7\x80\x9a\xf3\xbeh@\xc6\x8eui'\xbd\xeee+V\xc5\x0c+ZQqA\x99=\x7f\xc6\x83v\xf5)\x1b\xd3\x8d\xb0k\xd6]T5d\xd9[:5\x04\x04\x5c\xd6l\xcb\x96\xa4I\x0f\xfb\xack\x8e\x08\x08a\xd3\x15\xef;\xa3\xe8\x92u[b~\xd6\x07]qD\xc1\x05\x17\xcd\x9a\x11\xd2\xaenI@Q\x9b\x11\x8bZ\xb5\xdb\x15S\x91P\x17v\xc2\x9cc\x96\x1c\x91\xf2M\xad*\xee\xe8\xd6)cXNR\x7f\xe8\xcd\xa1\xff\x09\x9e\xac\x96\xc7O=\xbff\xc0\x80e\x19\xdbj\x0e\xd9u\xce\x84]\xef9\xad\xc3\x98'\xbd\xef*F-Z\xf4\x88.K\xb2\x1a\xe6\x9cp_JA\xc8u\xbfl\xd5\xa4iq\x9fv[Q\xd0\x8e\x8c\xb0\x92\x8aM\xed\xaa\x96t\xe87\xed\x94\x17\x85|\xda\x8f\xac*\xcb\x89i\xd3l\xd2igt\xcac\xd2\xbc\x87\x9d\xf0\x9b\xea\xee\xd8u\xd2\xa6\x92\x9819+r\xea\x86u\xf9\x91\xcf\xcb\x98\xd6iZ\x87\xa2\x98;6=b\xc9\x03j\xf2Z\x14\xa5%\xd4E\xa5\x1bk\xc1\xfa\xef\xce\xcf\xed{@\xce?}\xe9\xd91sn\x1b\x92\x15\xb6.\xee'}\xdf\xe7]r\xd4\x9c\xbcV_\xf5A[\x02\xae\xe9\xd4d\xd9\x84\x1e\xa3f\xb5\xcb\xa9huHJ\xc9\xfb\x18\x10\xd6\xe1\x1d\x11G\xa4Uq\xd5\xb6\x0b\xe6l\xea2\xa2`\xdaq\xabNI{\xcb9\x87\xf6\xc6\x96%m\xfa\xcc\xb9)\xbc\xe7\xf0{\xd6\xfb\xbe\xe53\xee\xfa\x09\xbb\xaekw\xd0\x84\xb4\x94\xb2\x80\xba-3\x9et\xc9\x9a\xc32:\xcc\x18Wt\xc8\x90\xf7\x9d\x14\xf7\xb6Q\x05\x9bN[Uu@\x80@\xed\xbb\x8b7\xf7= 
\x1f8\x94\xff\xa9%G\x95-k3\xe1A\x01q\x01\xdfp\xc4\xae\x8b\xae\xca;bF\xc1\x94\xb0\x03\x0e\xb8\xe3\x11)\xef\x09I\x0a)\x88\xca\xc8z\xd2\x17\xfc+\xf7=\xed\x9en)\x97\x9c2kW\xb3\xf3&\x04|\xc2\xdbv\xb5\xc9\xdb\xf0q}.\xdbt\xc6\xeb.:\xec\x15U\x07\xbd\xeb\x84\x9br\x82\x1e\xd2\xac\xe8\x0b.\xfa\xa7\xa2\xaeh\xd20\xe4\xbaC\xce{G\x8f\x8cG\x85\xd5< \xa0\xd9\x823Z\xdd\xd4e\xd0\x0f\x1d\xc6Mo\xfbE/\xd8\xd5nI\x9f\xba%[\x8d\xe3\x81\xe9\xdf\xdb\x98\xd8\xf7\x80\x9c\xfe\xd0\xd4\xf3\x83\x96=e\xcb\xb4\xa4\x98\x90'\xac\xf8\xa4o\x8a\xd9\x10r^\xc0mK:\x9c\xd7\xe4\x96.\xf7b,\xfb\xf0\x00\x00 \x00IDATd\x9d7\xab \xa5E\x9b!\x09eo\xfaUy\x13\xa2\xb6\xd4\x8d\xe9\x91u\xc2\xdbv\x84E]S\xd5\x10\xd5\xd0oZ\x93]\x7f\xc1K\xc6\xbc\xe25A!\x05\x0d\xad\x06}@\xda\xa2\xb0ao\xb9\xec\xa2)\xe3\x96\x1dsK\xdc\x8e;\x86MI\x99\xb2#)\xa7EA\xc2\xbcw|\xc2;\xda\xe5\xb4Z\xd0\xe9\x90WD\x1c\xb7jPZCPG\xe3F\xa0\xffwg\xef\xec{Q\xaf\xe9\x17\x15\x17\x90\x14\xd3o\xd5\x8eU\x8f{U\xd4\x9a\xfbN\x9b\xb6\xaa\xc7\xa8qWl\x09\xeb\xc0\x92\x05\xcd\x0e\xa8)\x8b\xb8-gA\xd4o\xc9h\xd1\xac\xe0\xb4f/8h\xce\xa0\x98\x80y\xc3\xc65+\xa8\x8a\xc8\xbb\xa4\xdbw\xfd\x15\xd7m9\xef\xb8\xb8\x01-\x96-\xbb\x83\xc3\xae\xbb\xa2\xeeO\xfb]\x83\x0a\x06\x14\x9dS\xd4\xee\x88ygU%u\xab\x0b\xc88(\xa4\xc5\x09\xefy\xd4%\x0f\xba*\xa6K\x9f\xacn\x8b\x8a\x22\xaa\xda\x1dR\xd6!\x10\xf9\x9f\xa0\xcajY\x0e[\xf1\xb8?\x92\x92\x90v\xc0s\xb2n\xe9p\xd6\x98\xc7\x5c\xd3\xa5SY\xc0\x8a1iie\xed\x1eS\x91sWH\xd6\xfb\xba\xa4|\xc2\x90\xa7\xd5\xbc\xaaE\xa7\xd7m\xf8\x88U\xd7U\xec\x0a\x187oNN\x9bN\xb3\x1e\xf6);\x92^\xb1\xa3YV\x8b\x80[\x9a\xec\x18\x96\x92r\xd33\xce\xaa\xf9\x86>K\xea\xe2\xe6\xbd\xe5Yd\x1c\xb5\xeeA5\xbb(:\xeb\x8a\x80v\x8bF\xbd\xed\xaco\x09\xeaQ\xf0\x8a~\xb3b\x06l\x0a\xd8\xb0.\xa3*\xd6\xd8\xff\x80\x042[\xab\xfa]\xd7\xa4lU\xd8\xaa\x9c\x94\x1d'\xddW\x13P\x94\xb0\xa8\xc3I\xbf\xaaU\xa7\x92{\x86\xacXW4\xa8CB\x97\xa2iaa\x0dW=\xa4jML\xb3\x17D\x1d\xd1-\xae,*\xa0C\xc5\xba\x1da\x93.\xcb\xda0\xee!\xe3\x02v\x15dt\x1a\x90\xd3\xa5E\x9b\x80\xcb\x0a\x22.8\xe6\xbe\x00zMX\x96rY\xcd\xcb\x06\xc5\xe4U\x5c\xd2+gE\
xb75']\xf6\x19%\xf7\xac\x1aP5\xa2&\xef\x82\xbcfumj\xba\xf6\xfb\xb4\x17\xa1\xb6\x9e\xf0\x9f\x0b\xc8{\xc6\xbcn\xbb\x9a\xe4\x9d\xb7kIAI\xab\x16Q\x07\xb4*y\xcd\xa6\x8c\x88\x94\xbb{\xe1I\xab\xca\xfamk\x16TS\xf3\x82\xc7\x94\x94\x1c\x134\xa4\xcdIo\x99\x11\x11\x15\xd6\xd0P\x93\x12\xb2\xe2#2ZU\x95\xdd2\xad\xcda\xeb\x92\xc6lI\x8b\xe9\x91\x14q\xc4\x09\x0d/\xbb\xe9Iei\x11\x1dr\x0a\x9a\xf5\xcb\xa9\x09\xf8\x80\xacv)\xf7<cNR\xc3\x09o\xeb\x92r\xc6{\xc6\x04\xb4\x9bW\x10Q1`\xaa\x11\x0d\x0c\x7f\xf5\xf2\xc4>gHO`\xa3\x9eW6\xea\xae^E\xcd\xda\xcc[\xf5)5m\xd2.\xdb\x11\xb6\xec\xb2\xe36e\x9d\xd4#\xa8K\xcd\x98\xbc\x90\xa0\xac\x9a\xa4\xe3\xaa\xee\xfa\xb0+\xd6\xcd\xb8\xef\x8e\xd1\xbd\xc0\x89g4[\x11\x92\xd6//!d\xd4m\x0f\xbb\x22\xe7\x9c3\x1e7\xe0EG\xa4\xdc\x90rP\xd5\xab^w\xc4\x8e\xdf\xf6\xa2\xff\xd3\x09oz\xc7\xf3:L\xca\xeaWtG\xc1\x8a\x8c\x1f\x1a\xb5b\xd6\xe3\xdeV\x176#\xe2\xe7\xad\x9b\xb3\xa6KR\xdd\x9cA9!\x0d\x13\xa2\xea\x96\xf7?C\xce\x07w\x86Z\xff|\x97\xbbR\x9a\x5c2\xa4\xea\x11kV4\xdc7(iYZL\xccu\x1ft\x13\x13\xce\xc8)\xa9\xea\xb0!\xae\xa4_Z^\x87\xb8\x9b\x8e\xdarF\xd4\xa8\xef\x18\xf5\x9c\x16\xaf)\xebP\x95WU\x95\x14\xb0\xa2O\xce\x93n\x8b\xd8\xb0j\xd1\x11[\x0eK\xab\x0b\xdau^\xa7\xebv|\xc2\xbc\xebR.j\xf1C\xdd.\x18\xf7o\x9d\xb1*`PY\xab\x16<\xe4;Z}\xd2%\xa7\xad\xf9\xb7.8\xe3\xb6Q\xefb\xd8\x82\x9a\x11\xbb\x9a\xe5\x1b\xb1@\xe5+\xf3\xfb\xbd\xca\xba\xa7\xb7\xd1a\xcb\x9f\xd2%\xe7I\x93\xb6\xcd\xf8\x82\xca^|^Z\x93U\xa3\xfe\xa6\xcb~\xdd\x88\x9a\x90[vt*\xdb\x96\xb4%lNH\xaf'\x9d\xd0\xe9eO)\x9bu\xc5/\xa8\xf9\xb2\x97\x85\xc5$\xe5\x0c\xec\xf9\x03\xef\xe8\xb3\xe3\x01\xffNYR\x8f\x9c\xb09\x09\xcbv\x94\xcc\x98\xd5j\xc7\x901\x93z\xfdmw}\xd3\xbc'l{\xc5\xab\xfe\xb5\x1d\x8c\x98\x93\xb0\xe2\x92ekZ\x1d\xf4{\xba\xbd\xeb\xba\x8f\xba\xe1-\x1f\xb5\xa0W\xdd\x94\xaa1\xef\xab;hS\xd4vb\xdf3\xe4B0\xd5\xb5\xf9\x17\xbb\xfd@\xbbv\xaf\xe8D\xca\x82\x01k\xe2\x12FMx\xd8\x8f\xfc\xbf~\xc9\xd3~\xa4$i\xc4}\x11=\x1aj\x9a,9$/aAHL\xabW\xb4\x88\x09YS\xf5\x84\xaa)))y+:$\xf7N\xafGT\x05\xb5\xb8\xe2\x90MtY\x966\xa8d\xc0\x80;:\x1dU\xd2#\xe0;\x16\xf5\xc8
[\x90\x94\xd6\xe3\x9b\xda\xd4\xec*j\xd2\xef\xa3.I\x18p\xc9Is\x9a}\xc8UG\x84M\xe9\xb1mH\xc0\x9a\x82\xa4u\x93Z\x03\xe5zxk\xf5\xdb\xfb\x9c!;\x81\x85BU\xce\x11\xab6\x8d\xc8\xe87n\xd8\xd7\x04,k\xb1\xa2\xcb\x0f|\xdcoy\xcf?5&h\xc2\xbca\xad\x16\xe4U\xad{\xc2=U\x07\xe4\xd5u\xc8\xfb\x159\xdb\xc6\xbdm]\xc5\x0d19%K\x0e\x8b+*\xd9R5/'\xe4\xb8\x01Gm\xca\xdaqD\xc8\x82\x82\x1d\x93~\xde\x8aUe/yA\x87\xe7\xa5\x15D\xa4\x0d\xebv@QRI\xcc\x8e\x92\xef\xfa\xa4\x15\xd7\xf5Y\xd1#\xec\x86NKv<\xe45q\xad\xeeI\xec\x1d\x8ewk\xa86\xaaO\xfcl\xf8\xf9\xe8\xbefH$xtp\xfb\x97\xe2\xd6\x9c\xb5*-\xa5M\xce\xae/\xfa\x86\x16\xfd\xae;d\xc8\xcbf\x1d\xd3nM\xb31\xef\x19\xb1\xa0C\xde\x86VsFDD\x1d\xd5\xa1a\xda\xa6\xb4\x0di\x1f\x13\xb0l\xdc\xac\x9a\xb4.\x15\xd3\x22\xb2b\x9a\xc5<iUA\xc6\x8an9!\xb7\x04\x0d+)\xab\xbai\x5cD\xb7\x98O\x9b\xd5fC\x875\x01y55\xed*v\xf4JK\x8aX\x16r\xd1-Q\xcb\x0a\x0aj\xf2\xca\xb6\xf5\xaa\x9a\x92P\x10\x15\x16\xb3\xad&R/_\xfb\x9b\xff\xe1Fc\xa2\xb1\x8f\x19\xb2P\x0d?Y\xd1j\xc8\xfb\xc6t*\xc8\xea\x13\xb5\xaa_\xd2\xac\x9aN\xd7\x5c\xb4aHB\xc9\x9cw\x9c\xf2\x86C\x8a\x12.*+Z\xd8\x9b\xea\xd6|E\xc8-\xbd\x9eP\xd0\xa7jEFQM\x8b\xa86\xdd\x92\x9a\x15\xad\x19\xf4\xbe\x92\xac\xaa\xaa\x08v\x0c\x0bh\xb6,\xa9[^\xd0u\x05\x11E\xe3\xde1\xa6E@\x93\xb0\x05\xc7M\xa1\xa6.\xa1_\xd4\xac\xaa\x19'\x85u\x1b\xd0\xee\x11\x15\xa7u\xdb\xb5 \xa6S@T]\xd1y\xe1F!\x1c\x7f\xf5\xc3\x8d\xaf\xd7\xf7\xf1\x93\xf53\x01\x1a\x8f\x0f\xd84(\xe1\x06\xdal{\xc7A\xdfw\xc2\xaeV\xa7\xbd\xe9\xa0\x90g\xfd{\xdbBB\x86L\xfbysb\x96-\x88\xe9\x973\xa8C\x1e\x7fN^\x9f\x90\xabNxI\x8f\x07\x8cI\xeaW\xd9K\xf9\xcc\xa8\x09k2\xa3!\xab\xd3\x93b\x96,\x0b:\xa0\xd9{:\xf7\x82\x22\xd7\xf5Z\xb6\xe5\xcb\xf2\x9a\xbc\xed\xbe\xe3f\x9d\xd6\xed\x87\xe2\xd6Um\x09\xba'nXH\xd6\xbc\xe3\xe6=i\xc3\x9c\xb8\xa0\x05\xf5\xbd-LBZ@\xc3u\x0dD\xf6\xfbw\xeaYd\xf3\x11\x0d\x05\x83\x826-:\xea\xc3\xf2\x0e\xfb\x96n'\xcc\x88\xe85\xe3\x0117\xcd8\xa8d\xc8\xfbN\xb9\xa7jYLI\xdc\xb6Q1\xd3RR\xb6d\x1cs_\x97e\xf7\x1c\xb7eV\x8fM\xebB\xa8\xc8\xca\xc9\x1b\x960o\xc9\xa6'</cF\xb3&\x87-I\xdb\xb5$ 
,\xef\x01\x01%O\xc8\xba\xe1#^\x112b\xcc\xa3J\x9aU\x1d\x96P\xd1/\x89\x9c3\xbe\xa7\xc3\x8c\x16W\xfdi\x8b*v-H\x18\xb0%\x22\xa4#\xd0T\xcd?:\x148\x16\xd8\xc7\x1a\xd2\x14X\xd1\xf4d\xe5\xb1\xb8E\x9f\xd0\x90\x165iSM\xce\x98\xb4/\xbb\xe8\xa8\xe3\xba\xbc(\xe3#\xd6l\xeaR\xb2b\xdaq\x05\xc7\xf7z\xeb3&\x9c\xd1\xe7\x96\xa4V}n\xeb\xd5\xaf.\xe7\xba,\x06\xd4\x84u\xdb\x16SQuQCBX\x87\x1e+\xbe\xe3\x84\x8a\xa4eqEgt\x1b\x17p\xd3O\xb9\xa3O\xc5\x8b\x8e\x88\xb9\xafMTZ\xc1\x8c\x902\x9a]\xf61\xb74Y\xd4m\xd7\xa6\x84[z|\xd8o\xeb\xb5nTZn\xcf sHZLp\xa8\xfb\x9f\x0c\x85\xefV\xf7- \xcf\x05\xaey\xf6\xb7\x8f7\xad\x04\x86}C\x9f5=\x8eX1l\xd1\x03J\xfaM\xdbUv\xcd\x17\x0c\xf8\x0a\xaafU|\xc6!\xf7\xa5,\x1a\x93\x13\x14r\xd6\xcb2\x9erS\xd5;\x86\x1d\xf7\x039?\xed\x8f$t\x9b\xb5+\xe4\xae\xa0\xaa\xa8&S\x1e\xf6\x9e-]\x8a\x1euM\xd51i\xa3b\xd2\xe6%\xdd\xf3\x05)\xdfV\xd0kY\x8f\x09m\x0e+\x9a\x95\xc1\x09\xab\xa2\xca\xb2\x0e\x99\xb2%\xe7c.\x1b7g\xda?\xf2\x15EA1=f\x95\x04\xd4\x15U~|\xc5\x1c\x10\x7f\xfc\xdf\xfc\x87\x9c\xfd\x0bHW`Z\xfb\xffr\xbf\xad$\xe3\x97\xbd\xa6\xd3}A\x1f\x15VvCF\xd2#N\xda2\xe4{~\xc7c\xba\xdd\xf6\xb0\x84\xaf\x99\xd7\x10\xf6\x88\x17\x0d\xec}[\xfb\x88\xb0\xb74\x99qF\x87\xaf:\xe7\xbc\xdf\xf09\x0b\xb65\x09\xa9\x18US\xb3#/\xe8\x03\x82z\xbc\xa3\xcbeI9KV\x84\xach2j\xde\x01\x13n\xfa%\xbb\xaez\xdc\xb7\xb4*\x99\xb3\xe5\x98\x0dG\xbd!%\xafY\xcc\x8c\xa3\x1a\xc2n\x8a\xdbpP\x9f\x7f\xe1\xb8\x9a,\xae\x1b\xd7%\xa3WYXLV\xb3\xac\xdd\x03s_9\x1fX\xd9\xaf\x804\x07V\x8d\xfe\xf5Hj<\xb0k\xda)\xef9\xe5\x9e-\xfdn\xfb5\xcd\x1a\x1a6t\xcb(\xfbIoYv\xd0\x84\x88\x0b*\x8ej2\xab\xc3\xac\x88^%\xe7l\x88\x98\xf2\xb4-ox\xde\xbc\x15\x7fV\xd2\x84\x84%\x01\xc3\xde\xf5\x80u5\x15]\xde\xb4\xab\xdd\x90v\xdb\xee\x8a:!lF\xc2\x90\x80Q\xd3\xe2>\xe8U?c\xc7u\xbd*&\xa4\x0c\x9b\xf0\xb8\xf7D\xe4\xb5\xdb\x107\xbc\x97J\xb2\xa1S\xc4\x84\x84\x8a\xbc\x0b\xae9.-$bW\x93-\xcd\x12\xa22\xc2\xc5\xa53\xd9\xbf\xb3\xb2\x7f5\xe4\xc9\xc0-\xe7\xffQ\xb6R\x0c\x1d\xd3j]FM\x8f'\x1dQ7a\xc2\x90\xa0G\xfd\xbe^U\xaf\x0b\x88[\xf1\xa8\xe2\xde\x01\xe7\xf4\xde\x079%I\x8f\x98\x12\x12U\xf7\x
92\x84\x8f\xf8\xb2\x0b\x9a\xbc-\xf0\xff\xb7w\xe7O\x96\xdeg\x95\xe0?\xef\xddo\xde\xbc7\xf7=++k\xafR-R\xa9\xb4K\xd6b\xc9+\xc6\xc6\x88\xc5\xe01mc\xc0\x03\x04\xcd\x1e\x01=\xf4\x04\xdd3\xc3\xd0\x01=M\xb34\xb8\xa1\xe9\xa6Y=`c\x1b\xdb\xb2\xb1%Y\xb6%\x95T\x92j\xdf\xf7\xcc\xca}\xbby\x97\xbc\xfb}\xe7\x07gL\xf4\x8f3\x83\xdd\xae\x89\xd0\xf9\x13\xde\x13\xe7|\xdf\xe7\xfb}\x9e\xf3H\xa9\x08\x8dz\xdd\x93^\xd3+\x8fm~\xc15\x09\xa7\xcd{\xc09\xdb\xbd\xa6\xe0}N8b\xc6q\xf7\xda\xae\xac\xd3s\x12\xde\xea\x0b\x22\x1e\x12\xb5h\xd2\x1b:\x1d\xb1\xa6&\xba9\x11\xbf\xe1\x98!\x8b&ET\xe4\xf4\x18\xc6\x82m\x22nm68Tt\x9b\xd7\xa9\x1evF?\xfe[\x9fh\xdc\xc6\x96u\xd3\xe8\xaf\xb7\x9b\xb5XJ\xd2\x92\xb2>\x8bv8e\xc2)\x87\xd5\xc5\xacz\xda\x94\x8c\x88\xba\xc0N\xafJ\xe9\xb3\xea\x96w\xab\xc9\xa8\x1b7\xe7\x11C\x8ar\x92R\xcaj\xe2z\xed\x127d\xc6%\x83\x9a\x1esF\xda\x8c\xb6^k:\xadY\xd4-\xaa\xac\xd7\xa2\xadv;\xa9\xc3\xb2\x0e\x11\x1d*j*\xcez\x9fO\xd8*p\xd9\xa2\xbd\x8e\x19W\xd0P\xd4V\x16\xd7\x94\xd0\xb2\xcf\xac]\x9e\xf7\x1eG=\xea\xb4\x97\x8cKHZ\x17W\xd3\xd2\xf2\xcd\xe2|U\xab\x91\x8a\xfd\xde\xaf\xdd\xc6\x7fY\xc9`\xc5\xd8\xaf\xa7Z\xd1X\x9f[\x22V\xf4i\xca\xe96\xe0\x1d\xa26\xa4\xb1jM\xd5\xb4\x86\xb6\xb2\x0e\x11\x95\xcd>\xf5EI\xeb\x1aRb^\xd7-\x14\xb5G\xd5\xb2I\x81\xf3B%yM\xcb\xda^\x90\x16U\x97\xb2hL\xa7\x0eu\xfd\xd6d\xad\xe9\xb1d]E\x8f\xaa!\xcb\xa2z\xf5k\xb8\xc71\x93\xd6\x9c\xf0\xa895\xbb\xcc\xdae^SL\x97\xa6\x82I3n\xea\xb3n\xc4%\xbb|\xd5\x16\x1fvT\xce\x9c\xed\xa65e\xcd\xe8S\x93\xb2a 
\xc8\xceL\xfd\xbb\xb7\x05\xd7n\xd7:\xe4R\x18fBa\xf0\xb0E\x93b\xc6\xad\xe9VqE\xd6\xbc\x82E/\x89{\xce\xbdr\xa2\xc6D=%\xaeK\xc1U{D,\xd8\xeb\xa0\xeb\xa2\xf2>,b\xafi%\xdb<\xa1f\xd8\xb8\xc7\x5c\xb0\xa4K\x97i\xef6\xadl\xbb\xb2aSVL\xdbi\xaf>iuE\xc3FlWT2\xe8I{\x1c\x15w\xc3a+\x0aN9\xec5e+bFM[Q\x11QP\xb1\xcd\xab\x86UtX\xd6V\xb6\xe8\xfd\x0a\xbelRA\xc6\x05\x83\x12\xfau)kX\x15\x93\x89\xae~\x9a/\x87\xb7\xadB\xf8\xf5\xe0\xf7\xff\xe5\xde\xd63\xd1nCV\x9d\xb6M\xc9;dq}3\x0ai\xa7\x88\x0bZ\xae\xaa\xebt\xd1\xb4\x1ekvY\x14\x91s\xd3\xbc\xb4.w+\xd9\xe1\x8b\xee\xb4\xa8eI\xb7\xab\x982d\xc4u5\x1d\xce\xd8m\xc0\xb2\xbc\xb61\x13\xa6d\xcc\x193$b^\xc6Ee\x07\xc5]T\x910 0\xe9k\x0e\xa8K\xba`@\xc2\x86\xa2y\xa3\x1a\xea\x9b\xb7\xc5\xf3v\xcb\xea\xd4\x96\x924i\xd9\x94\xaa}jz\x0d\x99\x12\xd7P\xc4\xba\xdd\xa8\x9a\xf7\xe43o\xbcp[\xdf\xf6\xfe\xe9\xde\x96Bp\x97\x8a\x17\xe5d\xc4\xdc\xe1y\xc3\xca\xba\x5c\xf1yw\x1b7\xae\xe5\x15\x83\xa6\x5c\xb2h\xafK\x96\x9c\xb7\xa8**n\xc5\x869\xbd\x06\xbd\xa4\xd7?\x98\xb0\xd5\xb3\xce{\xcaMe\x0f\xebVR\x94v\xbf\xbcK\x96\xf4H\x9a\x91\xb7U\xd5W\xcdy\xd5+\x1e\xb7\xa0\xe6^\xe7\x14=\xe9\xb4c\x1e\xb5\x84P\x87qw\xc8\xb8\xa5\xe2NU\x13\xae\x9b\x10\x8a\xaa\xa8\xba\xd3\x9c\xaau5Oy\xcd\x15{\x95\xbc\xdb\xcb\x0a\xca^u\x97\xeb\xd6\xb5\x15<\xe8\x84\x0dQ\x19\x9f9z\x9b\xbf\x87\xbc%r\xfd\x17S\xed\xab\xd1.\xefpAA\xce\x8a\x0f\xb9\xa8%m\xc6\x16']\x95uF\x9f!m\x9d\xd2\xaajFl8\xe2\xaa\x8a\x11\x81\xc0N'\x15=n\xc3N\x7fn\xca/x\xd5%\x8f:#f\xd5^ym\xab\x9b\xbd\xf2Q\x0d;u\x9b\xc7\xbdn9\xa8\xedY\xc3z\xbc\xae\x0f\x17\xbc[\xa0\x22e\xbfK\x9b\xf6V7 \xe2yw[2\xea\xac\xa4\x8a\x0e\xa1[v\xca\x8a\x08<\xe3\xa3\xbe\xa1\xdfv\x9f\xf5\x84\xaa^\x81\xd7\x8di\xebR7cl\xf3o\xef#\xbf\xfb\xc2\x82\xdb\xd9\xb2\xa6\x0fw~$\x1f\x06\x91}f\xd5,\xeb\xf3\xbd\xcax\xcd\xfd\x027\xbdK\xce\xb4\xb4\x96Y?*p^Bh]\x8f\x86\xad\x9a\xb6{\xc5\x88K> 
\xa2\xa9\xed\xac\x87\xcc:\xe9a?\xe236\xec\xf0\xdd>\xee\xba\x1dZ\x22\xaa\x9a\x92V\xe5=\xae\xc3\xb2\x15qCV\xb5,Z\xf5vS&\xe5\xd4\xed1\xecUI\x07\xac(8)P\xb7]\xd6\x94{\x9d\xb7!aPQ\xc1^9\xf3\xdaf\xed\xf4\x92\x1fr\xda%c\x86$\x5c\xd0\xb4\xc5\x82\xb8\x9a\xbc\xbd:\x95$e\xb4\x7fa\x22q\xb9~\x1b\x13\xf2\x93\x83\xd7\x7fb\xac\x9d\x8f|3\x1c6\xa1\xd7\xeb\xee\xb5\xeac\xbe\xa0iI\x8fUq\x05{=\xe5o\xac\xcb[Ut\xc0\x8d\xcd5\x8e\xcb\x9b-v\x0bF\xb4U$\xdc0\x22\xe7\xa2o\xb8\xd7\x82\xd0\xba\xb2\xef\xf1i9\xcd\xcd\xa0\xa5Qe,JH\xc8\x98\xb7!'n\xc8Yu1\x97T\x95\xccy\xcc\x92Wd\x1d\xd02\xaf\xd7\xb2.\xa3\x9e\xb5\xc3V\xab\x16\xe5\xec\x11Z\xd5%o\xbfE9SJ\xf6\xca9\xae\xa0K\xcc5\xf7\xbae\xcc\x8auK*\xdf\x8ch\xfe\xcd\x05\xab\xf5\xdb\xf8\x0c\xf9T\xb9C=\x5c\xb7\xcd>\x15i+\x1ev\x11\x17lX\x97\xd1\xadmI\xb7\x19\xabvkh\xb9\xdb\x80\x97<\xa8\xe9\x1e1\x19\x19u\xeb\xb2z\x9cs\x8f\x059\xf3\x06U\x8dzY]M\xc9\x8cO\xd8eUIN\xc6\xbaYUw\x1b\xb5O\xaf\x8a\x8c\x92q\xe7\xdc\xb4]h\xc0\xa4\xa8\x84\xbc%\x11\xbd&,\x1b\x14\xd3aQ\xe0\xb2\x87t\xb8a@B\xc1\x94Y\xe36\xd4\x9c\x90\x12\xba\xe1i\xaf\xeb4\xaa\xa8\xa6\xe6A/\x8a\x89h\xcb\xd9\xaa-\x13\xa6\xc5Z\x99\xf6m}\x86<p \xfa\xe1X\x18F\xd6M\xe0\x9aqc\xb6X\xf5\x15C\x02UQ[\x0d\x99\x90\xf5\xa2\xa8e\x1b\xaa6t\xa8+\x19wCE\xdd\xba^{\xe5\x8d\xbba\xd6\x84\x05\x97L\x8a\xe8t\xc9\xa8P\xc3\xbc\x9d\xaa\xa2\xba5\x0d\xa9 
'\xeb\xb8\xc8f\xf0\xc6\xb4\x9d\xfaM+I\xc9HJ\x1a2mZ\xb7\xa8c\x9b\xaf\xe2\xf7\xf9\x9a\x83.Z\x17\xd1\x14\x8a\xab\xdb\xee\xa4A\xc32\xfa\x5c\xf1\x90\xaf\xea1-\xd4\xa7\xcb\xbc\x93\x864\xe5\xf5\xa8[\x15\x93P\x0a\x86~#4\xd3\xb8\x8d\x15\xd2\xbd\xef\xbcZd\xc3}f\xdc\x92Pv\xd5MI\x0f:\x85\x94\x92\x11\xeb\xba\x8d\x08,\xdb\x90TT\xf4NWL8e][K\xcc\x92Yw\xab\xba\xe5\xb0\x16\x22B+\x12&\xdc\xaf\xe5Qc\xe6\x0d\x0b\xac\xcb\x9b\x17\x08\x1d\xf0\x86\xac\xa8\x09\x19M}\x16\x95U\x8d[\x12sY\x9f\xb8=&\xd5\xady\xd4\xba\xeb&\x9c2\xe9\x9cA\x936\x14TlHz\xddO\xb8\xaci\xdeU\xef\xf7\xac\x87\xe4\xe5\x145\xe55e\x94\xd4\x85\xf2\xeaRB\xb10\xb68\xd7*\xb4nk\x85\xc4\x1f\x1c\x7f\xd7LxG\xb0f](\x8a\x1f\x92R\xf4\x0dG\xac\x1a\xb3\xcb)5%\xcf\x1b3\xa4 \xa7 \x10*\xa8\xda\xae\xa9)\xa7ST\xc6uK\xb69\xe9\x8a'\xb4\x94\x0c9\xe4S\xc6\x05N[5\xe6\xfcf#\xe7NS\x22r\x92\xd6\x04\xe6\x94\xdd\xef\x8aE\xe3\x22\x06\xb4t\xe9\xb2[\xc2\xb0\x82I\x15\xcb\x86U\x94u\xbbbX\xdd\xa2\x0e$\x84\x02\x8f\xf8\xaf\xeeq]\x8f\xa2s\x1e\xf0i;e4\x8d{U\xb7\x86\x84\x8a\x98\x96\xa4.\xa1H{c\xe5\xa9\xdf\x8d\x05\x97Z\xb7\xb1B\x8a\x8b\xf7\x1b\x0d\xa7\x5c\xb2S\x05\x9d\x9b\x0e\xddc\xd9-u5\xbfl\xaba\xdb\x1c\xf7\x8c\xd0)u\xfb\x1c\xd7\xf6v\xc7\xecU\xb3\xc3\xacU3\x9er\x9f\x83\xaa\x0e\xfb\x92s>,\xed?\x1a\xd06m\x8f\x92\xb4\x83\x16\x85\x02EI1\x17|\xb7\xa6\xa2^K.\xc8KZ\x10\x88kY3\xe8\x19\xafj\x88\xd9\xefy\xd3\xa2fm\xb8j\xabQe\x1b\xbaUT\xc5T\x1d\xf7\x80o\xf8.\xb3~\xd0\x84\xd7mS5hP\xc3]n\x09\x15\x04*Z\xb6\x99\x16Z\x0a\xa3\xcb\xfb\xdb\xc9\xdb[![{\xe7>\x12\xb4S\x91'}A\xc6\x86-F,\xbah\xb7\xa2\xf7*\xd9\xf0\x8a\x0d=\xae\xb9\xdf\xbaE;\x05R\x0av\xbb\xa6\xc3\xb4\xacW\x8d\xc9{\xbf\xe3\xe2\x8e\xea\xc4\xb8\xac\xcfh\xf9%\xcf\xe8\xb3\xd7\x9a\x92\x8a\x1b\xeesI\xdc\x86\x8a\xb8\x01\xcf\xdb-\xa9\xc3\x9dfdt\xab\xd9\xd0\xd6\xa1\x88\xbb\x0c\x9b\x12\xf7w~T\xdeQ9q\x1b\x92\xa6dL*\xcaK\xa9\xd9\xb0\xd5\x15;<o\x9fc\x06\xdc\xabi\xd5\xbc\xbc%\x9d\xd2\x86\xac\x0bD\xb4L\x1bR\x93h7O\xfc\xb3\xbfZ\xf3\xe2\xed\xdcu2r\xa0.\x1f)z\xc9\xfb\xa5\x84\x9a\x1e\xd6/\xe9u)\x7f\xe3a;\x94\xcc[\xb3\xee%7D-I(J\xcb\xd9\xe3\x
03\x02\xbc\xc7\x86\xa4K\x1e\xb7\xdf\xc3\x12\xce9b\xd4;\x1d\xf0{B\x1f\x93\xf7\x9a\xba\x09)\xd3\xba4\xc5\x0d\xc8{\xb71+\x12\x1etI\xdb\xbaEY9{L\x19\x90tIV\xdb\xacw\xd9#\xe9\x90\x9a\xa6I\xd7\x14\x1d0gED`\xcb\xe6\x84m\xd6\xb8e\x09\x17}I]`\xdc\x92I\x09k\x96T\xf5*\x09\xa4-\x89\xd9\x08\x82\xfc\xdb\xc2\xbf\x0aok\x85\x8c\xbdo\xe4\xc1bTp\x9f\xaf[\x95\xd3oI\xdcU\x0fz\xcd6W]\xf4V7\xad\x08\xb5\x94\xe4\x14\xd4U\x95eu\xfa\xb4\x92A\xaf\xeaR\xd2o\xc8E\x11\x15O\xf8\x8c\x8aUQ\x07e\xbcj\xcac.X5`Q?\xcaJzM\x19W\x15\xf8\x92{L\xdb*\xea\xac{\xbc\xac\xcf\xaaA\x1b\x96\x0c{\xd0\x0b.*\xb8\x22\x22\xa3\xae\xd3\x16\xc7em7\xa7\xae%g\xbf\xb2\x96\xa6-rF$]\xf3]\xbe\xea\x87|M\xa0!\xae\xa2O\xd38\xfa\x95u\x08_Y\xfe\xdc\x07\x82o\xc7*\xaao\x99B\x96\xe3\x85p/\xd6%\xd4\x8cH\x1a\xf4\xb2^\xe7\xdc-\xe2Qm\xabBoq\xd3\xacP\xbf\x8cq+\xbe\xc7\xb2\x05[\xecV2a\x8f\xa4\xa29[d\xad:f\xd0\x1d:p\xcc\x8a\x84\xb8S\x06t9\xbf\xb9Z%)!b\x97\x8c\x82\xacm\xea\xfa\xc4\xc4\xecu\xda~\x15I\xa1U\xd7\x8cz\xdd=\xb6{\xc8\xa0]VEd\x5c\xf4Qq\xf4i\x08\xad\x0b4\xb4MX\xd5a\xc1\x9c\xa8\xe3\xba\xbdb\x5cDJY\xb7i5\x85\xcd\xb0\xf1B\xd0\xfe\x89\x0fn\xf9\xcc\xb7\xa5s\xf1[FH\xb53\xd7\xba\x104M\x1b\x12W\x177\xecQ\x09\x0d+\x12.\xe95l\xc0_\x1a6$n\xc1\xb2\xa2!s\xaaB\x0b\xae\x89\xe8\xf0\x86\xb4u9\x17,\xda\xae_\x97W\x94\xc5E$M9\xe4N\xcb\x8a\xee4-\xb4ESM\xcb\x197m\x95\xc4\x16)+\xe6\xd4\x0cI\xe8\xb4\xa8a\xc0\xcf\xb8\xaa\xd7I\x17]QpYV^\xc5\x88O\xe9\x17Z\x92\xb2!\xf0\x0duQ7d\x9d\xb6.c\xc4\x9aQ3B\xf9\xcdF\xbc\x1e9U%Y=\xda\x81\xea\xd1\xa9_\xed\xbam\x09\xf9P\x84\xe8\x9e\xfe`\xab\x0e\xbd\xae\xeb\x94\x10\xf5\x92\xc0\x8a\x9c\x96-V=\x22m\xda\x1e17\xb5,o>\x85\x96\xacX\xb1\xa0\xd7\x0dW\xd4\xb5-\xca\xabx\xd0\xb3\xca\xde\xb0\xa8\xd7\x05WMK9\xee\xac\xef1eC\x5cFF[\xcc\xba\xbd8\xe3\xba\xdd\xae;\xe4\x11K\x06\x0c\xa8*j\xb9\xa6\xec\xa8y\xd7=\xed.Yw\x9b\xd5\x90sY\x87m6,\xebP\x91t\x8f\x94\x84EO\xb8\xee\x0e\x05\x11\x0b\xdecJBI\x8759yM9\x09\x11-M\x19\xadT5\xfc\x93\xe3\x82GS\xb7\xe5\x19\xf2\xc1\xe0\xcb>\xfd\xd1\xe5\xf1\xe9\xd8.UU\x8br\xee\xb0\xdf\xdf\x1b\xb6j\x9b\x0bv\xfb[\xcb\xd6\x15
\xe4\xddaQ\xb7Y\xe3\xaab\xc6D55\xf4\xd9gN\x5cJ\xc4\x03\x9e\x135\xef~\xa3\xbe$\xf0>\xdb\xc4\xecu\x8f\xbfqHU\xcb\x86\x0b\xc6\x15Ud\x8d\xca\x99p\xc6\x9a\x94c\x0e9/\xaf,e\xd2\x84\x98%O\xb9\xdf_\xd8\xef\x92\xaf\x98\xb0\xc3ucJ\xea\x22\xaa\x96\xf4XS\xd3!k\xdd\x09\x0f\x98\xd3cY\xe0\x8a\x01mIy\x1d\xe2\x0a\xee\xf3\x9a\x9c\xad\xa6\x0c\xcb\x8b\xd9\x1b\x5c\xcdn{\xfc\xd8\x9f<\x1a\xdc\xbc\xfd\x08\x89\xf8\x910\xfb\x9e\xca\xae\xeeX\xdd\x88\xb8\xa4\x82\xd0\xa0-N\x09\x9d\xf5\x01}\xe2\xde\xeb\x93\xde\xee\xa8E\xbdf\xa5m\xd3\xad\xed\x88i\xa1\x09\xcb\xae\xe9Q\x93R\xd0\xe9\xa2\x019\x11\x17e%\xc5\xac:\xa1b\xd6\xcf\xfa\x03\x9d\xaa\xc6\x14Ud\x94}\x9f\xcf\xeb2e\xbf\xaa\x03f]\x13\xb7OAY\xc25=\xfew_\xd2P\x97\xf1\x92\x87\x9d\xb7\xae\x0b-k\x02yMMi\x111\x05\x8fI\xb9\xec\x0eS\x9a\xb6J*(hb]L\xd6UiMeIQ\x03\xb2n\x18\x0a\x9b[\x86z\x8e}\xf16T\xc8;\xa2\x1f\x8e/\xfd\xda\xee\xfeR\xb4\xc7\x8c9M#\x1e\x12\xf3\xa29\xc3\xbe\xcbk\xe6ms\xd2\xfd\xfe\xd0=Zn\xca\xe9\xd5\xb4\xe8\xe2f\x86\xfb\x825\x87-\xc8Y\xd4\xa3m\xd8\x0d\x19\xcbj\xa2\x9e\xc6\x19-\x0fY\xf6I\x13ZJn\xbaS\x87i\xa1\x17\xfd\xb1O8\xe8\xc5\xcd\xd9\x91\x11m/\xdaj\x0c\xc3\xa6|\xcd\x01\xc3j\x06D|\xcd\x16\x11\xb7\xdc\xa1\xb5\xa9\xcb\xa8\xb6\x0d\xa1C\xb2\x8e\x99\x94\xb0\xe1Nk6tZ0\xa8G\xa7\xb8\xbc\x11y}Vtj\x98\xd50a\xc1@p)\xd2\xf7\xc0\x81/_\x9b\xbe\xed\x08y4\xf8\xb5\xe6\x8e\xf7Mm\x15+\xda\x22\xab\xa8\xd3\x05\xdb\x9d3i\xc6eG\xec\xb2$\xe3\x8c-\xce\x88\x1a\x960d\xcc\x86\x9f\xf2\x8c\x0d}\xea:\x14m\xe8\x16\xd1\xd4c^\x97k\xee\x94\xf01\xffAI\xa7\x8a\xb2\xdf\xd4\xe19\x83\xda\x12\xce\xd9\xd0\xa5\xe4\x87}^RT\xda\x1e_WC\xcd\x80~\x05\x0b\x1a\xf6\x89\x98W4\xe9\x05\x81e\xdb]\xb6\xd7M\x84\x9a*\x86\xcd\x1b\xb0\xaeh\xc9AW\x0c\xb8,bU\xc4\x8cP\x87\x9a5E\x81\xba\x9d\xa6%\xf5\x1aR\x95qF\xbf\x92\xc3f\xda\xab?\xf6s\x7frW\xe9\xe8\xedEH-\x9eO\x1f\xf8\x9dB\xaa'8b\xc3E;]\xf0+\xbe*\xe2\xba\xd0\x8f\xa8\x99\x138mPK\x97\x88%m%Q\x11_\xf2\x90UE;-\x19\x14\x93\x17\xd1o\x19)Y'|\xb7\xbf\xf4\x93\xa6\xdc\xd4\xab\xe0+\xe6t)[0b?\x9b3\x1c\x1ft\xca\x82\xd0u\xbf\xa0\xcf\xb4\xb2\xbaYm#\xd6,\x1a\xf2\xcf\x5cR
\xd6\xa5\xc7M+\xa2\xb6\x88Z\x954f~\xb3\xd4\x9b\xd0\x94\xb1\xe0\xb0Ew\x98\x12X\xf6\xb4\x1b\xd6\xa5\x8do>0\x8f\xb9e\xc8\xbc\xbcP\x87\xa6\x92\xb8\xb2LP\xde8\xfb\x9e\xcf\xff\x87\x07\x12\xb7Z\xb7\x11!\x99\xc8{\xfa\x17\x7f\xa5\xe0\x8e\xe0U{\x9c\xd1\xe5I\x9d\xae\xa8JY1(%\x94sQ[R\xc9u\xa3z<\xe0+z\x1d\xf45]J\xd2\xa6m\xb3\xaadX\xce\x9c\xa8\xa4\xad&t[\xf4\xaak\xa2\xb6\xd9\xab\xd3\xa0\xa2\x9bvYsS\x87\x88\xa2\xfbU\xd1o\xd5\x98W\x8dhh\x89\xe9\xb3.mP\xd2\x09\x17\xec\x16\x88H\xb9\xbc\xb9A\xa7 \xa1\xa2\xc7e\x03\x96t\x8a(\x1a\xd5\xa9,\xba\xf9\x105\xe0\xb2\x05\xe3j\xd6\xb4lh\x9a\x13\xea6\xa7\xe1\xa0@LU\xcd\x16\xd3z\xe3\x8d\xc1\x81\x91\x13\x9f\xb9\xad~{\xd7\xc3zl\xc5\xbd\xfe\xd1\x87\x5c\x966\xad\xee\x19\xdd\x02\xa1n74\x15\x9cu\xbf\x1e/\x89aC\xdd\x09G<\xa0\xa1WZ\xc4\x9a^\xe7\xf4\xea\x905\xebay5\x81\x84\xba\xd0\x82\x83\xdan\xe9T\x966g\xbbY+z\xec\xd5\x105\xa7`\xbb\xa6\xc0\xb4\x87]\xc5\xaa\xa6\xb4@\xc2\x9a\xad&=e\xc1\xa2\xa4\xb4m\x9a\x1eW\xd7mH\xdc\x051\xab2\xf2Jj\x16\xcd\xaa\x9a\xb7WQJB\xd9=\xael>\x9b\xd5$\x0dj[\xf56\x83\x16\xe5%E\x8d+H\x8a\x8a\xb4:?\xb6\xff}\xb7\x95em\xb4\xf7?1\xf3\x83\xb5\xe0\x9d\xbe\xa0K\xd9VMq\x17mX\xb7\xdd\x16\x15l3\xad%'bC\x9f\x86\x19O{N\xd5\x86\x88\x0e-IC\x06-X\xd7\xd0\xef-\x1a\xf6[\xf2\x05Q\xbd\x96%4\xbcE\xd25\xd3V\xac\xe9\x11\xb8!\xba9$0\xa0\xd7\xebB)\xbb\xbc\xa2\xc3\x9a\xb851\xd7\xcd\xd9\xed\x9c\xac\x09\xdb=\xab\xa4\xdfI\xfd\xc6\x94\xd5\x15\x95\xb5\x0d\x89+\x89\x8b;\xe2\xa4\x01\xf3\x16LX\x14W\xd4\x12X\xd0\xab\xc7\xbc\xb2C\xb6\x99\xb5\xe0\xfe\xcd\xc1l\xda\x9a:\xacD\xc6[+?\xf8\x9b\xbfQ\xe8\xbcY\xbbm*\xf5\x99\xa1\x8c\xaa3FE\x90\x93\xb7\xe0A\x03\x1a\x16<$\xb4\xc336\xcc8+mM\x97.=Vu8e\xd5\xb2\x8b\xba,\xb9\xeaE\x13\x1e\xf7C\x12\xaaj>\xe79G\x9cSS\xd7\xe9\x09\x7f\xa5bBF\x8f\x84\x98\x92\x9dJ\x222\xb2\xf2F\x0c*\x9a\xf7\xf7\x0e(xPLL\x87wz\xd85\xdd\xbe\xdb\xbc\xe3r\xfae\x8d\xaa8f\xdd\x16\x15\xe9\xcdk\x9e\x94\xa4\xb2\xf3\xee\xb1nH\xc6\x9a9\x07T\x0dI\x1aBDF\x87\x19\xcf\xda\xf0\x13\x9e\xd1\x14\xb5\xcd\xb2@E\xc6\x1df\xa3C\xc1\xffq\xfa\xab\xf5\xdbF!\xecy:\xfe\xc8\x84\xb8UsB\x07\x
84\xd6\xc55\xd4\x8cz\xde\xf7[\xb7\xdd\xeb\xeep\xc2\x9c\xad\x16\x05\x0e\xbb\xa0KEV\x9feK\xba\xedt\xd0\x9cK~\xc0o\xbb\xaahD\xc2\xa8*j\x92\xe2\xf6x\xcdyy\xbd\x9a\xd2\x9af\xe4\xac\xd8b]\xd3\x9c\xa8\xc0\x0d\x07,\xba\xdf\x0d3\xd2\x1a\xfa\xe4L\x18\xf69q\x1f\xb5\xe4\x9c\x98\x88\x9b\xde\x22pJ\xe7\xe6\xd0[RF\xc6\x8a\xaa\xb8y\xfbD-\xf8)\x9f\x94Q\xfc\xa6\x02\x14\x0cY\x13\x18\x10\xf1\xaa\xbd\x16<d\xc1\x07\xbd\xe1\x80\xd3:\xcd\xb8\xdb\xea\xc0\x1f\x7fl\xe4\xf7\xd3\xb1\xb5\xe6mAH\xf7G\xe2\x87\xa6D\xbc\xc3K\xc6U\x1c\xb6\xec%\x1d:e}\xc8\xe7\xcc\xfb\xba\xfb}\xc5A\xbd2z\xfd\xb4W\xa4\x9c1m\xc8\xa2\x8a\x9d\x12\xdea\xdeO\xea\xf4\xbf\x18\xd1#\x14Ws\xd36\x09\x9dJv;\xed\xc3.H\xb8\xaa\x22-\xa5\x22\xa5\xa2\xec\x01+Fmu]L\xb7I\xcb\x16\x8c\x8b\x19wN\xd59)o5\xe0\xe3F\x5c\xd74e\xbf/\xca\x1bsSKS\xcc\xfa\xe6\xd6\xd1\xa6\xb2\x1d^\xd30\xe6\x05\xbb\xac*8\xecu\xdb\xb4\xb4u*Y0l\x87\xb2\xa4\x82\x8a\x93z\x9c\xb1\xc3U\xbb\xcc\x05\x0d\xed\xf8\xd6\xff\xf9\xe4\xdf6\x17\x9e\x88\xdfh\x7f\xc7-k|\xc7\x90\xba;|Y\x97e\x09/\xe8\xb6OC\xcb\x98?\xf7AYO:j\x8f\xe7\x14\xc5\x1c\xf7'\xf22j\xeeVSw\x87uO\x8bH{\xc3g\xed\xb0K\xb7\x94Ny\x81a\xe7\xf5:\xe8\x94)\xbfo\xcd\xb2\x0e\x0d\x93\x9b\x81|\x0dO\xba 
g\xaf\x9a\x84\x92K\xe6\x1c\xb4\xea\x96\x0d\xa7\xc4dty\xd8\x17\x9c\x16J;o\xda\x88\xb2\xadb\xear\xe2\x06\x95\xb5\xc5\x9c\x974!e\xdeN\x03Nk\xb8&\xa7\x8eA\x17\xb5\xcd)H\xdb\xa7\xe0e\x09\xd7\xc4\x5c\xf7\x949\x8f*\xdb.\xaaK\xdb@\xe2Z\xfb\xce\x93\x8f\xfd\xab\xe7\x1boI~\xc7\x152\xf8'\xeb\x8d\xbehV\xde\xa2\xefQ0\xe8%{\xa5,\xe8\xf0V\x9f\x92\xf3\x9c\x8cK\xee3\xa1\xe9\xbb\xac(\xeav^\xd1\x92\x86\xa2wx\xdd%G\xbc\xa0,\xee\xbc\xb6N\x97<\xac\xe0\x15\xdb\x8dX5\xe6g]3\xab!\xa1\xc3u\x9d\xae\x0bP\xf6\xa3\xbe\xe2\x0d\x87,Jk\x99v\xc9\xf7Yp\xddn\x1b\xae\xe9\xf6Uo\x95\xb2\xcd9\xdb7Cb\x0f\xab[R\x16S\xd2\xaf\xd3\xbaIKV\xb5\xb5D\x15l5\xa8\xd7-Y\x8b\xd6=`\xda\x0em5\x0b\xea\xbe\xcf\x05\xeft\xd9\x13^\xd1\xe7\x0d\x03X\xd0\xb2a\xc3\x96`\xa1\xb9\xfa\xc4\xc4\x87&?sn\xed\x9e\xc8l\xf8\x1d$\xe4\x8e_o\xd9\x1e9\xab.\xa7*eN\xa7\x1b\x1a\x22\x9ev\xc2\xe3\xce\xca:k\x973b\xb6;\xae_\xc5i\xc1\xe6\x10\xcc\x1e-iYQ\xd7t\xa8[\xb2[S\xc2\x92\x05\xf7y\xd0\xa2\x84.\xafj\x983\xa4\xae\xa5d\x8b\x92nkz\x8d9jH\xd6\x84\x0b\x92\x9a&,\xd8\x10(\xb8%g\x9f\x86\xf7\x9a\x11\xc3i\xa1\x84E5\x11\x8b2\xaaR\x02-eQ\x81\xb4@T\xc4\x9a\x15\x0dQ[\x15\xb4\x0d\xb9\xa0\xae`\xbfY}V\x95e5\xc5}\xc9\xb0\x98\x8c\x9a\x0d\xe3\x9aFD5\x8cF\xe6\xc2\xbbz\xaf~\xec\xc0\xd27^\xfb\x81\xc4\xd9\xd6w\xc8\xb2\x9eKG\xa4\xda7\xd4\xe5\xed\xd1\xab\xa5$\xefQ9\x87Lk\xfb{\x15+\x1e1\xe4\xfdZ.\x0b\x9d\xd4\xd40\xa0O\xdb\x84mf\xf5\xca\xdb\xb0,.\xefm\xa2J\xb6I\xd8g\xb7E\x0d\x8fyV\xc9\x15\x1bV\xd1k\xc8\x0c6\xe4dM\x89o6\xbae-\xa8:-\x14\xd5Ps\x9f\xaa%q\x7f\xe6\xb8^+\xb6m\xde6\xa7\x10\x8ai\xe94!n\xa7%\xbdjZ(\x98\xb0S\xc1\x92\xb3\xa2r\x96\x85v\x89\xb8)\xa3,\xaa$\xe35\xcb\xfe`s`4\xa6C\xc4\x8a5\xeb*\x96\xc4\x83\x19\xa5\xd8\xd4\xc7\x0f\xff\xbb\xff\x1c|\xc7\xce\x90\xb7V\xc6\xd5\x82\x88A\xb1of\x81\xc8\xe8\xf1\xa2\x9d8$\xa5\xc7\x86\x8a%\xa1\xb4\x94\xa81\x93\xd6\xac\x1b\xb3\xa0\xcf\xa8\x84\xb7\x1aqHE\xbf\x96\xacU\x0b\x22n\x18p\xd0\x94[\x9e\xf4Y\x83\x02\x11muUM\xbdB\x93\xd6\xb5\xcdx\xc5.I\xe75\xec\xd3k@`\xd16\x19\x19\xaf\xea\xd56\xee\x1e\x07E\xec\xb4.f\xc4\x82NW\xa5Ee\xachK\x8b\xea\xb7lTE
\xc5(v\x8b\x1a\x931\xae\x22+\xe5\xba\xa4@^T\x5c\xc9K\x8e\x18\xf7\xab\xfa\x5c\x97\x93\xb1l\xde\xa0uw\xc9;LF\x01?\x00\x00\x10\x96IDAT$.\x94\x8b\xf5\xb7\xcb?\xf7\xc8\x9f}\xc7,\xeb\x97\xe3\xe5\x7f\x99ouF\xaf\xfba\xe7\x5cQ\xb1\xdb\xac\xba\xd7\xec\xf0\xaa\xbc\x88mN\xdb\xa9d]\xd6\xdb\x5c\x12\xdb\xdc\xf2\xd4\xa1\xdb.1\xbcn\xb7\xcf\xeb\x10w\xc81\xdbT}\x8f+\x0e\xc8\xdb\xe1\xcf-j;m\x5cD\xa0!\xaa fU\x97\x88\xba\x03F\x5c\xb1\xee\x01\xa7\xbc\xc71\x19Q3v\xba\xee\x889#\x12\xaarf\xcc\x9a\xd5T\xd2\xad\xa0\xa9\xad\xe4\x9bq\x0c=\x8a\x02\x11\x07m\xf1\xa0\x05\xabN\xeb1\xeb\x88)e\x0d%\xe3X6\xa4\xa1[\xc6\xb8\x0b\x22\x9bed\xb7\xb21u\xfd\xaaBwy\xd9\xb0\x11qA\x90R9\xf0\x91O\x1d\xcf7Z\xdf\x01\x85\xfcV\xa3\x8d\xab\xd2\xa6\x85\xde\xa7\xe5%y;\xfd\xa4>\x1f\xb4\xe8\x96s\xb6\xda\xa2\xe0\x83\xcanY\xf2\x82m&\xc4\x04\x8e8\xef\x86\x11\xfb]\xd1\xb2&o\xc6\xb0%\xa1\xac.\x9ft\xb7\xa3\xfa\xf5k\xbb\xdb\x1d\xca\xe2*zU\xed\x14\x13\x11\xaaXs\xaf\xba\x989s\xb2\xb6\xa8\xcb[\xb2l\xd4M\x15\xc7\xachzCA]S\x14E\xa5\xcd\xb5.\xfd\x92\x0a\xa6\xac*\xcby\xd6Y\xcfk\xd8\xae\xdf\xba\x86\xd7\xcc\xba_\xd5\x117\x15\xec5\x83\x92ekbb\x8e\x19w\xb7\x13\xfa\xccX\x11\xd1\x14\xf1\x86\x9aE/+X\x13\x13i\x7f\xf9\xd4F}O\xe4\xbf\xa3B>\x10\xeb\x0f\x9e\x8e\xbe\xdc\xfe\xe9_9\xf3d*\x92\x0d\x9e\xb0\xa4\xd3\x17\xedS\xb7\xd3Y\x81yG\xbdEY[\xd4\x0d;\xfd\xae\xf7\xb8\xa0\xdf\xaaPA\xdc>\x8b&\x8d\xfa\x9ag\xcd\xebt\xc8\xac)#\xb6\xeb\x93\xd0i\xd2\xa75\xa4\xb5\xc4\xb5\x9d\x93\x10\x1aR\x90\xb0\xa1)\x94T2%T\xb5\xa2\xe6NG-\x9b\xb0\x22P\x10U7\xa9,\xed\xbawY\x90uQ(n\xdc\x8a\xb2\x8811\xc1f\xd6]\xdc\x90\x9b\xb6)\x89[\xb4MV\xcb\x92w\xf8\xba\xddf\x84\xfaL\xdb\xb2y\x09\x19\xc1\xbc\xa6\xb6\x1b\x1et\xceV\x11\x11\x81\xab\xe8\x10\xea6#\xa6\x22\x17\xf4\x15b\x8f^\xfc\x8b\x1f\x0b\xde\xf8\xefC\xc8\xfd\xb1/5o\x84/\xb7\xff\xe2\xbe\x17\x7f\xa6=<\x14\xebR\xb6\xdf\xeb6\xb4u\x9a\xf1\xb4\xbc\x01\xd3\xae(i\xe9\xb6..cU\xe8\x92\x94\x1b\x0e+y\xbf\xafKm.\xac\xaf\x1bV\xb2*\xa6\xeag\xbca\xd9\xa2.\xc7t\xc9+Ji\xebuS/\xe2B\x0d\x1bBu\x19M\xb7\x10H\xa9)\xca8$iQI\xda\x88UI\x17\xed\x97\xb0\xa8\xe
5\x86P\xd2\xaa@\xc9.\x059\xefsT\xdc\x0f:\xed\xb2^\xf3\x1av\xeb\xf7\x19\x07-\xdb\xe2\x92\xb2\xa6\xb4\xb2\xac\x86nMM[\xccZv\x975\xc3\xce\x1b\xd4e\xc9\xb2\x98oF\xc1\x0f)kh\x9a\xb4(f5\x11\xd9\xf5\xceg\xfff\xbe\xf9\xff\xc2\xb6\xfe\x09\x01*\xef\x9e\x1c8\x16F\x22\xbd3vx\xdd^9=^\xd5R\xd4\xaf \x8ea\xd7\xd0\xef\xaa\x09uwk\xd9\xaf\xea\x0fuX\xf5\xa4u\x03\xaey\xca-}\xbe\xaab\xc4\x86K\xc6\xect\x9f\xcb\xca\x0e\xfbmM\xef\xb2,/aA^\xaf~\xab*\xd65\xdc\xe5\x82\x9c\x1d\x1a\xce\xda*4\xa7\xcb\xb8\x0f\xf9\x04\xceH\x8b\x98\xb1\xcb\x9c\x92\x9d\xae\xb9\xd7\xba^\xe7\xad\x8b\xe9\xdc\x8c\xf7\xbf_\xd3\xb2\xbb\xac\x9b\x96\xd3pM\xcb}^\xd5\xe11\xff\xe8\x90ys2z\xcd\x1b\x95W\x16U\x13S3\xa4\xa5`\xc3v\x8fz\xdd\x8a1s\x96t\xc9Z\x12\x91\xb3\xa4C\xd5\xa8\x86\x92b+\x11=\x9eM\x95\xbe\xed\x0aID\xffp\xeb\x95\x8b\x1d\xd9\xee\xf4\xce\xd6C\xe6\x1d\x08v\xd9\xe1o\xf4\x99\xf7V+j\xaa*\xd6D5lH\xeb\xb3$\xb0l\xcd1y\x1b\xc6\xf4\xc9\xe9\xb4\xdf\xaa\xa8\x15+\xd6\x0dy\xc9\xcf\xaa\xe8\xf7Y\xf7\x0b\xfd\x9f\x06\xdc\xeb\xcb\x9b\xd5\xc2\xb2~\x091\x8b\xf2\x86\xa4\x14D\xedrRR\xcb\x92\x07\xbc\xacm\xd0\x0b\xf2f\xfd\xbc\xaak\x08\x0d\x1bt\xcb\x88\xed\xd2\xba}\xd1\xc1\xcd\xa9\xf4\xb2\xac\x9b:\x0d\xea\xb0\xc5\x19\x19I\x19\xdd\xce\xd9\xa5\xee\x0d\x19\xab:\xbd\xd5\x09\xab\x922\x12\xba-*\x1b\xd0T\x12\x8a\x1a7\xeby\x13\xaa.\xa9\xeb\xb1l\x5cFMUJ\xde\x11\xa7\xa4\xd5\x0cF\xd2\xb5\x7f\xfb\xc1?\xf8x$\xf6\xff\xb4&\xf9\xffD\xc8\xfb\x13\xbf\x1d\xfe\xe5\xc9\xf1\xd4\xc3\xf1\x1e\xd7\x22\x97\x83\xb7\x053R^0iU\xb7\x86YyY\xcb\xf2\xe2j\xda\xca:\x0d\xbbh\xb7\xab\x8aR:\xa4\xed\xb2dLF^\xa8\xcf\xeb\xba]5*\xd4\xe1^\x0f\xf9+W\xa4\xa4\xbda\xd467\xd5LjhX\x90\xb5\xd5eYI\xdd\x96@\xbfeS\x0e[\x977)\xed!\xbfo\xab\xa6\xa45ME\x11u\xd3r\xaa\xfe\x85\xcf\x1a\xb1aPRS\x8fi\x11u\xe7=\xa8)\xa5lCAAC\xa8\xaej\xcd\x15;%\xac\x8aZ\x90\xd6\xed^\xa76\xb3\xb3b\x96u\xf9\x01\xe7\xcd\x1aV\x16\xd3c\xc6\x92\x94\x84\xa6\xc6\xff\x9d\x5c\xd4\x10\x8f\xa5{\xbe6\xf4\x95O\xbf#\xb8\xfa\xed\x22\xe4\xa3\x91\xbfj\x9e\xfa\xd3\xed\x8fu\xc6\x8bBk\xa2\x96\x0c\xf9\xaaUS\xb2\x96TU\x04\xeabh\x1aV\x10\xba[Q\x
e8^\xd3\x92\x16\xed1 \xe2\x92\xeb^\x12\xb5\xc3\x89\xcd\xc8\xa4\x8c^7\x5crI\xc2\xbc\x0eK\x1e\xf5\x9a\xb8\x9c\x9a\x8a@LL\xdee{e\xd5\xf4h\xeb\x12\x17\x97\xd76'\xa1\xaa\xa5\xdbs\xd2\x16\xbd\xdb\xaayDD\x0dX\x91W1`\xdd[uK\xa8I\xda\xf0\x88\xa2\xa8$\xd2\xeaR\xf6\xbb&\xd4+\xd0\xa3\xa6*&\xaf\xa1\xcb\x92\xbb\xcc\x8aX\xd6!\x8a\xb4iY-W\xac\x8b)\xdak\xd8U9G\xcc*\x1a4\xa6\xa4eT\xcd\x9a\xb4\xaa\xc4\xf6[\xff\xe6\xea\xb7\xcb\xb2\xde\x1b\xfb\xeb\xf0\xf4/\xf5\xfc\xf2C\xad R2\xa6W\xd5M\xeb\xd6\xd4\xc54L\xba,\x94\x16j\x19T\xd7v\xb7N7\xe5\x85\xde\xb0\xa8e\x97\xabv\xf9\x07uq-\x97l\xd7\xf0\xe3N\xdb)/\xee\xa0cJ\xae\xdb\xad\xac\xc7\x0b\xc6\xadXQ\xd3%\xa9)k\xcd}\xf2Bm\xb7T%M\x0buI\xc8\x88\x8aZvM\x87\xa8\x15_\x14\xf3\xb6\xcd\x06\x87\xab\x22Z\xe2\xce\xe9vTFus\x1bUI\x87\x98\x82\xf3\xa6\xdc0+i@U\xc9\x80\x92@\xbf\x88\xa6\x92\x8c\xb2\xbc\xd0V-\x159\x09\x0d\x03B\x07\x95\x84\xda\xb2\xe6M\x8b\x1buS\xa8\xd3\x98\xcb\xb2Z\xe6\x15\x0c+\x08\xdac\x1dcG\xaf_\xfd\xb6\x1d\xea\xfb?>\xfa\x13O\xb6c\x91\x0dM\xe7\xb5-\x88\x0b\x14u\x1a\xb0\xe2\x96wx\xc6\xfd.*\xe8\xd0iI]TNVAKY\x8f\x15\xef\xf7\xf7:\xc55\x8d;\xa1[V\xd2 
\xe6d6\x07\xc7\xa2\x9a\x22\x8a*:-\x8a\x09\x0cZ\x115b\x87o(\x0b\xc5t\x184\xb7\xf9\xc9\xbe\xb9\xcek\xc4\x15)%\x03\x0a\xc6\xcdJ\x1b\xd0\xe3\x9a\x88\x96\x82\xd0V7e\xb5\xbc\xdf\x8c\x88\xf3\xb6\xba\x22\xaf%\xd4%\xb0\xae*\xea.\x817t+\x89K\xdb\x10\x11U2\xa4\xa2eBA\xa0i\xbb3:\x15\x15D\xc4\x0d*ii\xe8\xb2\xac[\xa8\xa8ST\x14\xc173\xc0\xc2j\xd0\xf5}G?\xf9m\x22\xe4\x91;\xb2g\xc6\xc3\xcb\x91+~\xdaW\x0c{\xc9\xddn\xd9\xe1&\xaa*F\x05\xee\xf1\x155{\xd4\x9c\x97\x15\xda\x22\xa5\xe9\xaauq#\xe2\xf2\xca:\x8c;e\xd2\x8a\xaa\xacNs\xf2\x0e*k\xdb\xd0-e\xd9\x9amVUt\xe90\xe7n\xd77c\x92r\x88([\xd5g]VCI\xbf\xbc~s\x06\x14\xd5\xb4\xa4l\xe8\xd0\x12\x17\xd7a\xdc\xaa\x9c\xa8eE\x09{\x1d5h\xb7.g]\x12Hh\xc9Z\xd3\xaf!\xc4\x00\xb6\x9a\x16\x9a\xd6/kY[\x03#N\xb8S^\xc3\x86\x11\x97e\xd0aQLLJ\xa0.\x22\xa2\xe4\x0ec\x96\x0c\xfb\xba>1\x0d\xeb\xedt\xe47&\x9e\x9a\xfe\x16\x12\xf2\x96\x873?\xdc8\xd08\x1b_\x7f\xf8\xd0g\xef\x8a\x8f\x14\x82P\xdc\x86!%IM#\xa6\xfd\xa0\x7f\xd4\xa3iT\xc5-CJz\x9c3d\xce\xa4\xba\x8b\xb6Xr\xc0q$=\xec\xa8\x9c\x19\xbdJ\x92\x9buHMKL 
g\xd5!7\xad\x0b\xb4\x8c*k\xaaK\x0bD\xd4DeE\x14=\xe1y\x13\xd6\xad:\xec\x82!7\x1cq\xd9v\xafk\x0aD\x05\xf2F\x84J2\x02U=V$\x0c\xca\xe87\xaf\xd3\x84\x13\xdep/n\xba\xe4I'\xf5n\x9ey\x0dM\x8c\xdb0jJ\xa7\x9aE\x81\x8a\x836l\xd8jZ\xb7\x15%Y\x0b\x88\xa8\xe8\x90\xd1T\xb1\xdd\xa0\xe3\x1ew]\xcd\x80\xbc\x013\xc6\xadk\xb4\xfa\xa2\x9f\xbc\xcb\xc9o\x11!?\x16-\x8e\xd7\xde_\xea\xcf^{\xcf\x7f\xba\xe9fu9v>\x96DUSJR[]\xda\x98\x9b\xeaztk\xa0\xadGI\xc16g<\xe5%\xf7l\xc6\xbe$6\x8d\xa8\xec\x90\x05\x9d\xd2:-\xbb\x85\xfb\x5cR\xd7\xb2\xaeW\xda\xb4>5\x03\xa8\xca\x1b\xb2\xa2SYC\xa7@N\xda\x82\xb2\xa4\xb2\x09m\x05I\x19-%-\x05#\x9bU@\x072n\xe8Qu\x87S\xd2\x92Z\xaaR64tZ\xd5\xa9WS\xcd\x84\xc3\x9b\x9d\xbfQ\xddn\x19\xb1\x22#\x82\xba\x83N\xab\x0blQ\xb2&\xad\xa8SA[[LhH]\xd2>'5u\xe85\xadf\xa7\xba\xa6\x9c\x88\x96\xee\xb0\x106\xe4\xc4\xc3\x8e\xe8\xf6\x8f\xfe\xf2\x9f~K\x0e\xf5\xff5\xf8\xed\xf6\xcf\xfd\xc0\x03\x7f\xd0xt\xe8}\xc7\xdaK&\xe3\xd7\x22M\xfd\xd6\xecW\xb3EJ`\xbb@\xc1\x9d\xa2\xd2\xf2\x22\x92B\x15\x09\x15}\xc6}\xc9>\xabF]\xb7f\x87\x17\x0d))\x0b]\xd4g\xce\xa2EmU\x1d\xba]\x17\x11JY\x95U\x17\xbaj\xd0\x94\x11-\xeb\xba\xe5\x8dj;dn\xb3\xcf\xeb\x11)\xa3\x96\xbcE`HQ\x5c\xaf\x8a\x9b\xeev\xcd\x90\xbaU\xc3\x16\x8d\xab\x8ak\x19\xb1dDU\xc4\xa8\xedn\xe9\xd5oM\xaf\x9c\x0d\x17\xe44\xbd\xdd\x8c\xa8.\x0bZ\xf6n\xda\xd5*\xee\xd3vR\xbf\xa6QmQI\x19\xa3\xba\xb5\xa4\xb5\xf5)\xe8\xd2\xa3j\xde\xa4\xa4\x86\x0eu\xa7\xc3\x07\xdbW\xea\xc5\xf8xP\x0c:\x83\xf3\x9f)\x9e~\xed\x1bsW\xbe\x05\x0ay[\xf0\xe5\xf0c\xff|\xf6_m\xb4u%\x22\xc5 
*j\xc0\x9c\xcafS[\x5c\xa8\xd3e\xbd\x864\xc2b\xb0+\xdc\x10\xd7\xb2d4\xe8\x0bgtX\xf4\x98?3d@C\x97\xd7\xfd\x9c\xdf\xb1\xc5XP\xf4N_\xb3aV\x9f.\x93.\xd90\xa0\xe9\x96\xb2\x94\x84\x9a\xc0>\xd7\x1d\xf2\x92\xa6\x82\xa6N]\xca\x029\x11k\x22\xa2:E\xb4l\x08\x0d\xaa\xabH\x99w\xd81\x83\xda\xf2\x0e{\xc5\x13NH\xeaR4\xe3\x1e7\x14\x0cI[\xb5\xacGK\xce\xaa\xb4\x01k\x16\xedv]\xce\xa4\x93\x06\xa5]3\xa4h]\xce\xb0S\x06\xddc\xc6\x9a\x96\xa6\x9c\x0e\x0bJz\xec7o\xd9\x80\x96U[\xcc\x0b\xc2!\x9d\xe6\x83.[\xf4\xfat\xfd-\x0b\xcf\x1c;\xf0l-\xfd\xf8\xe5\xe8B\xf7j;\xfb\xc17\xbe%\x96\xf5\xde\xd1\xcf6\xff\xf4R\xd0uZ\xbay=6\x11\xbe\x12f\x22\xe9\xd6Ld \x98o\x1f\xa9]\x94MtESba&\xe8\x13s\xdd\x8caS\xb8;\x9c\x09\xea\xf6KZ\xd1\xd6\xab\xcf\xb0\xa8\x97\x8d;\xee\xbcx\xb9\x1d\xcd\xc4\xa2\xf5\x81h3\xda\xd3~6\xf1v\xaf\x88\xe8uK\xd26\xa7\xf5\x1a\xb0&\xad\xd4N\x85\x8d\xf6\x81\xe6\x17#\x07\x92\x15\xf3\x0el\xaeI\xca\xeb\xb2,\xae\xdf\x9c\xb6\x1eUs\x1esI\xa0(!\x22\xa3\xad\xa5K\x5cQ\xdc\x94\xbd\xa66\x97\xd5\xa7\xc5mqJ\x97\x1d\x8e+\xeb1\xa0\xa5\xac.'\xe5\x86\xb8\xc1F\xce\x05\xdb\xc3\x85D\xbc\xda\x8e\x15##\x91\x84N\xa7\xc3=\xe19Y\xcdH\x87\x98f\xbb\x15\xc9\xb4\x1b\xe1v'\xa2\xb9v\xdaBp 
8\xa9KU\xef\xd9#g\x8e&\x22_\x18\xca]\x5c\xef4ZY\xdb\xbf\xa7Z\x1f\xd9\xb8\x1e\x86K\xbf\xdf\xd9\xfc\xa3\xe6?\x91\x90|\xf0\x93{\xb7\xff\x8b\xff\xed\x8fv~\xf4\xd1\xc7\xe3c\x93\x89\xcf\xb9\xd7\xbc\x85V_5\x9a\xd9R}\xfd\xec\xb6\xae\xe0\xe6\x1d\xd9\xcb\x91;\x8e\xaf\x87\xcd-K\xa3\xb3\xcb\xbbo,NU\x9a\x1d\xb9\x8ew\xf7\xfc\xd9b1\x99JG\xb7$r\xd7_[[i\xc5\x22\xb7\x22\xef\x08\xffU\xf4{\x1f(\xb6\xd2\xef*?\xd8^n\xcdm\x1fX)\xcc\xa4\x86\x07\xd6\xd2\x09\xddJ\x22\x1er\x5c\xbf@L\xe0\xb2d9\x16+\xe6\xa7\xe7v\xf7\x14\x96\xabG\xb6Y\xd6\x14\x185\xa3G\xd6\xb5p\xd2\x8a\xb8\x94\xba\x9c\xb6kz\x94\x8d\x9b\x92UU\x92\xda\xb4\x90\xa4\x97\xc2\x91\xa0C\xd1vW\xa5M9\xa0\xa9f\xddvQW\x0d\x8a\xdbi^\xbf\x96|p\xb3\xb0\x11D\x1b\xe3\xc1\xc5\x9e\x07\xa6\xe6\x06\xf6\xa5N\x05\xfdJFtZW\x15\xb7lHT\xd5\xac\xac\xb9\xd6\xb6h(\xae\xd3\xfa3\xedf\xff\x5c\xe5\x8d\x95\x17\x0a\x03\xd9K\xcd\x1d\x89J\xae\xf8oV\xfe\xba\xfb\x17\xe6\xd5?6\xfa\xf1i\xd1_\xf7\xeb\xado\xd1_\xd6\xcf=}\xd7k\x7f\x19\x1bxg\xdf\xd9\xc2#7\xbe\xd0\xf9#+\xff\xe5\xa9\xe3\x137>\xf1\xf9;_\x9e^I\xf4\xec\x8b\x9e8\xbae\xf6z\xcf\xc9d\xb5\xfa\xbd\xe3\xdb\xa2'\x9e^{5\xfa\xda7\x96V\x8b\xff\xfa]/\x17*]\x99\x95G\x22\xffxz.\xdc\x97Y\x8dTZ;\x13K\xf5X\xbb\xa3>\xf6\xd6\xdf\xfd\x92\xea\xbbv\xf7<6t\xf4\xc2\xff\xd8\xbb;r\xb5\xda\xee\x8a-\x9d:Pi\x8e\x04#\xad\xaf\xa7Z)\xcdDy\xea\xf8\xe5w\xaf\xff|\x85\x9f}\xef\xd2\xafu\xfdR\xa3\x94\x09R\x0fiG\x9f\xed\x7f\xf0\xdc\xe1\x8f\xfc\xcc\xb0\x84\x04bB\x11D-\xb8%\xa3$eF\xafo\xd8\xeb\x86\xbb\xe4\xf5\xf9\xb2\xa4w\x1a\xf5\x0do\xf1y?\xe5?\x86\xef\x0fr\x1a\x9a\x92Zb\xa2J\xd2\xe1\x7f=\xf6\xca\xaf>v\xee\xf2\x1dW\xa6\x17;\xdfw~=\xf7\x0f\xa9\x7f\x9b:\xa1\xcf\xe9fy\xa3\xd5\xde\xd3\xee\xea\x98\x8b\xde\x9f8\x9b\xffL\xfcV\xbd\xde\x9a\xaa\xbc\xfb\xc9\xe2k\xadD6\xbbev\xc5\xf9\xfa\x03\xd1\xc5\x5c\x18\xe9\xaeH\x9d\xd9\xfd\xc0\xfeF}\xee\xfaw\xaf?\xf9\xca\xef$\xfe\xa0z8*\xec\x0e\x9f\x0f\xbf\x05\x84|\x7f\xf0\xb7\xe1/&\xae%\xfe\xbe\xf4\xe3\x9d\x7f\x5czg\xe7\x17KoM=\xf7\xdf$\x9f\xffqg\xa9\xf2\xf3\xad\xefM\xb5c\xb3\xcd{\x06f;\x9e\xae\xfd\x0f7\x7f<\xb9\xfa\xf6\xa0\
xd7\xcb\x7fw\xe5\xd9\xc9\x7f\xac\x5c\x18z\xe7\x91Wn\xdc\xff\xdc\xa7\xe3\x890\x1a\xf4\x1e.\xec\xed\xaa7\xc3h<\x884V&\xc2\xe3=\xbb\x8a^=\xd2\xf8\xb3]_\xbeX\xae\xfd\xc5\xe1\x17k\xfd\xf5\x83\xdd\xab\xa9\xb5\xf6\x0f\xbf\xf0_\xee_\xc9v\xb7\xa6j\xdb[\xbf\xf55v\xc4\xbe\xf7\x9f\xb7\x06\xf2\xd5\xcb\x06\xfa\x17\xd7\xef\xea\xfa\xfa\xb99\xc9\xec|\xa1\xd1\xe8\xca&W\xda=F\xf67\x96+\xbb\xabA2\x92\x9b\xaf4\xab\x8b\xd5\x9d\xa5\x85\xe4\x91\xca\xd7\x5c\xb9\xa8\xff\xf1z}\xe4l\xac\x1c\xf9!oD\xf7\xc5\x17\xa2\xdbs\xcf\xdd\xdc\xd3q\xa5t\xa3\x8dT\xb6YlkJ\xbe\xbf'\x08\x06\xb2\xa5\xae\xe2\x03\xe9\xbf^\x9f9\xd0\xb9\xb2\xe4\xfer\xbc\x1c\x9fHO\xd5\x93\xc3\xc9\xca\xd8\x81\x9b'\xf6e\x8e\x86\xa3:\xf7\xb4\xc2\xf9\x0b\x83\xf9\xfe\x85Xs\xa99R9\xddL\xe8\x09\xc6\xda3A\xa9\xb1=\xf1s\xcf\xfe\xd1\xc4\x1f^\xfd\xcdC\xa7\xdb\x13_\x9e\xf6\xaf\x9b\x7f\xb7\xcb\xb5f\xfb\x03\xe1\xb7D!?\x13\xfc^\xc8G\x83\xff\x14>\x11<\x1f\x1e\x09^\x0f\xf9\xcd\xc8\xd7l\x0b\x17\xf4\xf9\xf8?iZ\xfb\x17G+\xb1R\xa4\x9a\x1a\x89\xfe\xfb\xb3\xfc\xe7\xe8W\xd3\x97\x82\xc1`\xac\xbd\x11\x89\x84\xd9\xf2h\xc7\xe50k*\xd8\xd9\xee\xaa\x5c\x0d\x0e\x85?\x1b\xf2\xf6\x84\xf4r+\x16\xbbsd\xed\xf1l\xab\x164BA;\xac\xc6V\x1a\xf7\xf57\x8a\xf1\x85\xe2hb\xbeo!=\xd2\x119\x93\x1d\xed\xcf\xcc\x0evM\xf5\x0c\xf5%\xc2fP\x08\xb64\xe2\x22\xedT~\xc1W\x83Xlv\xe6\x7f:y\xabvzt2\xfd;\x9f\xfa\xf0\xc4\xc2\xc8\xcem\x83\xaf\x15\xd2\x8b\xfb\xcfW\xa2\x89-\xdf\xb8o\xecX\xfa\x8d\xcb\x8f'\x9e\xcd\x1e,}f\xfe\xd1\x8e?Z\x0a\xa3A\xebs\xd1\xf7\xb4\xb8'x\x8d0\x0c\x08\xc2\xb7\xc4~\xac\xbb#}-\xf2r\xf9\x91D\xc9?\x14\x16\xc2\xef\xef\xbf?X\xad\x15,G?W~G\xfa\xdf\xcf\xf2\xb7\xc1\xf7\x7f\xdb\xd6\xec\xbd\x897\xf1&\xde\xc4\x9bx\x13o\xe2M\xbc\x897\xf1&\xde\xc4\x9bx\x13o\xe2M\xbc\x897\xf1&\xde\xc4\x9bx\x13o\xe2M\xfc\xff\x0d\xff\x17\xecjz\xba\xae\xcbJ$\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = "\x00\x07\x07\x0f8\x93\x00p\x00i\x00x\x00m\x00a\x00p\x00s\x00\x08\x06NY\xc7\x00m\x00o\x00n\x00k\x00.\x00p\x00n\x00g"
qt_resource_struct = "\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x14\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00"
def qInitResources():
    # Register the embedded resource data with Qt's resource system
    # (0x01 is the resource-format version expected by qRegisterResourceData).
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Mirror of qInitResources(): unregister the same resource data.
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Resources are registered as a side effect of importing this module.
qInitResources()
| agpl-3.0 |
yanxiangtianji/MyToolDigest | BunchExperiment/ResultAnalysis/avg-table.py | 2 | 1202 | import sys
# Placeholder for an inline sample table. It is empty and unused at runtime:
# main() reads its input from a file and shadows this name locally.
data0='''
'''
def parse(data0):
	"""Parse a tab-separated numeric table.

	data0: text with one row of tab-separated numbers per line; lines with
	fewer than two characters are skipped.
	Returns (data_avg, data_std): per-row means and population standard
	deviations.
	"""
	data1=[l for l in data0.split('\n') if len(l)>1]
	data_avg=[]
	data_std=[]
	for l in data1:
		x=[float(v) for v in l.split('\t')]
		n=len(x)
		m1=sum(x)/n
		m2=sum(v**2 for v in x)/n
		data_avg.append(m1)
		# E[x^2] - E[x]^2 is the *variance*; take its square root so the
		# result really is the standard deviation the program's usage text
		# promises. max(..., 0.0) guards against tiny negative values from
		# floating-point rounding.
		data_std.append(max(m2-m1**2, 0.0)**0.5)
	return data_avg, data_std
def write(data_avg, data_std, fout):
	"""Write paired (mean, std) rows to fout, tab-separated, 9 decimals."""
	for avg, std in zip(data_avg, data_std):
		fout.write('%.9f\t%.9f\n' % (avg, std))
def main(infile, outfile=None):
	"""Read a table from infile and write the mean/std table.

	If outfile is falsy, results go to stdout. Context managers guarantee
	both file handles are closed even when parse()/write() raise (the
	original code leaked the handles on exception).
	"""
	with open(infile) as fin:
		data0 = fin.read()
	data_a, data_s = parse(data0)
	if outfile:
		with open(outfile, 'w') as fout:
			write(data_a, data_s, fout)
	else:
		write(data_a, data_s, sys.stdout)
if __name__ == '__main__':
	# CLI entry point: one required input file, one optional output file.
	if len(sys.argv) < 2:
		print('Average each row of a table and output a table with two column representing their mean and standard derivation')
		print('Usage: <infile-name> [outfile-name]')
		exit(1)
	target = sys.argv[2] if len(sys.argv) > 2 else None
	main(sys.argv[1], target)
| gpl-2.0 |
henaras/horizon | openstack_dashboard/dashboards/project/databases/workflows/create_instance.py | 31 | 16525 | # Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class SetInstanceDetailsAction(workflows.Action):
    """First workflow step: instance name, flavor, volume size, datastore."""

    name = forms.CharField(max_length=80, label=_("Instance Name"))
    flavor = forms.ChoiceField(label=_("Flavor"),
                               help_text=_("Size of image to launch."))
    volume = forms.IntegerField(label=_("Volume Size"),
                                min_value=0,
                                initial=1,
                                help_text=_("Size of the volume in GB."))
    datastore = forms.ChoiceField(label=_("Datastore"),
                                  help_text=_(
                                      "Type and version of datastore."))

    class Meta(object):
        name = _("Details")
        help_text_template = "project/databases/_launch_details_help.html"

    def clean(self):
        # Reject the "please choose" placeholder that
        # populate_datastore_choices() may prepend.
        if self.data.get("datastore", None) == "select_datastore_type_version":
            msg = _("You must select a datastore type and version.")
            self._errors["datastore"] = self.error_class([msg])
        return self.cleaned_data

    @memoized.memoized_method
    def flavors(self, request):
        """Return the trove flavor list; redirect to the index on failure."""
        try:
            return api.trove.flavor_list(request)
        except Exception:
            LOG.exception("Exception while obtaining flavors list")
            redirect = reverse("horizon:project:databases:index")
            exceptions.handle(request,
                              _('Unable to obtain flavors.'),
                              redirect=redirect)

    def populate_flavor_choices(self, request, context):
        """Return sorted (id, name) flavor choices, or [] if none found."""
        flavors = self.flavors(request)
        if flavors:
            return instance_utils.sort_flavor_list(request, flavors)
        return []

    @memoized.memoized_method
    def datastores(self, request):
        """Return the list of datastores, or [] when the API call fails."""
        try:
            return api.trove.datastore_list(request)
        except Exception:
            LOG.exception("Exception while obtaining datastores list")
            # Bug fix: previously assigned an unused attribute and fell
            # through, returning None. Return an empty list so callers can
            # iterate safely.
            return []

    @memoized.memoized_method
    def datastore_versions(self, request, datastore):
        """Return a datastore's version list, or [] when the call fails."""
        try:
            return api.trove.datastore_version_list(request, datastore)
        except Exception:
            LOG.exception("Exception while obtaining datastore version list")
            # Bug fix: previously returned None here, which made
            # populate_datastore_choices() crash on len(None).
            return []

    def populate_datastore_choices(self, request, context):
        """Build grouped (datastore, versions) choices for the select widget.

        A "select..." placeholder is prepended whenever the user genuinely
        has more than one option (any datastore with >= 2 versions, or more
        than one datastore with a single version).
        """
        choices = ()
        set_initial = False
        datastores = self.datastores(request)
        if datastores is not None:
            num_datastores_with_one_version = 0
            for ds in datastores:
                versions = self.datastore_versions(request, ds.name)
                if not set_initial:
                    if len(versions) >= 2:
                        set_initial = True
                    elif len(versions) == 1:
                        num_datastores_with_one_version += 1
                        if num_datastores_with_one_version > 1:
                            set_initial = True
                if len(versions) > 0:
                    # only add to choices if datastore has at least one version
                    version_choices = ()
                    for v in versions:
                        version_choices = (version_choices +
                                           ((ds.name + ',' + v.name, v.name),))
                    datastore_choices = (ds.name, version_choices)
                    choices = choices + (datastore_choices,)
            if set_initial:
                # prepend choice to force user to choose
                initial = (('select_datastore_type_version',
                            _('Select datastore type and version')))
                choices = (initial,) + choices
        return choices
# Permissions gating the "Initialize Databases" step; both lists default to
# empty (step always shown) unless overridden in the Django settings.
TROVE_ADD_USER_PERMS = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
TROVE_ADD_DATABASE_PERMS = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
TROVE_ADD_PERMS = TROVE_ADD_USER_PERMS + TROVE_ADD_DATABASE_PERMS
class SetInstanceDetails(workflows.Step):
    """Workflow step wrapping SetInstanceDetailsAction."""
    action_class = SetInstanceDetailsAction
    contributes = ("name", "volume", "flavor", "datastore")
class SetNetworkAction(workflows.Action):
    """Workflow action that lets the user pick networks for the instance."""

    network = forms.MultipleChoiceField(label=_("Networks"),
                                        widget=forms.CheckboxSelectMultiple(),
                                        error_messages={
                                            'required': _(
                                                "At least one network must"
                                                " be specified.")},
                                        help_text=_("Launch instance with"
                                                    " these networks"))

    def __init__(self, request, *args, **kwargs):
        super(SetNetworkAction, self).__init__(request, *args, **kwargs)
        network_list = self.fields["network"].choices
        # Pre-select the network when it is the only available choice.
        if len(network_list) == 1:
            self.fields['network'].initial = [network_list[0][0]]

    class Meta(object):
        name = _("Networking")
        permissions = ('openstack.services.network',)
        help_text = _("Select networks for your instance.")

    def populate_network_choices(self, request, context):
        """Return (id, name) choices for the tenant's networks; [] on error."""
        try:
            tenant_id = self.request.user.tenant_id
            networks = api.neutron.network_list_for_tenant(request, tenant_id)
            network_list = [(network.id, network.name_or_id)
                            for network in networks]
        except Exception:
            network_list = []
            exceptions.handle(request,
                              _('Unable to retrieve networks.'))
        return network_list
class SetNetwork(workflows.Step):
    """Workflow step wrapping SetNetworkAction."""
    action_class = SetNetworkAction
    template_name = "project/databases/_launch_networks.html"
    contributes = ("network_id",)

    def contribute(self, data, context):
        """Copy the selected network ids from the raw POST into the context."""
        if data:
            # The network list may contain an empty string when nothing was
            # explicitly specified; drop those entries before contributing.
            selected = [net
                        for net in self.workflow.request.POST.getlist("network")
                        if net != '']
            if selected:
                context['network_id'] = selected
        return context
class AddDatabasesAction(workflows.Action):
    """Initialize the database with users/databases. This tab will honor
    the settings which should be a list of permissions required:

    * TROVE_ADD_USER_PERMS = []
    * TROVE_ADD_DATABASE_PERMS = []
    """
    databases = forms.CharField(label=_('Initial Databases'),
                                required=False,
                                help_text=_('Comma separated list of '
                                            'databases to create'))
    user = forms.CharField(label=_('Initial Admin User'),
                           required=False,
                           help_text=_("Initial admin user to add"))
    password = forms.CharField(widget=forms.PasswordInput(),
                               label=_("Password"),
                               required=False)
    host = forms.CharField(label=_("Allowed Host (optional)"),
                           required=False,
                           help_text=_("Host or IP that the user is allowed "
                                       "to connect through."))

    class Meta(object):
        name = _("Initialize Databases")
        permissions = TROVE_ADD_PERMS
        help_text_template = "project/databases/_launch_initialize_help.html"

    def clean(self):
        # An initial admin user is only meaningful together with a password
        # and at least one database to grant access to.
        cleaned_data = super(AddDatabasesAction, self).clean()
        if cleaned_data.get('user'):
            if not cleaned_data.get('password'):
                msg = _('You must specify a password if you create a user.')
                self._errors["password"] = self.error_class([msg])
            if not cleaned_data.get('databases'):
                msg = _('You must specify at least one database if '
                        'you create a user.')
                self._errors["databases"] = self.error_class([msg])
        return cleaned_data
class InitializeDatabase(workflows.Step):
    """Workflow step wrapping AddDatabasesAction."""
    action_class = AddDatabasesAction
    contributes = ["databases", 'user', 'password', 'host']
class AdvancedAction(workflows.Action):
    """Optional step: seed the new instance from a backup or a master."""

    initial_state = forms.ChoiceField(
        label=_('Source for Initial State'),
        required=False,
        help_text=_("Choose initial state."),
        choices=[
            ('', _('None')),
            ('backup', _('Restore from Backup')),
            ('master', _('Replicate from Instance'))],
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'initial_state'
        }))
    backup = forms.ChoiceField(
        label=_('Backup Name'),
        required=False,
        help_text=_('Select a backup to restore'),
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'initial_state',
            'data-initial_state-backup': _('Backup Name')
        }))
    master = forms.ChoiceField(
        label=_('Master Instance Name'),
        required=False,
        help_text=_('Select a master instance'),
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'initial_state',
            'data-initial_state-master': _('Master Instance Name')
        }))

    class Meta(object):
        name = _("Advanced")
        help_text_template = "project/databases/_launch_advanced_help.html"

    def populate_backup_choices(self, request, context):
        """Return COMPLETED backups as (id, name) choices, with a prompt."""
        try:
            backups = api.trove.backup_list(request)
            choices = [(b.id, b.name) for b in backups
                       if b.status == 'COMPLETED']
        except Exception:
            choices = []

        if choices:
            choices.insert(0, ("", _("Select backup")))
        else:
            choices.insert(0, ("", _("No backups available")))
        return choices

    def populate_master_choices(self, request, context):
        """Return ACTIVE instances as (id, name) choices, with a prompt."""
        try:
            instances = api.trove.instance_list(request)
            choices = [(i.id, i.name) for i in
                       instances if i.status == 'ACTIVE']
        except Exception:
            choices = []

        if choices:
            choices.insert(0, ("", _("Select instance")))
        else:
            choices.insert(0, ("", _("No instances available")))
        return choices

    def clean(self):
        """Validate that the chosen initial-state source exists.

        Clears whichever of backup/master does not apply so only one
        source is ever contributed to the workflow context.
        """
        cleaned_data = super(AdvancedAction, self).clean()
        initial_state = cleaned_data.get("initial_state")

        if initial_state == 'backup':
            backup = self.cleaned_data['backup']
            if backup:
                try:
                    # Normalize to the backup's canonical id.
                    bkup = api.trove.backup_get(self.request, backup)
                    self.cleaned_data['backup'] = bkup.id
                except Exception:
                    raise forms.ValidationError(_("Unable to find backup!"))
            else:
                raise forms.ValidationError(_("A backup must be selected!"))

            cleaned_data['master'] = None
        elif initial_state == 'master':
            master = self.cleaned_data['master']
            if master:
                try:
                    api.trove.instance_get(self.request, master)
                except Exception:
                    raise forms.ValidationError(
                        _("Unable to find master instance!"))
            else:
                raise forms.ValidationError(
                    _("A master instance must be selected!"))

            cleaned_data['backup'] = None
        else:
            # No initial state selected: clear both source fields.
            cleaned_data['master'] = None
            cleaned_data['backup'] = None

        return cleaned_data
class Advanced(workflows.Step):
    """Workflow step wrapping AdvancedAction."""
    action_class = AdvancedAction
    contributes = ['backup', 'master']
class LaunchInstance(workflows.Workflow):
    """Multi-step workflow that launches a trove database instance."""

    slug = "launch_instance"
    name = _("Launch Instance")
    finalize_button_name = _("Launch")
    success_message = _('Launched %(count)s named "%(name)s".')
    failure_message = _('Unable to launch %(count)s named "%(name)s".')
    success_url = "horizon:project:databases:index"
    default_steps = (SetInstanceDetails,
                     SetNetwork,
                     InitializeDatabase,
                     Advanced)

    def __init__(self, request=None, context_seed=None, entry_point=None,
                 *args, **kwargs):
        super(LaunchInstance, self).__init__(request, context_seed,
                                             entry_point, *args, **kwargs)
        # Honor the deployment's password autocomplete policy on the form.
        self.attrs['autocomplete'] = (
            settings.HORIZON_CONFIG.get('password_autocomplete'))

    def format_status_message(self, message):
        """Fill the success/failure message template with the instance name."""
        name = self.context.get('name', 'unknown instance')
        return message % {"count": _("instance"), "name": name}

    def _get_databases(self, context):
        """Returns the initial databases for this instance."""
        databases = None
        if context.get('databases'):
            dbs = context['databases']
            databases = [{'name': d.strip()} for d in dbs.split(',')]
        return databases

    def _get_users(self, context):
        """Return the initial admin user list for instance_create, or None."""
        users = None
        if context.get('user'):
            user = {
                'name': context['user'],
                'password': context['password'],
                'databases': self._get_databases(context),
            }
            if context['host']:
                user['host'] = context['host']
            users = [user]
        return users

    def _get_backup(self, context):
        """Return the restore-point dict for instance_create, or None."""
        backup = None
        if context.get('backup'):
            backup = {'backupRef': context['backup']}
        return backup

    def _get_nics(self, context):
        """Return NIC dicts for the selected network ids, or None."""
        netids = context.get('network_id', None)
        if netids:
            return [{"net-id": netid, "v4-fixed-ip": ""}
                    for netid in netids]
        else:
            return None

    def handle(self, request, context):
        """Create the instance via the trove API; return True on success."""
        try:
            # 'datastore' is contributed as "<datastore>,<version>".
            datastore = self.context['datastore'].split(',')[0]
            datastore_version = self.context['datastore'].split(',')[1]
            LOG.info("Launching database instance with parameters "
                     "{name=%s, volume=%s, flavor=%s, "
                     "datastore=%s, datastore_version=%s, "
                     "dbs=%s, users=%s, "
                     "backups=%s, nics=%s, replica_of=%s}",
                     context['name'], context['volume'], context['flavor'],
                     datastore, datastore_version,
                     self._get_databases(context), self._get_users(context),
                     self._get_backup(context), self._get_nics(context),
                     context.get('master'))
            api.trove.instance_create(request,
                                      context['name'],
                                      context['volume'],
                                      context['flavor'],
                                      datastore=datastore,
                                      datastore_version=datastore_version,
                                      databases=self._get_databases(context),
                                      users=self._get_users(context),
                                      restore_point=self._get_backup(context),
                                      nics=self._get_nics(context),
                                      replica_of=context.get('master'))
            return True
        except Exception:
            exceptions.handle(request)
            return False
| apache-2.0 |
tumbl3w33d/ansible | lib/ansible/modules/cloud/amazon/aws_netapp_cvs_FileSystems.py | 19 | 12010 | #!/usr/bin/python
# (c) 2019, NetApp Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""AWS Cloud Volumes Services - Manage fileSystem"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata describing module maturity and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_netapp_cvs_FileSystems
short_description: NetApp AWS Cloud Volumes Service Manage FileSystem.
extends_documentation_fragment:
- netapp.awscvs
version_added: '2.9'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, Update, Delete fileSystem on AWS Cloud Volumes Service.
options:
state:
description:
- Whether the specified fileSystem should exist or not.
required: true
choices: ['present', 'absent']
type: str
region:
description:
- The region to which the filesystem belongs to.
required: true
type: str
creationToken:
description:
- Name of the filesystem
required: true
type: str
quotaInBytes:
description:
- Size of the filesystem
- Required for create
type: int
serviceLevel:
description:
- Service Level of a filesystem.
choices: ['standard', 'premium', 'extreme']
type: str
exportPolicy:
description:
- The policy rules to export the filesystem
type: dict
suboptions:
rules:
description:
- Set of rules to export the filesystem
- Requires allowedClients, access and protocol
type: list
suboptions:
allowedClients:
description:
- Comma separated list of ip address blocks of the clients to access the fileSystem
- Each address block contains the starting IP address and size for the block
type: str
cifs:
description:
- Enable or disable cifs filesystem
type: bool
nfsv3:
description:
- Enable or disable nfsv3 fileSystem
type: bool
nfsv4:
description:
- Enable or disable nfsv4 filesystem
type: bool
ruleIndex:
description:
- Index number of the rule
type: int
unixReadOnly:
description:
- Should fileSystem have read only permission or not
type: bool
unixReadWrite:
description:
- Should fileSystem have read write permission or not
type: bool
'''
EXAMPLES = """
- name: Create FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
exportPolicy:
rules:
- allowedClients: 172.16.0.4
cifs: False
nfsv3: True
nfsv4: True
ruleIndex: 1
unixReadOnly: True
unixReadWrite: False
quotaInBytes: 100000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
- name: Update FileSystem
aws_netapp_cvs_FileSystems:
state: present
region: us-east-1
creationToken: newVolume-1
exportPolicy:
rules:
- allowedClients: 172.16.0.4
cifs: False
nfsv3: True
nfsv4: True
ruleIndex: 1
unixReadOnly: True
unixReadWrite: False
quotaInBytes: 200000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
- name: Delete FileSystem
  aws_netapp_cvs_FileSystems:
    state: absent
region: us-east-1
creationToken: newVolume-1
quotaInBytes: 100000000000
api_url : cds-aws-bundles.netapp.com
api_key: Q1ZRR0p0VGNuZ3VhMnJBYk5zczM1RkZ3Z0lCbUE3
secret_key : U1FwdHdKSGRQQUhIdkIwMktMU1ZCV2x6WUowZWRD
"""
RETURN = """
"""
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp_module import NetAppModule
from ansible.module_utils.netapp import AwsCvsRestAPI
class AwsCvsNetappFileSystem(object):
"""
Contains methods to parse arguments,
derive details of AWS_CVS objects
and send requests to AWS CVS via
the restApi
"""
def __init__(self):
"""
Parse arguments, setup state variables,
check parameters and ensure request module is installed
"""
self.argument_spec = netapp_utils.aws_cvs_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
region=dict(required=True, type='str'),
creationToken=dict(required=True, type='str'),
quotaInBytes=dict(required=False, type='int'),
serviceLevel=dict(required=False, choices=['standard', 'premium', 'extreme']),
exportPolicy=dict(
type='dict',
options=dict(
rules=dict(
type='list',
options=dict(
allowedClients=dict(required=False, type='str'),
cifs=dict(required=False, type='bool'),
nfsv3=dict(required=False, type='bool'),
nfsv4=dict(required=False, type='bool'),
ruleIndex=dict(required=False, type='int'),
unixReadOnly=dict(required=False, type='bool'),
unixReadWrite=dict(required=False, type='bool')
)
)
)
),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['region', 'creationToken', 'quotaInBytes']),
],
supports_check_mode=True
)
self.na_helper = NetAppModule()
# set up state variables
self.parameters = self.na_helper.set_parameters(self.module.params)
# Calling generic AWSCVS restApi class
self.restApi = AwsCvsRestAPI(self.module)
self.data = {}
for key in self.parameters.keys():
self.data[key] = self.parameters[key]
def get_filesystemId(self):
# Check given FileSystem is exists
# Return fileSystemId is found, None otherwise
list_filesystem, error = self.restApi.get('FileSystems')
if error:
self.module.fail_json(msg=error)
for FileSystem in list_filesystem:
if FileSystem['creationToken'] == self.parameters['creationToken']:
return FileSystem['fileSystemId']
return None
def get_filesystem(self, fileSystemId):
# Get FileSystem information by fileSystemId
# Return fileSystem Information
filesystemInfo, error = self.restApi.get('FileSystems/%s' % fileSystemId)
if error:
self.module.fail_json(msg=error)
else:
return filesystemInfo
return None
def is_job_done(self, response):
# check jobId is present and equal to 'done'
# return True on success, False otherwise
try:
job_id = response['jobs'][0]['jobId']
except TypeError:
job_id = None
if job_id is not None and self.restApi.get_state(job_id) == 'done':
return True
return False
def create_fileSystem(self):
# Create fileSystem
api = 'FileSystems'
response, error = self.restApi.post(api, self.data)
if not error:
if self.is_job_done(response):
return
error = "Error: unexpected response on FileSystems create: %s" % str(response)
self.module.fail_json(msg=error)
def delete_fileSystem(self, fileSystemId):
# Delete FileSystem
api = 'FileSystems/' + fileSystemId
self.data = None
response, error = self.restApi.delete(api, self.data)
if not error:
if self.is_job_done(response):
return
error = "Error: unexpected response on FileSystems delete: %s" % str(response)
self.module.fail_json(msg=error)
def update_fileSystem(self, fileSystemId):
# Update FileSystem
api = 'FileSystems/' + fileSystemId
response, error = self.restApi.put(api, self.data)
if not error:
if self.is_job_done(response):
return
error = "Error: unexpected response on FileSystems update: %s" % str(response)
self.module.fail_json(msg=error)
def apply(self):
    """Apply the desired state: create, delete, or update the file system."""
    fs_details = None
    fs_id = self.get_filesystemId()
    if fs_id:
        # An existing file system was found; pull its full record.
        fs_details = self.get_filesystem(fs_id)
    cd_action = self.na_helper.get_cd_action(fs_details, self.parameters)
    if cd_action is None and self.parameters['state'] == 'present':
        # Resource already exists and should keep existing: decide whether
        # any mutable attribute drifted and therefore requires an update.
        needs_update = False
        if fs_details['quotaInBytes'] is not None and 'quotaInBytes' in self.parameters \
                and fs_details['quotaInBytes'] != self.parameters['quotaInBytes']:
            needs_update = True
        elif fs_details['creationToken'] is not None and 'creationToken' in self.parameters \
                and fs_details['creationToken'] != self.parameters['creationToken']:
            needs_update = True
        elif fs_details['serviceLevel'] is not None and 'serviceLevel' in self.parameters \
                and fs_details['serviceLevel'] != self.parameters['serviceLevel']:
            needs_update = True
        elif fs_details['exportPolicy']['rules'] is not None and 'exportPolicy' in self.parameters:
            # Compare every existing export rule against every requested one.
            for rule_org in fs_details['exportPolicy']['rules']:
                for rule in self.parameters['exportPolicy']['rules']:
                    if rule_org['allowedClients'] != rule['allowedClients']:
                        needs_update = True
                    elif rule_org['unixReadOnly'] != rule['unixReadOnly']:
                        needs_update = True
                    elif rule_org['unixReadWrite'] != rule['unixReadWrite']:
                        needs_update = True
        if needs_update:
            self.na_helper.changed = True
    result_message = ""
    if self.na_helper.changed:
        if self.module.check_mode:
            # Check mode: report what would change without doing it.
            result_message = "Check mode, skipping changes"
        else:
            if cd_action == "create":
                self.create_fileSystem()
                result_message = "FileSystem Created"
            elif cd_action == "delete":
                self.delete_fileSystem(fs_id)
                result_message = "FileSystem Deleted"
            else:  # modify
                self.update_fileSystem(fs_id)
                result_message = "FileSystem Updated"
    self.module.exit_json(changed=self.na_helper.changed, msg=result_message)
def main():
    """Module entry point: build the module object and apply the state."""
    filesystem_module = AwsCvsNetappFileSystem()
    filesystem_module.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
mbadarkhe/davinci-next | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
# -- script setup: locate the perf trace helpers and parse argv ------------
# NOTE: this is a Python 2 perf script ('thread' module, print statements
# elsewhere in the file).
import os
import sys
import thread
import time

sys.path.append(os.environ['PERF_EXEC_PATH'] +
                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n"

for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    # Both a command filter and a refresh interval were given.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # A single argument is either an interval (number) or a comm filter.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall hit counters, cleared after every display refresh.
syscalls = autodict()
def trace_begin():
    # Launch the periodic display loop on a background thread; the main
    # thread keeps consuming trace events.
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
                            common_secs, common_nsecs, common_pid, common_comm,
                            id, args):
    # Per-event hook called by perf for every sys_enter tracepoint.
    # Ignore events from other commands when a comm filter is active.
    if for_comm is not None and common_comm != for_comm:
        return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this syscall id: the autodict slot is not an int yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display loop, run on a background thread started by trace_begin():
    # every `interval` seconds, clear the terminal, print a table of syscall
    # hit counts (highest count first), then reset the counters so each
    # refresh shows only the latest window.  Python 2: the trailing commas
    # on the print statements suppress the automatic newline.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        # Sort by (count, id) descending so the hottest syscalls print first.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name() may fail on unknown ids; skip those rows.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
from libtrust import jsonsign
from libtrust import rsa_key

# Demo RSA private key in PEM form.
# NOTE(review): this key is published in an example -- never reuse it for
# real signing; generate fresh key material instead.
pem_data = b"""-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEAwq1mCmgn460MC6MnCqranQNTgmKuKPl7bNH7Qc6hBDGHlnIj
U6q/h2KXF37TC5Y9tsKvQ4b8jd0Sf0dXFHml8qunSvNnqsSvoD8tSPUKqXS6jrlb
GSQXhya7BL1RPGccD5K1xrV73QlI6uFPd3APRQYij5EOB8IOWEQujJk/8Mjc0EC9
zvk5TUJb59hkOUPZ3CkvSBNLNS8wpQI98FRnIzHjuaNicqve8054oxmDKifHWy0n
nF135cXW8zkH3Zto1q89zD2g+zcVxLcRP84Uhe0nSQyg7vEYl4Wl74Eo6/89qL2y
E0mEiQN245ACA5B8WFV/t3j/OD3ydOCaAOg28vQtzcZ1gh2Ev4RxeR7bKq58g+R0
+MMwl7nnW29mbCkcgdVVR4YPmglP7Vb6w7/NbqFhnxx4E3A05AeevHdMdYCrtgQw
ogvIhdOHLcVQxJgwy1d2Lg/mv9rovhCJ7d3XaNEYym6CplCHPMtfnU1LCVkA6b44
pFaOVjsAQ8FviFtGXQAToRtwoszSarzslHKYdPoSGFOsgNJgW67iViOYqGPD97rg
JA0VPm0POMNHGw/R6o+08KhDF/OI1EDckmjXhUggY/WCqWDxD77Ezd/wr9Zlbv/u
SIEL9ifvBLq06lLcXMLrQbJrwMbDrngMZMAcUkTzThmtxNs4uwu45R+zfKcCAwEA
AQKCAgEAtu9aQ808LqYd+5GEznFenMzTXGJ+ZeKKKOfowx34EIi6cJUwvR5mfEIY
2OtERk8YDvVC3KGsEWL8Tr4rBgKJ/k9vFO9FKyNIJb04QKaDLlmSNSvYfvd7ZHTw
qLN98tSxebDTP7aqfjqLWqv+kK2sq5/oOiCEnqWr9SWc2GHpw8n8NXWg5y0qu37v
/h1JkMZBorDQzVnUAlYlz+kbawrlIB1xcLAngroe92N12U3QA3z9yJ/V6Qmr8S7H
niapTYUMLzDdUV9YNri8q+2bN+nfPzprACnt0JqeEUR1eWpVme5vcnFPNPCQqm+m
+JAKVG8haaBuM2pv6dnMTCgCj3emqWLVfBoc3qmi1KJT/dG54GRepIyN82jFDByK
qQGMMO5/Chf2DlRYQYBrkPI5hIZLvbU+a1K5Uf1wauNpGgiGCEjxiXsYGUPyCjAg
MmNwnNjfOO7U5KQQMV1PbEj1iPU0xw/Q7adqKd4UeD/rwaTo00KcH6K7/1pFZP3U
rkcQ5de9nI/jULIF7YCPqZxs5/dpK8HGwF5VroYIjyVm5AVh9xaE3sugxf8nsdop
LybIwcR7nk2RCibW7ClbsJd7eTrYiuPBI50Lb3I+CLczo6VgvlnnqhVDs/kYDZA9
c4j11ayAW7l4zc47cPjK6M+ggvL4zqc2n7Ba0Z2Med07hiNrHwECggEBAOqpd9SB
2jJ4V7TNAb9d2sKl87o1TH3c1RKQQNTRBnYohGq6C5X6V+rR5q5SZ2BCYxJOacF5
XU3LuXzgYKYRGffOVUfDu5zocbbGDbdESXv9+EknlHmTStpAkgfuLsvE0iAlHqcy
9XufbI+6cx1zc+n8WxvkoxpPHVKYYEtULJiRrBcNYXI2LP8rxnwD6UW9GPKrOMjN
8niiyqvqltyBY4gLK68O8NiBF5lEIMl4Xb/Q8kkj6+1qwy9gFeclelqLpaAwAT4Z
VfmbQrCLzf+RLvRkzfJBGd04joO49iBXD9DCmpz20E+Ir2FyAH0uz7dnFZacBnws
QbWkZONuaxmd74kCggEBANRhJ+vcrFjrBjflP0fYn2iu0bK37LEbI6+xRPyqVn16
gWlGii2C+g1HNyeESK/SD+DEMPYDJekzlyNehvneo17bCjgyBRxr5Fk8bP4j5Dlp
wT00x4JJQiHXr/TH1cXLTfnQf1COmshGf5qKFqFEGGmRBMMR0WNV59FaViTEghn0
H9xSRKCN7uR51ms/x46wg2ye7tGclyXVJ//iF42RQ7E8/5cGqv3ZlbFtinDlQEih
yARkF3QBsDen3u1ztzCby7faAeJhoAiiYDjfCCcfThdAqZReukJnbiACKbZ2M8GG
3k7XFNctCAAKTo9HY945pzswV52HW2ey95ru4JZ5Tq8CggEAMUMOUuBHrByzXNNE
sKXFTOOFvOt/eVSorlL2KTcQQjHTSoxv7jY1yqfx41qNNRn6rlcjwGf3/GLuN5bq
8zHX37vDD2O5uQDbDmGZc4W0X4E7ZDAY7UTMi/DONzf7Pu+8pN7mBneeLSuUoL+l
duNLzC0b+0kOLHG7WCGA5Y9wJT8/fz9h25Yf8BmCe3peuDMwT5E+RHlnk4epQFno
/bVz7ZVawE9EpE7FY3l34JOSKrh0hIIz/w1QmFt1fabSfrueM3igaibrc5DyeRmA
T0xtLQUUbuzXvycmU+S6VqOwQET6LEVsCaZKGwzRqXXwSTIsyAdNHTg1Oyqdu1js
xt3u8QKCAQB8pzHJTFKUNg4GTLXhs5GM3d8S6MUyBl3hx0hYjJoLOBYw9kgwAkpF
9OC4fvoyyDatlDmwi5R61d8F0XujuTtmd2X+Kc26KtFyVvcaC3LvB9V12T6oh9sb
Bf+uyoP8fiGcWPYBEFJk7owC5r31lYRGoemLnS+rAEb6J+2b0wMRvKnepSLGocfv
rltdw6ebZpsc7AP8X86PVBcQJ2Hvo615n/XcbPt713P3GfZB4Szj9KDzgtQJMNx/
Lja4ZEzHaQofNQQaHXbS2otjlfSxEbzCBSADh74HL7IBc4OMJsCl/EULPU5sJXAm
peYKTrqdOnWfVfZ27XWG3hJai46igzzdAoIBAEIeXITvv9rHWkRDwlzhlzDJsNgI
3IvqygBPRxVmll0kWnbn/z/7T2gVeFeMJxbMOjGGmRAoCtcFvrthaWFnQlhdQMnk
iMe3oAE5n8HaRwzxfXm40p3npvSjYazz3NDF5mEfvnDwFYYLb06uCFfsd9pA1FiU
DelU4L8FphYy+5a8Yqt9P08mKKYtqaop5xURrjI/IFg0Yv2JjsK4ouPbepprXmEu
PEfB+fQ6ms1alyDDHNpyFLfS2bOUBs6aGJMgbDHQiBZiVeZhxXgsVQ+kuft67w/3
wkogkU/eGEG2HR5CkJ59yaVJp6SMK2gcTSRK40bPj0UwMzEgLbPopv2wgF0=
-----END RSA PRIVATE KEY-----
"""

# Sign Content
# The content may be e.g. an image manifest.
content = {
    'hello': '123456'
}

js = jsonsign.JSONSignature.from_map(content)
rsa_private_key = rsa_key.RSAPrivateKey.from_pem(pem_data)
# A fixed timestamp keeps the example's output deterministic.
js.sign(rsa_private_key, timestamp=1478423072)

# Get jws
jws = js.jws()

# Parse from jws (round-trip: the re-serialized JWS must match the original)
js2 = jsonsign.JSONSignature.parse_jws(jws)
jws2 = js2.jws()
print(jws, jws2, jws == jws2)

# Verify jws
print(js.verify() == js2.verify())
| apache-2.0 |
lindamar/ecclesi | env/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/pyodbc.py | 17 | 10416 | # mssql/pyodbc.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL here is to be translated to PyODBC connection strings, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN-based connection is **preferred** overall when using ODBC. A
basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
Which above, will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are **not preferred**, however are supported.
The ODBC driver name must be explicitly specified::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
SQL Server driver name specified explicitly. SQLAlchemy cannot
choose an optimal default here as it varies based on platform
and installed drivers.
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent exactly as specified in
`ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
into the driver using the parameter ``odbc_connect``. The delimiters must be URL escaped, however,
as illustrated below using ``urllib.quote_plus``::
import urllib
params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Unicode Binds
-------------
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of
UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
dramatically alter how strings are received. The PyODBC dialect attempts to
use all the information it knows to determine whether or not a Python unicode
literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
can encode these to bytestrings first, some users have reported that PyODBC
mis-handles bytestrings for certain encodings and requires a Python unicode
object, while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
"""
from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util, exc
import decimal
import re
class _ms_numeric_pyodbc(object):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_ms_numeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int) - 1)))
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
    # Numeric type with the pyodbc Decimal-to-string workaround mixed in.
    pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
    # Float type with the pyodbc Decimal-to-string workaround mixed in.
    pass
class _VARBINARY_pyodbc(VARBINARY):
    """VARBINARY variant that wraps bound values for the pyodbc driver."""

    def bind_processor(self, dialect):
        if dialect.dbapi is None:
            return None

        wrap_binary = dialect.dbapi.Binary

        def process(value):
            if value is None:
                # pyodbc-specific sentinel for a NULL binary value.
                return dialect.dbapi.BinaryNull
            return wrap_binary(value)

        return process
class MSExecutionContext_pyodbc(MSExecutionContext):
    # True when "; select scope_identity()" was appended to the INSERT and
    # post_exec() must fish the last row id out of the trailing result set.
    _embedded_scope_identity = False

    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same
        statement.

        Background on why "scope_identity()" is preferable to "@@identity":
        http://msdn.microsoft.com/en-us/library/ms190315.aspx

        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?

        """
        super(MSExecutionContext_pyodbc, self).pre_exec()

        # don't embed the scope_identity select into an
        # "INSERT .. DEFAULT VALUES"
        if self._select_lastrowid and \
                self.dialect.use_scope_identity and \
                len(self.parameters[0]):
            self._embedded_scope_identity = True
            self.statement += "; select scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement
            # We may have to skip over a number of result sets with
            # no data (due to triggers, etc.)
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error as e:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    self.cursor.nextset()

            self._lastrowid = int(row[0])
        else:
            super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
    """MSSQL dialect running on top of the pyodbc DBAPI connector."""

    execution_ctx_cls = MSExecutionContext_pyodbc

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pyodbc,
            sqltypes.Float: _MSFloat_pyodbc,
            VARBINARY: _VARBINARY_pyodbc,
            sqltypes.LargeBinary: _VARBINARY_pyodbc,
        }
    )

    def __init__(self, description_encoding=None, **params):
        """Construct the dialect.

        :param description_encoding: optional encoding used to decode
         cursor.description values.

        Bug fix: ``description_encoding`` is an explicit keyword, so it can
        never appear inside ``**params``; the former
        ``'description_encoding' in params`` check was dead code and the
        option was silently ignored.  Honor the keyword directly instead.
        """
        if description_encoding is not None:
            self.description_encoding = description_encoding
        super(MSDialect_pyodbc, self).__init__(**params)
        # scope_identity needs cursor.nextset() to skip trigger result sets.
        self.use_scope_identity = self.use_scope_identity and \
            self.dbapi and \
            hasattr(self.dbapi.Cursor, 'nextset')
        # pyodbc < 2.1.8 mishandles extreme Decimal exponents; see
        # _ms_numeric_pyodbc.
        self._need_decimal_fix = self.dbapi and \
            self._dbapi_version() < (2, 1, 8)

    def _get_server_version_info(self, connection):
        """Return the server version as a tuple of ints/strings."""
        try:
            raw = connection.scalar("SELECT SERVERPROPERTY('ProductVersion')")
        except exc.DBAPIError:
            # SQL Server docs indicate this function isn't present prior to
            # 2008; additionally, unknown combinations of pyodbc aren't
            # able to run this query.
            return super(MSDialect_pyodbc, self).\
                _get_server_version_info(connection)
        else:
            version = []
            # Split on dots and dashes; raw string avoids the invalid
            # '\-' escape-sequence warning of the non-raw form.
            r = re.compile(r'[.\-]')
            for n in r.split(raw):
                try:
                    version.append(int(n))
                except ValueError:
                    version.append(n)
            return tuple(version)


dialect = MSDialect_pyodbc
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.