repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
jaloren/robotframework | src/robot/utils/markuputils.py | 6 | 1553 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .htmlformatters import LinkFormatter, HtmlFormatter
# Pre-bound formatter that turns bare URLs in already-escaped text into links.
_format_url = LinkFormatter().format_url
_generic_escapes = (('&', '&'), ('<', '<'), ('>', '>'))
_attribute_escapes = _generic_escapes \
+ (('"', '"'), ('\n', ' '), ('\r', ' '), ('\t', '	'))
_illegal_chars_in_xml = re.compile(u'[\x00-\x08\x0B\x0C\x0E-\x1F\uFFFE\uFFFF]')
def html_escape(text):
    """Escape HTML special characters and convert bare URLs into links."""
    escaped = _escape(text)
    return _format_url(escaped)
def xml_escape(text):
    """Escape XML special characters and drop characters illegal in XML."""
    escaped = _escape(text)
    return _illegal_chars_in_xml.sub('', escaped)
def html_format(text):
    """Escape the text and render wiki-style markup as HTML."""
    escaped = _escape(text)
    return HtmlFormatter().format(escaped)
def attribute_escape(attr):
    """Escape text for safe use as an HTML/XML attribute value."""
    escaped = _escape(attr, _attribute_escapes)
    return _illegal_chars_in_xml.sub('', escaped)
def _escape(text, escapes=_generic_escapes):
    """Apply the given (original, replacement) pairs to the text, in order."""
    for original, replacement in escapes:
        # Skipping the replace() call is cheaper for the common case where
        # the character does not occur in the text at all.
        if original in text:
            text = text.replace(original, replacement)
    return text
| apache-2.0 |
bingwen/shadowsocks | shadowsocks/shell.py | 652 | 12736 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
# Custom log level even more detailed than DEBUG; enabled with -vv.
VERBOSE_LEVEL = 5
# Module-level verbosity count; set by get_config(), read by print_exception().
verbose = 0
def check_python():
    """Exit with an error message unless running on Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2:
        if minor < 6:
            print('Python 2.6+ required')
            sys.exit(1)
    elif major == 3:
        if minor < 3:
            print('Python 3.3+ required')
            sys.exit(1)
    else:
        print('Python version not supported')
        sys.exit(1)
def print_exception(e):
    """Log an exception; also print the full traceback when verbose mode is on."""
    global verbose
    logging.error(e)
    if verbose > 0:
        import traceback
        traceback.print_exc()
def print_shadowsocks():
    """Print the installed shadowsocks version (blank when undetectable)."""
    try:
        import pkg_resources
        version = pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        # setuptools may be unavailable or the package not installed.
        version = ''
    print('Shadowsocks %s' % version)
def find_config():
    """Return the path of config.json, or None if no candidate exists.

    The current working directory is tried first, then the parent of the
    package directory.
    """
    candidates = [
        'config.json',
        os.path.join(os.path.dirname(__file__), '../', 'config.json'),
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
def check_config(config, is_local):
    """Validate the parsed configuration, warn about risky settings and exit
    the process on fatal problems.

    `is_local` selects between the client (sslocal) and server (ssserver)
    validation rules.
    """
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return
    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)
    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None) \
            and not config.get('manager_address'):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])
    # server_port may be a single port or a list of ports (port_password mode)
    if config.get('server_port', None) and \
            not isinstance(config['server_port'], list):
        config['server_port'] = int(config['server_port'])
    # Bug fix: local_address has been converted to str by get_config(), so the
    # old bytes-only comparison (b'0.0.0.0') could never match on Python 3 and
    # the warning was dead code.  Accept both representations.
    if config.get('local_address', '') in ('0.0.0.0', b'0.0.0.0'):
        logging.warning('warning: local set to listen on 0.0.0.0, '
                        'it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warning('warning: server set to listen on %s:%s, are you sure?'
                        % (to_str(config['server']), config['server_port']))
    if (config.get('method', '') or '').lower() == 'table':
        logging.warning('warning: table is not safe; please use a safer '
                        'cipher, like AES-256-CFB')
    if (config.get('method', '') or '').lower() == 'rc4':
        logging.warning('warning: RC4 is not safe; please use a safer cipher, '
                        'like AES-256-CFB')
    if config.get('timeout', 300) < 100:
        logging.warning('warning: your timeout %d seems too short' %
                        int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warning('warning: your timeout %d seems too long' %
                        int(config.get('timeout')))
    # password is already bytes here (to_bytes in get_config)
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)
    # Fail fast if the cipher/password combination is unusable.
    encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
    """Assemble the effective configuration from argv and the JSON config file.

    Command line options override values loaded from the config file.  Also
    re-initializes logging according to the requested verbosity and validates
    the result via check_config().  Exits the process on any error.

    :return: the configuration dict
    """
    global verbose
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
                    'version']
    else:
        shortopts = 'hd:s:p:k:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
                    'forbidden-ip=', 'user=', 'manager-address=', 'version']
    try:
        config_path = find_config()
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        # An explicit -c overrides the automatically discovered config file.
        for key, value in optlist:
            if key == '-c':
                config_path = value
        if config_path:
            logging.info('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(f.read().decode('utf8'))
                except ValueError as e:
                    # Bug fix: ValueError has no .message attribute on
                    # Python 3 (it raised AttributeError here); log the
                    # exception object itself, which works on both 2 and 3.
                    logging.error('found an error in config.json: %s', e)
                    sys.exit(1)
        else:
            config = {}
        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value).split(',')
            elif key in ('-h', '--help'):
                if is_local:
                    print_local_help()
                else:
                    print_server_help()
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                v_count -= 1
                config['verbose'] = v_count
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)
    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)
    # Fill in defaults for anything not supplied via file or command line.
    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['port_password'] = config.get('port_password', None)
    config['timeout'] = int(config.get('timeout', 300))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
    config['verbose'] = config.get('verbose', False)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        config['server'] = to_str(config.get('server', '0.0.0.0'))
        try:
            config['forbidden_ip'] = \
                IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
    config['server_port'] = config.get('server_port', None)
    # Re-initialize logging with the level implied by the -v/-q count.
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    check_config(config, is_local)
    return config
def print_help(is_local):
    """Print the usage text for sslocal or ssserver, as appropriate."""
    helper = print_local_help if is_local else print_server_help
    helper()
def print_local_help():
    # Usage text for the sslocal client; keep the options listed here in
    # sync with the shortopts/longopts parsed in get_config().
    print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
    """Print command line usage for the ssserver daemon.

    Keep the options listed here in sync with the shortopts/longopts parsed
    in get_config().  (Fixes the 'seperated' typo in the --forbidden-ip line.)
    """
    print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address, default: 0.0.0.0
  -p SERVER_PORT         server port, default: 8388
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
  --workers WORKERS      number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma separated IP list forbidden to connect
  --manager-address ADDR optional server manager UDP address, see wiki
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
    # parse json and convert everything from unicode to str
    # (the object_hook runs _decode_dict on every decoded JSON object,
    # which encodes string values to UTF-8 bytes recursively)
    return json.loads(data, object_hook=_decode_dict)
| apache-2.0 |
sgraham/nope | tools/perf/measurements/smoothness.py | 1 | 1618 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import power
from measurements import smoothness_controller
from telemetry.page import page_test
class Smoothness(page_test.PageTest):
  """Measures rendering smoothness and power usage for a page.

  Delegates the actual measurement to a SmoothnessController and a
  PowerMetric, wiring them into the telemetry page-test lifecycle hooks.
  """

  def __init__(self):
    super(Smoothness, self).__init__()
    # Created lazily in WillStartBrowser / WillNavigateToPage.
    self._power_metric = None
    self._smoothness_controller = None

  @classmethod
  def CustomizeBrowserOptions(cls, options):
    # Flags required for GPU benchmarking and synthetic touch input.
    options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
    options.AppendExtraBrowserArgs('--touch-events=enabled')
    options.AppendExtraBrowserArgs('--running-performance-benchmark')
    power.PowerMetric.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, platform):
    self._power_metric = power.PowerMetric(platform)

  def WillNavigateToPage(self, page, tab):
    # A fresh controller per page; set up before navigation so tracing
    # covers the whole page load.
    self._power_metric.Start(page, tab)
    self._smoothness_controller = smoothness_controller.SmoothnessController()
    self._smoothness_controller.SetUp(page, tab)

  def WillRunActions(self, page, tab):
    self._smoothness_controller.Start(tab)

  def DidRunActions(self, page, tab):
    self._power_metric.Stop(page, tab)
    self._smoothness_controller.Stop(tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    self._power_metric.AddResults(tab, results)
    self._smoothness_controller.AddResults(tab, results)

  def CleanUpAfterPage(self, page, tab):
    # Guards: the page may have failed before setup completed, leaving
    # either member as None.
    if self._power_metric:
      self._power_metric.Stop(page, tab)
    if self._smoothness_controller:
      self._smoothness_controller.CleanUp(tab)
| bsd-3-clause |
gurneyalex/sale-workflow | __unported__/sale_exception_nostock/model/sale.py | 2 | 6153 | # -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import datetime
from openerp.osv import orm
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT)
class sale_order_line(orm.Model):
    """Adds two exception functions to be called by the sale_exceptions module.

    The first one will ensure that an order line can be delivered on the
    delivery date, if the related product is in MTS. Validation is done by
    using the shop related to the sales order line location and using the line
    delay.

    The second one will raise a sales exception if the current SO will break an
    order already placed in future
    """
    _inherit = "sale.order.line"

    def _compute_line_delivery_date(self, line_br, context=None):
        # Expected delivery date = order date + the line's lead time (delay).
        date_order = line_br.order_id.date_order
        date_order = datetime.datetime.strptime(date_order,
                                                DEFAULT_SERVER_DATE_FORMAT)
        # delay is a float, that is perfectly supported by timedelta
        return date_order + datetime.timedelta(days=line_br.delay)

    def _get_line_location(self, line_br, context=None):
        # Stock location of the warehouse of the shop linked to the order.
        return line_br.order_id.shop_id.warehouse_id.lot_stock_id.id

    def can_command_at_delivery_date(self, cr, uid, l_id, context=None):
        """Predicate that checks whether a SO line can be delivered at delivery date.

        Delivery date is computed using date of the order + line delay.
        Location is taken from the shop linked to the line

        :return: True if line can be delivered on time
        """
        if context is None:
            context = {}
        prod_obj = self.pool['product.product']
        if isinstance(l_id, (tuple, list)):
            assert len(l_id) == 1, "Only one id supported"
            l_id = l_id[0]
        line = self.browse(cr, uid, l_id, context=context)
        # Only MTS (make to stock) lines are constrained by stock levels.
        if not line.product_id or line.type != 'make_to_stock':
            return True
        delivery_date = self._compute_line_delivery_date(line, context=context)
        # to_date / location / compute_child in the context drive the virtual
        # stock computation performed by product.product when reading
        # virtual_available.
        ctx = context.copy()
        ctx['to_date'] = delivery_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        ctx['location'] = self._get_line_location(line, context=context)
        ctx['compute_child'] = True
        # Virtual qty is made on all childs of chosen location
        prod_for_virtual_qty = prod_obj.read(cr, uid, line.product_id.id,
                                             ['virtual_available'],
                                             context=ctx)
        if prod_for_virtual_qty['virtual_available'] < line.product_uom_qty:
            return False
        return True

    def _get_states(self):
        # Stock move states considered as pending/reserved demand.
        return ('waiting', 'confirmed', 'assigned')

    def _get_affected_dates(
        self, cr, location_id, product_id, delivery_date, context=None
    ):
        """Determine future dates where virtual stock has to be checked.

        It will only look for stock move that pass by location_id.
        If your stock location have children or you have configured automated
        stock action
        they must pass by the location related to SO line, else the will be
        ignored

        :param location_id: location id to be checked
        :param product_id: product id te be checked
        :return: list of dates to be checked
        """
        # Parameterized query: values are passed to cr.execute, so no SQL is
        # built via string formatting.
        sql = ("SELECT date FROM stock_move"
               " WHERE state IN %s"
               " AND date > %s"
               " AND product_id = %s"
               " AND location_id = %s")
        cr.execute(sql, (self._get_states(),
                         delivery_date,
                         product_id,
                         location_id))
        # Generator over the matching move dates.
        return (row[0] for row in cr.fetchall())

    def future_orders_are_affected(self, cr, uid, l_id, context=None):
        """Predicate function that is a naive workaround for the lack of stock
        reservation.

        This can be a performance killer, you should not use it
        if you have constantly a lot of running Orders

        :return: True if future order are affected by current command line
        """
        if context is None:
            context = {}
        prod_obj = self.pool['product.product']
        if isinstance(l_id, (tuple, list)):
            assert len(l_id) == 1, "Only one id supported"
            l_id = l_id[0]
        line = self.browse(cr, uid, l_id, context=context)
        if not line.product_id or not line.type == 'make_to_stock':
            return False
        delivery_date = self._compute_line_delivery_date(line, context=context)
        ctx = context.copy()
        location_id = self._get_line_location(line, context=context)
        ctx['location'] = location_id
        ctx['compute_child'] = True
        # Virtual qty is made on all childs of chosen location
        dates = self._get_affected_dates(cr, location_id, line.product_id.id,
                                         delivery_date, context=context)
        # Re-check the projected stock at every future demand date; any
        # shortfall means this line would starve an already-placed order.
        for aff_date in dates:
            ctx['to_date'] = aff_date
            prod_for_virtual_qty = prod_obj.read(cr, uid, line.product_id.id,
                                                 ['virtual_available'],
                                                 context=ctx)
            if prod_for_virtual_qty[
                'virtual_available'
            ] < line.product_uom_qty:
                return True
        return False
| agpl-3.0 |
Gaia3D/QGIS | python/plugins/processing/algs/qgis/SelectByExpression.py | 6 | 3247 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByExpression.py
---------------------
Date : July 2014
Copyright : (C) 2014 by Michaël Douchin
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Douchin'
__date__ = 'July 2014'
__copyright__ = '(C) 2014, Michael Douchin'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import processing
from qgis.core import QgsExpression, QgsFeatureRequest
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterString
class SelectByExpression(GeoAlgorithm):
    """Selects features of a vector layer that match a QGIS expression."""

    LAYERNAME = 'LAYERNAME'
    EXPRESSION = 'EXPRESSION'
    RESULT = 'RESULT'
    METHOD = 'METHOD'
    METHODS = ['creating new selection', 'adding to current selection',
               'removing from current selection']

    def defineCharacteristics(self):
        self.name = 'Select by expression'
        self.group = 'Vector selection tools'
        self.addParameter(ParameterVector(self.LAYERNAME,
            self.tr('Input Layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterString(self.EXPRESSION,
            self.tr("Expression")))
        self.addParameter(ParameterSelection(self.METHOD,
            self.tr('Modify current selection by'), self.METHODS, 0))
        self.addOutput(OutputVector(self.RESULT, self.tr('Output'), True))

    def processAlgorithm(self, progress):
        layer_path = self.getParameterValue(self.LAYERNAME)
        layer = processing.getObject(layer_path)
        previous_ids = set(layer.selectedFeaturesIds())
        selection_mode = self.getParameterValue(self.METHOD)

        # Build a QGIS feature request from the user-supplied expression;
        # abort with the parser message if the expression is invalid.
        expression_text = self.getParameterValue(self.EXPRESSION)
        expression = QgsExpression(expression_text)
        if expression.hasParserError():
            raise GeoAlgorithmExecutionException(expression.parserErrorString())
        request = QgsFeatureRequest(expression)

        matched_ids = [feature.id() for feature in layer.getFeatures(request)]
        if selection_mode == 1:
            # add matches to the current selection
            matched_ids = list(previous_ids.union(matched_ids))
        elif selection_mode == 2:
            # remove matches from the current selection
            matched_ids = list(previous_ids.difference(matched_ids))

        # Apply the new selection and forward the layer as the output.
        layer.setSelectedFeatures(matched_ids)
        self.setOutputValue(self.RESULT, layer_path)
| gpl-2.0 |
eXistenZNL/SickRage | lib/sqlalchemy/dialects/postgresql/pg8000.py | 78 | 3930 | # postgresql/pg8000.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pg8000
:name: pg8000
:dbapi: pg8000
:connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pybrary.net/pg8000/
Unicode
-------
pg8000 requires that the postgresql client encoding be
configured in the postgresql.conf file in order to use encodings
other than ascii. Set this value to the same value as the
"encoding" parameter on create_engine(), usually "utf-8".
Interval
--------
Passing data from/to the Interval type is not supported as of
yet.
"""
from ... import util, exc
import decimal
from ... import processors
from ... import types as sqltypes
from .base import PGDialect, \
PGCompiler, PGIdentifierPreparer, PGExecutionContext,\
_DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES
class _PGNumeric(sqltypes.Numeric):
    def result_processor(self, dialect, coltype):
        """Return a result-row conversion function for `coltype`, or None
        when pg8000 already returns the desired Python type natively.
        """
        if coltype in _FLOAT_TYPES:
            if self.asdecimal:
                return processors.to_decimal_processor_factory(
                    decimal.Decimal,
                    self._effective_decimal_return_scale)
            # pg8000 returns float natively for 701
            return None
        elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
            if self.asdecimal:
                # pg8000 returns Decimal natively for 1700
                return None
            return processors.to_float
        else:
            raise exc.InvalidRequestError(
                "Unknown PG numeric type: %d" % coltype)
class _PGNumericNoBind(_PGNumeric):
    # pg8000 accepts Decimal objects directly, so no bind-side conversion
    # is needed for Numeric columns.
    def bind_processor(self, dialect):
        return None
class PGExecutionContext_pg8000(PGExecutionContext):
    # No pg8000-specific execution context behavior is required; the
    # subclass exists so the dialect exposes a distinct, extensible type.
    pass
class PGCompiler_pg8000(PGCompiler):
    def visit_mod_binary(self, binary, operator, **kw):
        # pg8000 uses the "format" paramstyle, so the literal percent sign
        # of the modulo operator must be doubled to escape it.
        left = self.process(binary.left, **kw)
        right = self.process(binary.right, **kw)
        return left + " %% " + right

    def post_process_text(self, text):
        # Escape '%' in raw text() SQL for the "format" paramstyle; warn if
        # the caller appears to have pre-escaped it already.
        if '%%' in text:
            util.warn("The SQLAlchemy postgresql dialect "
                      "now automatically escapes '%' in text() "
                      "expressions to '%%'.")
        return text.replace('%', '%%')
class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
    def _escape_identifier(self, value):
        # Escape embedded quote characters first, then double any '%' so
        # the identifier survives pg8000's "format" paramstyle.
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        return escaped.replace('%', '%%')
class PGDialect_pg8000(PGDialect):
    # DBAPI driver name for this dialect.
    driver = 'pg8000'
    supports_unicode_statements = True
    supports_unicode_binds = True
    # pg8000 uses the 'format' (printf-style) paramstyle, hence the '%'
    # escaping performed by the compiler/preparer subclasses above.
    default_paramstyle = 'format'
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_pg8000
    statement_compiler = PGCompiler_pg8000
    preparer = PGIdentifierPreparer_pg8000
    description_encoding = 'use_encoding'
    # Override the generic numeric types with pg8000-aware variants.
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumericNoBind,
            sqltypes.Float: _PGNumeric
        }
    )
    @classmethod
    def dbapi(cls):
        # Imported lazily so the dialect module can load without pg8000
        # being installed.
        return __import__('pg8000').dbapi
    def create_connect_args(self, url):
        # Translate the SQLAlchemy URL into pg8000 connect() keyword args;
        # pg8000 takes no positional arguments.
        opts = url.translate_connect_args(username='user')
        if 'port' in opts:
            opts['port'] = int(opts['port'])
        opts.update(url.query)
        return ([], opts)
    def is_disconnect(self, e, connection, cursor):
        # pg8000 reports a dead connection via this error message text.
        return "connection is closed" in str(e)
# Entry point looked up by SQLAlchemy's dialect registry.
dialect = PGDialect_pg8000
| gpl-3.0 |
40223139/39g7test | static/Brython3.1.3-20150514-095342/Lib/signal.py | 743 | 1646 | """This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
# Signal number constants for the browser environment.  NOTE(review): the
# values appear to mirror CPython on Windows (e.g. NSIG=23, SIGABRT=22,
# CTRL_C_EVENT/CTRL_BREAK_EVENT) -- confirm against the platform Brython
# intends to emulate.
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
# Sentinels for the default and ignore handlers, as in CPython.
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler) :
    """Stub: accept and ignore a handler registration.

    The browser sandbox delivers no OS signals, so there is nothing to do.
    """
    pass
| gpl-3.0 |
gnperumal/exscript | src/Exscript/parselib/Lexer.py | 7 | 6515 | # Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Exscript.parselib.Exception import LexerException, \
CompileError, \
ExecuteError
class Lexer(object):
    """Tokenizer/driver that feeds an input string to a parser class.

    Maintains the current character position, a stack of pluggable grammars
    (sequences of (token_type, compiled_regex) pairs) and a one-token
    lookahead buffer.  Errors are reported together with the offending
    source line and a caret/underline marker pointing at the token.
    """

    def __init__(self, parser_cls, *args, **kwargs):
        """
        The given args are passed to the parser_cls constructor.
        """
        self.parser_cls      = parser_cls
        self.parser_cls_args = args
        self.filename        = None
        self.input           = ''
        self.input_length    = 0
        self.current_char    = 0
        self.last_char       = 0
        self.token_buffer    = None
        self.grammar         = []
        self.debug           = kwargs.get('debug', False)

    def set_grammar(self, grammar):
        # Push a grammar and invalidate the lookahead so the next token is
        # matched against the new rules.
        self.grammar.append(grammar)
        self.token_buffer = None

    def restore_grammar(self):
        # Pop back to the previous grammar; the lookahead must be re-matched.
        self.grammar.pop()
        self.token_buffer = None

    def match(self):
        # Fill the one-token lookahead buffer from the current position,
        # trying the rules of the innermost grammar in order.
        if self.current_char >= self.input_length:
            self.token_buffer = ('EOF', '')
            return
        for token_type, token_regex in self.grammar[-1]:
            match = token_regex.match(self.input, self.current_char)
            if match is not None:
                self.token_buffer = (token_type, match.group(0))
                #print "Match:", self.token_buffer
                return
        end = self.input.find('\n', self.current_char + 2)
        error = 'Invalid syntax: %s' % repr(self.input[self.current_char:end])
        self.syntax_error(error)

    def _get_line_number_from_char(self, char):
        # Line numbers are 1-based.
        return self.input[:char].count('\n') + 1

    def _get_current_line_number(self):
        return self._get_line_number_from_char(self.current_char)

    def _get_line(self, number):
        # `number` is a 1-based line number.
        return self.input.split('\n')[number - 1]

    def get_current_line(self):
        line = self._get_current_line_number()
        return self._get_line(line)

    def _get_line_from_char(self, char):
        line = self._get_line_number_from_char(char)
        return self._get_line(line)

    def _get_line_position_from_char(self, char):
        # Return (start, end) character offsets of the line containing char.
        line_start = char
        while line_start != 0:
            if self.input[line_start - 1] == '\n':
                break
            line_start -= 1
        line_end = self.input.find('\n', char)
        return line_start, line_end

    def _error(self, exc_cls, error, sender = None):
        # Raise exc_cls; when a sender token is given, include the source
        # line with a caret (single char) or '---' underline (longer token)
        # pointing at the token, plus file name and line number.
        if not sender:
            raise exc_cls('\n' + error)
        start, end = self._get_line_position_from_char(sender.end)
        line_number = self._get_line_number_from_char(sender.end)
        line = self._get_line(line_number)
        offset = sender.start - start
        token_len = sender.end - sender.start
        output = line + '\n'
        if token_len <= 1:
            output += (' ' * offset) + '^\n'
        else:
            output += (' ' * offset) + "'" + ('-' * (token_len - 2)) + "'\n"
        output += '%s in %s:%s' % (error, self.filename, line_number)
        raise exc_cls('\n' + output)

    def error(self, error, sender = None, exc_cls = LexerException):
        self._error(exc_cls, error, sender)

    def syntax_error(self, error, sender = None):
        # Parse/compile-time error.
        self._error(CompileError, error, sender)

    def runtime_error(self, error, sender = None):
        # Execution-time error.
        self._error(ExecuteError, error, sender)

    def forward(self, chars = 1):
        # Advance the position and invalidate the lookahead buffer.
        self.last_char = self.current_char
        self.current_char += chars
        self.token_buffer = None

    def next(self):
        # Consume the buffered (lookahead) token, if any.
        if self.token_buffer:
            self.forward(len(self.token_buffer[1]))

    def next_if(self, types, token = None):
        # Consume the next token if it matches the given type(s) (and,
        # optionally, exact token text).  Returns 1 on success, else 0.
        if token is not None:
            if self.current_is(types, token):
                self.next()
                return 1
            return 0
        if type(types) != type([]):
            types = [types]
        for t in types:
            if self.current_is(t, token):
                self.next()
                return 1
        return 0

    def skip(self, types, token = None):
        # Consume a run of matching tokens.
        while self.next_if(types, token):
            pass

    def expect(self, sender, type, token = None):
        # Consume the next token, raising a syntax error if it does not
        # match the expected type (and optional token text).
        cur_type, cur_token = self.token()
        if self.next_if(type, token):
            return
        if token:
            error = 'Expected "%s" but got %s %s'
            error = error % (token, cur_type, repr(cur_token))
        else:
            error = 'Expected %s but got %s (%s)'
            error = error % (type, cur_type, repr(cur_token))
        # In this case we do not point to the token that raised the error,
        # but to the actual position of the lexer.
        sender.start = self.current_char
        sender.end = self.current_char + 1
        self.syntax_error(error, sender)

    def current_is(self, type, token = None):
        # Peek: does the lookahead token have the given type (and value)?
        # Returns 1 or 0.
        if self.token_buffer is None:
            self.match()
        if self.token_buffer[0] != type:
            return 0
        if token is None:
            return 1
        if self.token_buffer[1] == token:
            return 1
        return 0

    def token(self):
        # Return the lookahead token, matching it first if needed.
        if self.token_buffer is None:
            self.match()
        return self.token_buffer

    def parse(self, string, filename = None):
        # Re-initialize, so that the same lexer instance may be used multiple
        # times.
        self.filename = filename
        self.input = string
        self.input_length = len(string)
        self.current_char = 0
        self.last_char = 0
        self.token_buffer = None
        self.grammar = []
        compiled = self.parser_cls(self, *self.parser_cls_args)
        if self.debug > 3:
            compiled.dump()
        return compiled

    def parse_file(self, filename):
        # Convenience wrapper: read the file and parse its contents.
        with open(filename) as fp:
            return self.parse(fp.read(), filename)
| gpl-2.0 |
halostatue/ansible | lib/ansible/errors/__init__.py | 163 | 7342 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors.yaml_strings import *
class AnsibleError(Exception):
    '''
    This is the base class for all errors raised from Ansible code,
    and can be instantiated with two optional parameters beyond the
    error message to control whether detailed information is displayed
    when the error occurred while parsing a data file of some kind.

    Usage:

        raise AnsibleError('some message here', obj=obj, show_content=True)

    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
    which should be returned by the DataLoader() class.
    '''

    def __init__(self, message, obj=None, show_content=True):
        # we import this here to prevent an import loop problem,
        # since the objects code also imports ansible.errors
        from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject

        self._obj = obj
        self._show_content = show_content
        # Always set the base message first: previously, when obj was a YAML
        # object but _get_extended_error() returned an empty string,
        # self.message was never assigned and __str__ raised AttributeError.
        self.message = 'ERROR! %s' % message
        if obj and isinstance(obj, AnsibleBaseYAMLObject):
            extended_error = self._get_extended_error()
            if extended_error:
                self.message = 'ERROR! %s\n\n%s' % (message, extended_error)

    def __str__(self):
        return self.message

    def __repr__(self):
        return self.message

    def _get_error_lines_from_file(self, file_name, line_number):
        '''
        Returns the line in the file which corresponds to the reported error
        location, as well as the line preceding it (if the error did not
        occur on the first line), to provide context to the error.
        '''
        target_line = ''
        prev_line = ''
        with open(file_name, 'r') as f:
            lines = f.readlines()
            # May raise IndexError if the file shrank since the error was
            # recorded; the caller catches that and reports it.
            target_line = lines[line_number]
            if line_number > 0:
                prev_line = lines[line_number - 1]
        return (target_line, prev_line)

    def _get_extended_error(self):
        '''
        Given an object reporting the location of the exception in a file, return
        detailed information regarding it including:

        * the line which caused the error as well as the one preceding it
        * causes and suggested remedies for common syntax errors

        If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (ie. vault data).
        '''
        error_message = ''
        try:
            (src_file, line_number, col_number) = self._obj.ansible_pos
            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
            if src_file not in ('<string>', '<unicode>') and self._show_content:
                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
                if target_line:
                    stripped_line = target_line.replace(" ", "")
                    arrow_line = (" " * (col_number - 1)) + "^ here"
                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)

                    # common error/remediation checking here:
                    # check for unquoted vars starting lines
                    # A variable is "unquoted" only when NEITHER quoted form is
                    # present, hence 'and' (with the previous 'or' this branch
                    # fired for nearly every line containing a variable).
                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line and "'{{" not in target_line):
                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
                    # check for common dictionary mistakes
                    elif ":{{" in stripped_line and "}}" in stripped_line:
                        error_message += YAML_COMMON_DICT_ERROR
                    # check for common unquoted colon mistakes
                    # (len(x) and len(x) > 1 collapsed to just len(x) > 1)
                    elif len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
                    # otherwise, check for some common quoting mistakes
                    else:
                        parts = target_line.split(":")
                        if len(parts) > 1:
                            middle = parts[1].strip()
                            match = False
                            unbalanced = False

                            if middle.startswith("'") and not middle.endswith("'"):
                                match = True
                            elif middle.startswith('"') and not middle.endswith('"'):
                                match = True

                            # Parenthesized: with the previous precedence the
                            # trailing "or count('\"') > 2" stood alone, so any
                            # line containing three double quotes was flagged
                            # as unbalanced regardless of the other tests.
                            if (len(middle) > 0 and middle[0] in ['"', "'"] and middle[-1] in ['"', "'"]
                                    and (target_line.count("'") > 2 or target_line.count('"') > 2)):
                                unbalanced = True

                            if match:
                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
                            if unbalanced:
                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
        except (IOError, TypeError):
            error_message += '\n(could not open file to display line)'
        except IndexError:
            error_message += '\n(specified line no longer in file, maybe it changed?)'

        return error_message
# Concrete exception hierarchy. All inherit AnsibleError's location-aware
# message formatting; subclasses exist purely so callers can catch narrowly.
class AnsibleOptionsError(AnsibleError):
    ''' bad or incomplete options passed '''
    pass

class AnsibleParserError(AnsibleError):
    ''' something was detected early that is wrong about a playbook or data file '''
    pass

class AnsibleInternalError(AnsibleError):
    ''' internal safeguards tripped, something happened in the code that should never happen '''
    pass

class AnsibleRuntimeError(AnsibleError):
    ''' ansible had a problem while running a playbook '''
    pass

class AnsibleModuleError(AnsibleRuntimeError):
    ''' a module failed somehow '''
    pass

class AnsibleConnectionFailure(AnsibleRuntimeError):
    ''' the transport / connection_plugin had a fatal error '''
    pass

class AnsibleFilterError(AnsibleRuntimeError):
    ''' a templating failure '''
    pass

class AnsibleLookupError(AnsibleRuntimeError):
    ''' a lookup failure '''
    pass

class AnsibleCallbackError(AnsibleRuntimeError):
    ''' a callback failure '''
    pass

class AnsibleUndefinedVariable(AnsibleRuntimeError):
    ''' a templating failure '''
    pass

class AnsibleFileNotFound(AnsibleRuntimeError):
    ''' a file missing failure '''
    pass
| gpl-3.0 |
zeroq/kraut_salad | kraut_parser/models.py | 1 | 22668 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from django.db import models
# Create your models here.
class Namespace(models.Model):
    """A named namespace used to scope the threat-intel entities below."""
    # 'nospace' is the catch-all default; names are unique file-wide.
    namespace = models.CharField(max_length=255, default='nospace', unique=True)
    description = models.TextField(null=True, blank=True)
    def __unicode__(self):
        return u"%s" % (self.namespace)
    class Meta:
        ordering = ['namespace']

class Confidence(models.Model):
    """A confidence rating value referenced by campaigns and indicators."""
    value = models.CharField(max_length=255)
    description = models.TextField(null=True, blank=True)
    def __unicode__(self):
        return u"%s" % (self.value)
class Package(models.Model):
    """Top-level container bundling threat actors, campaigns, indicators,
    observables and TTPs, with versioning/provenance metadata."""
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)   # set on insert
    last_modified = models.DateTimeField(auto_now=True)       # set on every save
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    namespace = models.ManyToManyField('Namespace', blank=True)
    version = models.CharField(max_length=255)
    # external identifier assigned by the producing source (not the PK)
    package_id = models.CharField(max_length=255)
    source = models.CharField(max_length=255)
    produced_time = models.DateTimeField(null=True, blank=True)
    threat_actors = models.ManyToManyField('ThreatActor', blank=True)
    campaigns = models.ManyToManyField('Campaign', blank=True)
    indicators = models.ManyToManyField('Indicator', blank=True)
    observables = models.ManyToManyField('Observable', blank=True)
    ttps = models.ManyToManyField('TTP', blank=True)
    def __unicode__(self):
        return u"%s" % (self.name)

class Package_Intent(models.Model):
    """One intent label attached to a Package (one-to-many)."""
    intent = models.CharField(max_length=255)
    package = models.ForeignKey(Package, on_delete=models.CASCADE)
    def __unicode__(self):
        return u"%s" % (self.intent)

class Package_Reference(models.Model):
    """One reference string/URL attached to a Package (one-to-many)."""
    reference = models.CharField(max_length=1024)
    package = models.ForeignKey(Package, on_delete=models.CASCADE)
    def __unicode__(self):
        return u"%s" % (self.reference)
class TTP(models.Model):
    """ Tools, Tactics, and Procedures

    Self-referencing: TTPs link to other TTPs through the RelatedTTP
    through-model, which also records the relationship label.
    """
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    namespace = models.ManyToManyField('Namespace', blank=True)
    ttp_id = models.CharField(max_length=255)  # external identifier
    related_ttps = models.ManyToManyField('self', through='RelatedTTP', symmetrical=False, related_name='ttp_related_to_ttp', blank=True)
    def add_related_ttp(self, ttp, relationship):
        # Idempotent helper: reuses an existing link with the same label.
        rel_ttp, rel_ttp_created = RelatedTTP.objects.get_or_create(from_ttp=self, to_ttp=ttp, relationship=relationship)
        return rel_ttp
    def __unicode__(self):
        return u"%s" % (self.name)

class RelatedTTP(models.Model):
    # Through-model for TTP.related_ttps; direction is from_ttp -> to_ttp.
    from_ttp = models.ForeignKey(TTP, related_name='from_ttp', on_delete=models.CASCADE)
    to_ttp = models.ForeignKey(TTP, related_name='to_ttp', on_delete=models.CASCADE)
    relationship = models.CharField(max_length=255)
class MalwareInstance(models.Model):
    """A malware instance attached to a TTP (one-to-many via ttp_ref)."""
    ttp_ref = models.ForeignKey(TTP, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    def __unicode__(self):
        return u"%s" % (self.name)

class MalwareInstanceNames(models.Model):
    """An alternative name for a MalwareInstance."""
    instance_ref = models.ForeignKey(MalwareInstance, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    def __unicode__(self):
        return u"%s" % (self.name)

class MalwareInstanceTypes(models.Model):
    """A type label for a MalwareInstance."""
    instance_ref = models.ForeignKey(MalwareInstance, on_delete=models.CASCADE)
    # leading underscore avoids shadowing the builtin 'type'
    _type = models.CharField(max_length=255)
    def __unicode__(self):
        return u"%s" % (self._type)

class AttackPattern(models.Model):
    """An attack pattern attached to a TTP; capec_id appears to hold a
    CAPEC identifier (field name only — confirm against the importer)."""
    ttp_ref = models.ForeignKey(TTP, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    capec_id = models.CharField(max_length=255)
class Campaign(models.Model):
    """A campaign linking indicators and TTPs; TTP links go through
    RelationCampaignTTP so the relationship label is preserved."""
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    namespace = models.ManyToManyField('Namespace', blank=True)
    status = models.CharField(max_length=255, default='Ongoing')
    campaign_id = models.CharField(max_length=255)  # external identifier
    confidence = models.ManyToManyField(Confidence, blank=True)
    related_indicators = models.ManyToManyField('Indicator', blank=True)
    related_ttps = models.ManyToManyField(TTP, through='RelationCampaignTTP', symmetrical=False, related_name='ttp_related_to_campaign', blank=True)
    associated_campaigns = models.ManyToManyField('self', blank=True)
    def add_related_ttp(self, ttp, relationship):
        # Idempotent helper mirroring TTP.add_related_ttp.
        rel_ttp, rel_ttp_created = RelationCampaignTTP.objects.get_or_create(
            campaign=self,
            ttp=ttp,
            relationship=relationship
        )
        return rel_ttp
    def __unicode__(self):
        return u"%s" % (self.name)

class RelationCampaignTTP(models.Model):
    # Through-model for Campaign.related_ttps.
    campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)
    ttp = models.ForeignKey(TTP, on_delete=models.CASCADE)
    relationship = models.CharField(max_length=255)
class ThreatActor(models.Model):
    """An actor with campaigns, aliases, and observed TTPs (the latter via
    the ObservedTTP through-model, which keeps the relationship label)."""
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    namespace = models.ManyToManyField('Namespace', blank=True)
    campaigns = models.ManyToManyField(Campaign, blank=True)
    associated_threat_actors = models.ManyToManyField('self', blank=True)
    threat_actor_id = models.CharField(max_length=255)  # external identifier
    observed_ttps = models.ManyToManyField(TTP, through='ObservedTTP', symmetrical=False, related_name='ttp_observed_at_ta', blank=True)
    def add_observed_ttp(self, ttp, relationship):
        # Idempotent helper mirroring Campaign.add_related_ttp.
        obs_ttp, obs_ttp_created = ObservedTTP.objects.get_or_create(
            ta=self,
            ttp=ttp,
            relationship=relationship
        )
        return obs_ttp
    def __unicode__(self):
        return u"%s" % (self.name)

class ObservedTTP(models.Model):
    # Through-model for ThreatActor.observed_ttps.
    ta = models.ForeignKey(ThreatActor, on_delete=models.CASCADE)
    ttp = models.ForeignKey(TTP, on_delete=models.CASCADE)
    relationship = models.CharField(max_length=255)

class TA_Types(models.Model):
    """A type label for a threat actor; unique per (type, actor)."""
    ta_type = models.CharField(max_length=255)
    actor = models.ForeignKey(ThreatActor, on_delete=models.CASCADE)
    def __unicode__(self):
        return u"%s -> %s" % (self.actor.name, self.ta_type)
    class Meta:
        unique_together = (("ta_type", "actor"),)

class TA_Roles(models.Model):
    """A role label for a threat actor; unique per (role, actor)."""
    role = models.CharField(max_length=255)
    actor = models.ForeignKey(ThreatActor, on_delete=models.CASCADE)
    def __unicode__(self):
        return u"%s" % (self.role)
    class Meta:
        unique_together = (("role", "actor"),)

class TA_Alias(models.Model):
    """An alias of a threat actor; unique per (alias, actor)."""
    alias = models.CharField(max_length=255)
    namespace = models.ManyToManyField('Namespace', blank=True)
    actor = models.ForeignKey(ThreatActor, on_delete=models.CASCADE)
    alias_type = models.CharField(max_length=255, default='UnofficialName')
    def __unicode__(self):
        return u"%s" % (self.alias)
    class Meta:
        unique_together = (("alias", "actor"),)
class Indicator_Kill_Chain_Phase(models.Model):
    """A kill-chain phase; ordinality presumably orders phases — confirm."""
    name = models.CharField(max_length=255, unique=True)
    ordinality = models.IntegerField(null=True, blank=True)
    def __unicode__(self):
        return u"%s" % (self.name)

class Indicator_Type(models.Model):
    """A unique indicator type label."""
    itype = models.CharField(max_length=255, unique=True)
    def __unicode__(self):
        return u"%s" % (self.itype)

class Indicator(models.Model):
    """An indicator tying types, confidence, TTPs and kill-chain phases."""
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    indicator_id = models.CharField(max_length=255)  # external identifier
    namespace = models.ManyToManyField('Namespace', blank=True)
    indicator_types = models.ManyToManyField(Indicator_Type, blank=True)
    confidence = models.ManyToManyField(Confidence, blank=True)
    related_indicators = models.ManyToManyField('self', blank=True)
    # max_length=3 fits "AND"/"OR"; presumably only these two — confirm
    indicator_composition_operator = models.CharField(max_length=3, default="OR")
    ttps = models.ManyToManyField('TTP', blank=True)
    produced_time = models.DateTimeField(null=True, blank=True)
    kill_chain_phases = models.ManyToManyField(Indicator_Kill_Chain_Phase, blank=True)
    def __unicode__(self):
        return u"%s" % (self.name)
class ObservableComposition(models.Model):
    """A boolean composition ('AND'/'OR' in operator, given max_length=3)
    of observables and/or nested compositions."""
    name = models.CharField(max_length=255)
    operator = models.CharField(max_length=3, default='OR')
    indicator = models.ManyToManyField(Indicator, blank=True)
    observables = models.ManyToManyField('Observable', blank=True)
    observable_compositions = models.ManyToManyField('self', blank=True)
    def __unicode__(self):
        return u"%s" % (self.name)

class Observable(models.Model):
    """An observable; the typed payload lives in the *_Object models below,
    which link back here via their own M2M fields."""
    name = models.CharField(max_length=255)
    creation_time = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    description = models.TextField(null=True, blank=True)
    short_description = models.CharField(max_length=255, null=True, blank=True)
    namespace = models.ManyToManyField('Namespace', blank=True)
    indicators = models.ManyToManyField(Indicator, blank=True)
    compositions = models.ManyToManyField(ObservableComposition, blank=True)
    observable_type = models.CharField(max_length=255, null=True, blank=True)
    observable_id = models.CharField(max_length=255, unique=True)  # external ID
    def __unicode__(self):
        return u"%s" % (self.name)
# Objects
class Related_Object(models.Model):
    """Generic (id, type) -> (id, type) link between two object rows.
    No FK constraints — referential integrity is the application's job."""
    relationship = models.CharField(max_length=255, default='contains')
    object_one_id = models.IntegerField()
    object_one_type = models.CharField(max_length=255)
    object_two_id = models.IntegerField()
    object_two_type = models.CharField(max_length=255)
    def __unicode__(self):
        return u"%s:%s -> %s:%s" % (self.object_one_id, self.object_one_type, self.object_two_id, self.object_two_type)
    class Meta:
        unique_together = (("relationship", "object_one_id", "object_two_id", "object_one_type", "object_two_type"),)
        index_together = (("object_one_id", "object_one_type"), ("object_two_id", "object_two_type"))

class File_Custom_Properties(models.Model):
    """A deduplicated name/value property attachable to file objects."""
    property_name = models.CharField(max_length=255, null=True, blank=True)
    property_value = models.CharField(max_length=255, null=True, blank=True)
    def __unicode__(self):
        return u"%s - %s" % (self.property_name, self.property_value)
    class Meta:
        unique_together = (("property_name", "property_value"),)

class File_Meta_Object(models.Model):
    """File name/path/extension/size metadata, deduplicated as a tuple."""
    file_name = models.CharField(max_length=255, null=True, blank=True)
    file_path = models.CharField(max_length=255, null=True, blank=True)
    file_extension = models.CharField(max_length=255, null=True, blank=True)
    file_size = models.IntegerField(default=0)
    def __unicode__(self):
        if self.file_name:
            return u"%s" % (self.file_name)
        else:
            return u"File Meta Object"
    class Meta:
        unique_together = (("file_name", "file_path", "file_extension", "file_size"),)

class File_Object(models.Model):
    """A file identified by md5 and/or sha256, with optional metadata."""
    file_meta = models.ManyToManyField(File_Meta_Object, blank=True)
    file_custom = models.ManyToManyField(File_Custom_Properties, blank=True)
    md5_hash = models.CharField(max_length=32, null=True, blank=True)
    sha256_hash = models.CharField(max_length=64, null=True, blank=True)
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.md5_hash:
            return u"%s" % (self.md5_hash)
        elif self.sha256_hash:
            return u"%s" % (self.sha256_hash)
        else:
            return u"File Object"
    class Meta:
        index_together = (("md5_hash", "sha256_hash"),)
class URI_Object(models.Model):
    """A URI value with a match condition ('equals' by default)."""
    uri_value = models.CharField(max_length=255)
    uri_type = models.CharField(max_length=255)
    condition = models.CharField(max_length=255, default="equals")
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.uri_value:
            return u"%s" % (self.uri_value)
        else:
            return u"URI Object"
    class Meta:
        unique_together = (("uri_value", "uri_type", "condition"),)

class Address_Object(models.Model):
    """A network address; category defaults to 'ipv4-addr'."""
    address_value = models.CharField(max_length=255)
    category = models.CharField(max_length=255, default="ipv4-addr")
    condition = models.CharField(max_length=255, default="equals")
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.address_value:
            return u"%s" % (self.address_value)
        else:
            return u"Address Object"
    class Meta:
        unique_together = (("address_value", "category", "condition"),)

class Mutex_Object(models.Model):
    """A named mutex observable."""
    mutex_name = models.CharField(max_length=255)
    condition = models.CharField(max_length=255, default="equals")
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.mutex_name:
            return u"%s" % (self.mutex_name)
        else:
            return u"Mutex Object"
    class Meta:
        unique_together = (("mutex_name", "condition"),)

class Driver_Object(models.Model):
    """A driver-name observable; note default condition is 'contains'."""
    driver_name = models.CharField(max_length=255)
    condition = models.CharField(max_length=255, default="contains")
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.driver_name:
            return u"%s" % (self.driver_name)
        else:
            return u"Driver Object"
    class Meta:
        unique_together = (("driver_name", "condition"),)
class Win_Registry_Object(models.Model):
    """A Windows registry key/value observable."""
    key = models.CharField(max_length=1024)
    hive = models.CharField(max_length=255)
    data = models.CharField(max_length=1024)
    data_name = models.CharField(max_length=255)
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.key and self.hive:
            return u"%s - %s" % (self.hive, self.key)
        elif self.key:
            return u"%s" % (self.key)
        else:
            return u"Registry Observable"
    class Meta:
        unique_together = (("key", "hive", "data", "data_name"),)

class Link_Object(models.Model):
    """A hyperlink observable (URL plus optional label)."""
    link = models.CharField(max_length=1024)
    link_type = models.CharField(max_length=255)
    url_label = models.CharField(max_length=1024)
    condition = models.CharField(max_length=255, default="contains")
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.link:
            return u"%s" % (self.link)
        elif self.url_label:
            return u"%s" % (self.url_label)
        else:
            return u"Link Object"
    class Meta:
        unique_together = (("link", "url_label", "condition", "link_type"),)

class Code_Object(models.Model):
    """A code-snippet observable; dedup key is (custom_value, condition)
    only — code_segment itself is not part of the uniqueness constraint."""
    custom_value = models.CharField(max_length=255)
    custom_condition = models.CharField(max_length=255, default="equals")
    purpose = models.CharField(max_length=255, null=True, blank=True)
    code_language = models.CharField(max_length=255, null=True, blank=True)
    code_segment = models.TextField(null=True, blank=True)
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.custom_value:
            return u"%s" % (self.custom_value)
        else:
            return u"Code Object"
    class Meta:
        unique_together = (("custom_value", "custom_condition"),)
# E-mail address helper tables: one row per distinct address (unique),
# shared between messages via the M2M fields on EmailMessage_Object.
class EmailMessage_recipient(models.Model):
    """A 'To:' recipient address."""
    recipient = models.CharField(max_length=255, unique=True)
    is_spoofed = models.BooleanField(default=False)
    condition = models.CharField(max_length=255, default="equals")

class EmailMessage_recipient_cc(models.Model):
    """A 'Cc:' recipient address."""
    recipient = models.CharField(max_length=255, unique=True)
    is_spoofed = models.BooleanField(default=False)
    condition = models.CharField(max_length=255, default="equals")

class EmailMessage_recipient_bcc(models.Model):
    """A 'Bcc:' recipient address."""
    recipient = models.CharField(max_length=255, unique=True)
    is_spoofed = models.BooleanField(default=False)
    condition = models.CharField(max_length=255, default="equals")

class EmailMessage_sender(models.Model):
    """An envelope sender address."""
    sender = models.CharField(max_length=255, unique=True)
    is_spoofed = models.BooleanField(default=False)
    condition = models.CharField(max_length=255, default="equals")

class EmailMessage_from(models.Model):
    """A 'From:' header address."""
    sender = models.CharField(max_length=255, unique=True)
    is_spoofed = models.BooleanField(default=False)
    condition = models.CharField(max_length=255, default="equals")

class EmailMessage_link(models.Model):
    """A URL extracted from a message body."""
    url = models.CharField(max_length=255, unique=True)

class EmailMessage_Object(models.Model):
    """An e-mail message observable with headers, addresses, links
    and file attachments."""
    raw_body = models.TextField(null=True, blank=True)
    raw_header = models.TextField(null=True, blank=True)
    subject = models.CharField(max_length=255, null=True, blank=True)
    sender = models.ManyToManyField(EmailMessage_sender, blank=True)
    from_string = models.ManyToManyField(EmailMessage_from, blank=True)
    recipients = models.ManyToManyField(EmailMessage_recipient, blank=True)
    recipients_cc = models.ManyToManyField(EmailMessage_recipient_cc, blank=True)
    recipients_bcc = models.ManyToManyField(EmailMessage_recipient_bcc, blank=True)
    links = models.ManyToManyField(EmailMessage_link, blank=True)
    attachments = models.ManyToManyField(File_Object, blank=True)
    email_date = models.DateTimeField(null=True, blank=True)
    message_id = models.CharField(max_length=255, null=True, blank=True)
    content_type = models.CharField(max_length=255, null=True, blank=True)
    mime_version = models.CharField(max_length=255, null=True, blank=True)
    user_agent = models.CharField(max_length=255, null=True, blank=True)
    x_mailer = models.CharField(max_length=255, null=True, blank=True)
    observables = models.ManyToManyField(Observable)
    def __unicode__(self):
        if self.subject:
            return u"%s" % (self.subject)
        else:
            return u"EmailMessage Object"
    class Meta:
        unique_together = (("raw_body", "raw_header", "subject", "message_id", "x_mailer", "user_agent", "mime_version", "content_type", "email_date"),)

class Port_Object(models.Model):
    """A network port number (deduplicated via unique)."""
    port = models.IntegerField(unique=True)
    def __unicode__(self):
        return u"%s" % (self.port)
class HTTPClientRequest(models.Model):
    """One HTTP client request (method/URI/headers plus host and port)."""
    raw_header = models.TextField(null=True, blank=True)
    message_body = models.TextField(null=True, blank=True)
    request_method = models.CharField(max_length=10, null=True, blank=True)
    request_uri = models.CharField(max_length=1024, null=True, blank=True)
    request_version = models.CharField(max_length=10, null=True, blank=True)
    user_agent = models.CharField(max_length=255, null=True, blank=True)
    domain_name = models.ForeignKey(URI_Object, null=True, blank=True, on_delete=models.CASCADE)
    port = models.ForeignKey(Port_Object, null=True, blank=True, on_delete=models.CASCADE)
    def __unicode__(self):
        if self.request_uri and self.domain_name:
            return u"%s" % (self.request_uri)
        else:
            return u"HTTPClientRequest Object"

class HTTPSession_Object(models.Model):
    """An HTTP session observable; request side only."""
    client_request = models.ForeignKey(HTTPClientRequest, null=True, blank=True, on_delete=models.CASCADE)
    # http server response not implemented
    observables = models.ManyToManyField(Observable)

class DNSQuestion(models.Model):
    """A DNS question section: queried name plus optional type/class."""
    qname = models.ForeignKey(URI_Object, on_delete=models.CASCADE)
    qtype = models.CharField(max_length=255, null=True, blank=True)
    qclass = models.CharField(max_length=255, null=True, blank=True)
    def __unicode__(self):
        return u"%s" % (self.qname.uri_value)

class DNSQuery_Object(models.Model):
    """A DNS query observable (question plus success flag)."""
    successful = models.BooleanField(default=False)
    question = models.ForeignKey(DNSQuestion, on_delete=models.CASCADE)
    observables = models.ManyToManyField(Observable)
# PE (portable executable) structure tables feeding WindowsExecutable_Object.
class ImportedFunction(models.Model):
    """One imported function inside a PE import entry."""
    function_name = models.CharField(max_length=255)
    virtual_address = models.CharField(max_length=255, null=True, blank=True)

class PEImports(models.Model):
    """One imported DLL/file and its imported functions."""
    file_name = models.CharField(max_length=255)
    virtual_address = models.CharField(max_length=255, null=True, blank=True)
    imported_functions = models.ManyToManyField(ImportedFunction)

class ExportedFunction(models.Model):
    """One exported function of a PE."""
    function_name = models.CharField(max_length=255)
    entry_point = models.CharField(max_length=255, null=True, blank=True)

class PEExports(models.Model):
    """The export table of a PE."""
    name = models.CharField(max_length=255)
    exported_functions = models.ManyToManyField(ExportedFunction)

class PESections(models.Model):
    """One PE section with size/address metadata and entropy."""
    section_name = models.CharField(max_length=255)
    entropy = models.FloatField(null=True, blank=True)
    virtual_size = models.CharField(max_length=255, null=True, blank=True)
    virtual_address = models.CharField(max_length=255, null=True, blank=True)
    size_of_raw_data = models.CharField(max_length=255, null=True, blank=True)

class WindowsExecutable_Object(models.Model):
    """A Windows executable observable composed of imports/exports/sections."""
    pe_type = models.CharField(max_length=255, default="executable")
    object_id = models.CharField(max_length=255)  # external identifier
    imports = models.ManyToManyField(PEImports)
    exports = models.ManyToManyField(PEExports)
    sections = models.ManyToManyField(PESections)
    observables = models.ManyToManyField(Observable)
| gpl-2.0 |
patdoyle1/FastMath | lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober bundling all supported multi-byte charset probers."""

    # Probe order is significant: UTF-8 first, then the CJK encodings.
    _PROBER_CLASSES = (
        UTF8Prober,
        SJISProber,
        EUCJPProber,
        GB2312Prober,
        EUCKRProber,
        CP949Prober,
        Big5Prober,
        EUCTWProber,
    )

    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [cls() for cls in self._PROBER_CLASSES]
        self.reset()
| gpl-2.0 |
brinbois/Sick-Beard | sickbeard/clients/requests/packages/charade/chardistribution.py | 184 | 9457 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3


class CharDistributionAnalysis:
    """Base class for per-language character-distribution analysers.

    Subclasses install a char-order -> frequency-order table, its size,
    and a language-specific distribution ratio, and override get_order().
    """

    def __init__(self):
        # Filled in by subclasses (see their __init__ methods).
        self._mCharToFreqOrder = None           # char order -> frequency order
        self._mTableSize = None                 # number of entries in the table
        # Language-dependent constant used when computing confidence; see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        self._mDone = False       # True once a conclusion has been made
        self._mTotalChars = 0     # total characters encountered
        self._mFreqChars = 0      # characters with frequency order below 512

    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        # Only 2-byte characters take part in the distribution analysis.
        if aCharLen != 2:
            return
        order = self.get_order(aBuf)
        if order < 0:
            return
        self._mTotalChars += 1
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # Too little data in the considered range: negative answer.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            ratio = (self._mFreqChars /
                     ((self._mTotalChars - self._mFreqChars)
                      * self._mTypicalDistributionRatio))
            if ratio < SURE_YES:
                return ratio
        # Cap the answer: never claim 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        # A conclusion can be drawn before the whole stream is seen.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # Subclasses map an encoded character to an "order" number so that
        # multiple encodings of one language share a frequency table.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for EUC-TW."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW: lead byte 0xC4-0xFE, trail byte 0xA1-0xFE; the state
        # machine has already validated the sequence.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analyser for the EUC-KR encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR bytes of interest -- lead: 0xB0-0xFE, trail: 0xA1-0xFE.
        # No validation needed here; the state machine has already done it.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analyser for the GB2312 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312 bytes of interest -- lead: 0xB0-0xFE, trail: 0xA1-0xFE.
        # No validation needed here; the state machine has already done it.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead >= 0xB0 and trail >= 0xA1:
            return 94 * (lead - 0xB0) + trail - 0xA1
        return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analyser for the Big5 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Big5 bytes of interest -- lead: 0xA4-0xFE,
        # trail: 0x40-0x7E or 0xA1-0xFE.
        # No validation needed here; the state machine has already done it.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        base = 157 * (lead - 0xA4)
        if trail >= 0xA1:
            return base + trail - 0xA1 + 63
        return base + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analyser for the Shift_JIS encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Shift_JIS bytes of interest -- lead: 0x81-0x9F and 0xE0-0xEF,
        # trail: 0x40-0x7E (only trail bytes <= 0x7F yield a valid order
        # here; larger trail bytes are rejected below).
        # No validation needed here; the state machine has already done it.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            base = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            base = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        if trail > 0x7F:
            return -1
        return base + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Character distribution analyser for the EUC-JP encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-JP bytes of interest -- lead: 0xA0-0xFE, trail: 0xA1-0xFE.
        # No validation needed here; the state machine has already done it.
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        # NOTE(review): the guard accepts 0xA0 while the arithmetic is
        # anchored at 0xA1 -- kept as-is to preserve upstream behaviour.
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
| gpl-3.0 |
cryptapus/electrum | electrum/gui/kivy/uix/dialogs/question.py | 4 | 1357 | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from electrum.gui.kivy.i18n import _
Builder.load_string('''
<Question@Popup>
id: popup
title: ''
message: ''
size_hint: 0.8, 0.5
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Label:
id: label
text: root.message
text_size: self.width, None
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: _('No')
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(False)
popup.dismiss()
Button:
text: _('Yes')
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(True)
popup.dismiss()
''')
class Question(Factory.Popup):
    """Modal yes/no dialog backed by the kv rule of the same name.

    ``callback`` is invoked with True when the user presses Yes and with
    False when the user presses No; the popup dismisses itself afterwards.
    """

    def __init__(self, msg, callback):
        Factory.Popup.__init__(self)
        self.callback = callback
        self.message = msg
        self.title = _('Question')
liminspace/django-transtool | transtool/settings.py | 1 | 2429 | from django.conf import settings
# Remote-sync endpoints and shared secrets; each defaults to None, which
# leaves the corresponding feature disabled until configured in settings.
TRANSTOOL_DL_URL = getattr(settings, 'TRANSTOOL_DL_URL', None)  # http://example.com/localemessages/export/
TRANSTOOL_DL_KEY = getattr(settings, 'TRANSTOOL_DL_KEY', None)  # for import translates from remote server
TRANSTOOL_EXPORT_KEY = getattr(settings, 'TRANSTOOL_EXPORT_KEY', None)  # for export translates by http request
# Root directory of the project; falls back to the standard BASE_DIR.
TRANSTOOL_PROJECT_BASE_DIR = getattr(settings, 'TRANSTOOL_PROJECT_BASE_DIR', settings.BASE_DIR)  # root dir of project
# Locale codes to manage; defaults to the language codes from LANGUAGES.
TRANSTOOL_LOCALES = getattr(settings, 'TRANSTOOL_LOCALES', tuple(map(lambda t: t[0], settings.LANGUAGES)))
# Built-in gettext domains and the file extensions scanned for each one.
TRANSTOOL_DEFAULT_DOMAINS = {
    'django': {
        'EXT': ('html', 'txt', 'py'),
    },
    'djangojs': {
        'EXT': ('js',),
    },
}
# Merge the project's TRANSTOOL_DEFAULT_DOMAINS into the defaults additively:
# options of known domains are overlaid without dropping the defaults, and
# unknown domains are adopted as-is.
for _domain, _opts in getattr(settings, 'TRANSTOOL_DEFAULT_DOMAINS', {}).items():
    if _domain not in TRANSTOOL_DEFAULT_DOMAINS:
        # New domain: adopt the project-supplied options verbatim.
        TRANSTOOL_DEFAULT_DOMAINS[_domain] = _opts
    else:
        # Existing domain: overlay the project options onto the defaults.
        TRANSTOOL_DEFAULT_DOMAINS[_domain].update(_opts)
# Example:
# TRANSTOOL_LOCALE_PATHS = (
# (os.path.join(BASE_DIR, 'apps/myapp/'), { # directory that contains locale subdirectory
# 'django': {
# 'DIRS': (
# os.path.join(BASE_DIR, 'apps/myapp/'),
# os.path.join(BASE_DIR, 'templates/myapp/'),
# ),
# # 'EXT': ('html', 'txt', 'py'),
# },
# 'djangojs': {
# 'DIRS': (
# os.path.join(BASE_DIR, 'static/myapp/'),
# ),
# # 'EXT': ('js',),
# # expected args: (locale_path: str, cmd_kwargs: dict) -> dict
# # 'BEFORE_MAKE': 'path.to.function.before_make',
# # expected args: (locale_path: str, cmd_kwargs: dict) -> None
# # 'AFTER_MAKE': 'path.to.function.after_make',
# },
# }),
# (os.path.join(BASE_DIR, './'), {
# 'django': {
# 'DIRS': (
# BASE_DIR,
# ),
# 'REST': True,
# },
# 'djangojs': {
# 'DIRS': (
# BASE_DIR,
# ),
# 'REST': True,
# },
# }),
# )
# Per-path domain configuration; see the commented example above for shape.
TRANSTOOL_LOCALE_PATHS = getattr(settings, 'TRANSTOOL_LOCALE_PATHS', ())
| mit |
ict-felix/stack | optin_manager/src/python/openflow/optin_manager/sfa/openflow_utils/SliverRSpecParser.py | 4 | 5551 | class GENIDatapath():
def __init__ (self, dom):
super(GENIDatapath, self).__init__()
self.component_id = None
if dom.tag == u'{%s}datapath' % (OFNSv3):
self.__parse_openflowv3_datapath(dom)
def __parse_openflowv3_datapath (self, dom):
self.component_id = dom.get("component_id")
cmid = dom.get("component_manager_id")
if self.component_id.count(cmid[:-12]) != 1:
raise ComponentManagerIDMismatch(self.component_id, cmid)
if cmid != getManagerID():
raise UnknownComponentManagerID(self.component_id)
self.dpid = GeniDB.getSwitchDPID(self.component_id)
self.ports = set()
for port in dom.findall('{%s}port' % (OFNSv3)):
p = foam.openflow.types.Port()
p.num = int(port.get("num"))
p.dpid = self.dpid
self.ports.add(p)
class GENISliver:
    """A sliver parsed from a GENI OpenFlow v3 request RSpec.

    Wraps the <sliver> element of the RSpec: controllers, named datapath
    groups, flowspec matches and virtual links.  Several mutators used
    during parsing (setEmail, addController, addGroup, addFlowSpec, ...)
    are provided by a collaborating base/mixin defined elsewhere -- only
    the RSpec-parsing logic lives in this class.
    """

    def __init__ (self, dom):
        # Identity fields; filled in later via setSliverURN()/setUserURN().
        self.__urn = None
        self.__slice_urn = None
        self.__user_urn = None
        self.__ref = None
        self.__pend_reason = None
        if dom:
            self.__parseDatav3(dom)

    def setPendReason (self, reason):
        """Record why this sliver is pending."""
        self.__pend_reason = reason

    def getURN (self):
        return self.__urn

    def getSliceURN (self):
        return self.__slice_urn

    def getUserURN (self):
        return self.__user_urn

    def __parseDatav3 (self, dom):
        """Populate this sliver from a GENI v3 RSpec DOM.

        Raises a bare Exception when the <sliver> tag or a group name is
        missing, mirroring the historical behaviour.
        """
        sliver_dom = dom.find('{%s}sliver' % (OFNSv3))
        if sliver_dom is None:
            raise Exception ("NoSliverTag")
        self.setEmail(sliver_dom.get("email", None))
        self.setDescription(sliver_dom.get("description", None))
        self.__ref = sliver_dom.get("ref", None)
        controller_elems = sliver_dom.findall('{%s}controller' % (OFNSv3))
        # NOTE(review): findall() returns a list, never None, so this check
        # can never fire; kept (not converted to a truthiness test) so that
        # slivers without controllers keep parsing as before.
        if controller_elems is None:
            raise Exception ("NoControllersDefined")
        for controller_elem in controller_elems:
            self.addController(self.makeController(controller_elem))
        for grp in sliver_dom.findall('{%s}group' % (OFNSv3)):
            grpname = grp.get("name")
            if grpname is None:
                raise Exception("NoGroupName")
            dplist = []
            for dp in grp.findall('{%s}datapath' % (OFNSv3)):
                dplist.append(GENIDatapath(dp))
            self.addGroup(grpname, dplist)
        for flowspec in sliver_dom.findall('{%s}match' % (OFNSv3)):
            self.addFlowSpec(self.parseFlowSpec(flowspec, OFNSv3))
        for virtuallink in sliver_dom.findall('{%s}vlink' % (OFNSv3)):
            self.addVirtualLink(self.parseVirtualLink(virtuallink, OFNSv3))

    def getDataDict (self, detail = True):
        """Return a plain-dict summary of this sliver.

        ``detail`` is accepted for interface compatibility but currently
        unused.
        """
        return {
            "user": self.__user_urn,
            "sliver_urn": self.__urn,
            "ref": self.__ref,
            "pend_reason": self.__pend_reason,
        }

    def setSliverURN (self, sliver_urn):
        self.__urn = sliver_urn

    def setUserURN (self, user_urn):
        self.__user_urn = user_urn

    def setUserEmail (self, email, overwrite=False):
        """Set the contact email, keeping an existing one unless overwrite."""
        if overwrite or self.getEmail() is None:
            self.setEmail(email)

    def generateURN (self, slice_urn):
        """Bind this sliver to *slice_urn* and return its derived URN."""
        self.__slice_urn = slice_urn
        return "%s:%s" % (slice_urn, self.getUUID())

    def parseVirtualLink (self, elem, ns):
        """Build a VirtualLink from a <vlink> element in namespace *ns*.

        Hops are joined into a comma-separated string; a comma is inserted
        before every hop except the one whose index attribute is "1".
        """
        vl = VirtualLink()
        hopsdom = elem.find("{%s}hops" % (ns))
        if hopsdom is None:
            raise NoHopsTag(elem)
        linkstr = ""
        for hop in hopsdom.findall('{%s}hop' % (ns)):
            hopstr = hop.get("link").strip()
            # BUG FIX: the original compared with `is not "1"` (object
            # identity, which only worked by the accident of CPython string
            # interning); use value equality instead.
            if hop.get("index").strip() != "1":
                linkstr += ","
            linkstr += hopstr
        vl.addVLinkFromString(linkstr)
        return vl

    def parseFlowSpec (self, elem, ns):
        """Build a FlowSpec from a <match> element in namespace *ns*."""
        fs = FlowSpec()
        packetdom = elem.find("{%s}packet" % (ns))
        if packetdom is None:
            raise NoPacketTag(elem)
        # Datapaths may be referenced via a named group or listed inline.
        for grp in elem.findall('{%s}use-group' % (ns)):
            for dp in self.getGroupDatapaths(grp.get("name")):
                fs.bindDatapath(dp)
        for dpnode in elem.findall('{%s}datapath' % (ns)):
            fs.bindDatapath(GENIDatapath(dpnode))
        # Packet-header match fields, in the same order the original
        # processed them; each tag may occur any number of times.
        field_adders = (
            ('dl_src', fs.addDlSrcFromString),
            ('dl_dst', fs.addDlDstFromString),
            ('dl_type', fs.addDlTypeFromString),
            ('dl_vlan', fs.addVlanIDFromString),
            ('nw_src', fs.addNwSrcFromString),
            ('nw_dst', fs.addNwDstFromString),
            ('nw_proto', fs.addNwProtoFromString),
            ('tp_src', fs.addTpSrcFromString),
            ('tp_dst', fs.addTpDstFromString),
        )
        for tag, add in field_adders:
            for node in packetdom.findall('{%s}%s' % (ns, tag)):
                add(node.get("value").strip())
        return fs
| apache-2.0 |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/django/core/management/commands/makemessages.py | 9 | 25792 | from __future__ import unicode_literals
import fnmatch
import glob
import io
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, popen_wrapper,
)
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.text import get_text_list
# Matches the quoted "Plural-Forms: ...\n" header line of a PO/POT catalog so
# it can be copied from Django's own catalogs into freshly created ones.
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0  # exit status of a successful gettext tool invocation
# Sentinel marking a translatable file that has no locale directory to write to.
NO_LOCALE_DIR = object()
def check_programs(*programs):
    """Raise CommandError if any of the named gettext binaries is missing."""
    for program in programs:
        if find_command(program) is not None:
            continue
        raise CommandError("Can't find %s. Make sure you have GNU "
                           "gettext tools 0.15 or newer installed." % program)
def gettext_popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding="utf-8"):
    """
    Makes sure text obtained from stdout of gettext utilities is Unicode.

    Returns a ``(stdout, stderr, status_code)`` tuple like ``popen_wrapper``,
    with ``stdout`` decoded using *stdout_encoding*.
    """
    # This both decodes utf-8 and cleans line endings. Simply using
    # popen_wrapper(universal_newlines=True) doesn't properly handle the
    # encoding. This goes back to popen's flaky support for encoding:
    # https://bugs.python.org/issue6135. This is a solution for #23271, #21928.
    # No need to do anything on Python 2 because it's already a byte-string there.
    manual_io_wrapper = six.PY3 and stdout_encoding != DEFAULT_LOCALE_ENCODING
    stdout, stderr, status_code = popen_wrapper(args, os_err_exc_type=os_err_exc_type,
                                                universal_newlines=not manual_io_wrapper)
    if manual_io_wrapper:
        # Decode the raw bytes ourselves, normalizing line endings as a side
        # effect of going through a text wrapper.
        stdout = io.TextIOWrapper(io.BytesIO(stdout), encoding=stdout_encoding).read()
    if six.PY2:
        stdout = stdout.decode(stdout_encoding)
    return stdout, stderr, status_code
@total_ordering
class TranslatableFile(object):
    """A source file that may contain translatable strings.

    Instances compare and order by their full path, which makes lists of
    discovered files sortable in a deterministic way.
    """

    def __init__(self, dirpath, file_name, locale_dir):
        self.dirpath = dirpath
        self.file = file_name
        self.locale_dir = locale_dir

    @property
    def path(self):
        """The file's path: ``dirpath`` joined with the file name."""
        return os.path.join(self.dirpath, self.file)

    def __repr__(self):
        return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file])

    def __eq__(self, other):
        return self.path == other.path

    def __lt__(self, other):
        return self.path < other.path
class BuildFile(object):
    """
    Represents the state of a translatable file during the build process.
    """
    def __init__(self, command, domain, translatable):
        self.command = command  # the makemessages Command instance
        self.domain = domain  # gettext domain: 'django' or 'djangojs'
        self.translatable = translatable  # the underlying TranslatableFile
    @cached_property
    def is_templatized(self):
        """
        Whether the file must be preprocessed before being fed to xgettext:
        JavaScript when the installed xgettext is older than 0.18.3, and any
        non-.py file for the 'django' domain (i.e. templates).
        """
        if self.domain == 'djangojs':
            return self.command.gettext_version < (0, 18, 3)
        elif self.domain == 'django':
            file_ext = os.path.splitext(self.translatable.file)[1]
            return file_ext != '.py'
        return False
    @cached_property
    def path(self):
        # Path of the original translatable file.
        return self.translatable.path
    @cached_property
    def work_path(self):
        """
        Path to a file which is being fed into GNU gettext pipeline. This may
        be either a translatable or its preprocessed version.
        """
        if not self.is_templatized:
            return self.path
        # Preprocessed copies get an extra extension xgettext understands:
        # .c for prepared JavaScript, .py for templatized templates.
        extension = {
            'djangojs': 'c',
            'django': 'py',
        }.get(self.domain)
        filename = '%s.%s' % (self.translatable.file, extension)
        return os.path.join(self.translatable.dirpath, filename)
    def preprocess(self):
        """
        Preprocess (if necessary) a translatable file before passing it to
        xgettext GNU gettext utility.
        """
        from django.utils.translation import templatize
        if not self.is_templatized:
            return
        with io.open(self.path, 'r', encoding=settings.FILE_CHARSET) as fp:
            src_data = fp.read()
        if self.domain == 'djangojs':
            content = prepare_js_for_gettext(src_data)
        elif self.domain == 'django':
            content = templatize(src_data, self.path[2:])
        # Write the preprocessed copy next to the original (see work_path).
        with io.open(self.work_path, 'w', encoding='utf-8') as fp:
            fp.write(content)
    def postprocess_messages(self, msgs):
        """
        Postprocess messages generated by xgettext GNU gettext utility.

        Transform paths as if these messages were generated from original
        translatable files rather than from preprocessed versions.
        """
        if not self.is_templatized:
            return msgs
        # Remove '.py' suffix
        if os.name == 'nt':
            # Preserve '.\' prefix on Windows to respect gettext behavior
            old_path = self.work_path
            new_path = self.path
        else:
            old_path = self.work_path[2:]
            new_path = self.path[2:]
        # Rewrite only the '#: path:line' comment lines emitted by xgettext.
        return re.sub(
            r'^(#: .*)(' + re.escape(old_path) + r')',
            lambda match: match.group().replace(old_path, new_path),
            msgs,
            flags=re.MULTILINE
        )
    def cleanup(self):
        """
        Remove a preprocessed copy of a translatable file (if any).
        """
        if self.is_templatized:
            # This check is needed for the case of a symlinked file and its
            # source being processed inside a single group (locale dir);
            # removing either of those two removes both.
            if os.path.exists(self.work_path):
                os.unlink(self.work_path)
def write_pot_file(potfile, msgs):
    """Append ``msgs`` to ``potfile``, creating the file if necessary.

    When the file already exists, the metadata header of ``msgs`` (all
    leading non-empty lines up to the first blank line) is stripped so the
    catalog keeps a single header; for a fresh file the charset placeholder
    is replaced with UTF-8 instead.
    """
    if not os.path.exists(potfile):
        msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
    else:
        # Strip the duplicate header block from the incoming messages.
        msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
    with io.open(potfile, 'a', encoding='utf-8') as fp:
        fp.write(msgs)
class Command(BaseCommand):
    help = ("Runs over the entire source tree of the current directory and "
            "pulls out all strings marked for translation. It creates (or updates) a message "
            "file in the conf/locale (in the django tree) or locale (for projects and "
            "applications) directory.\n\nYou must run this command with one of either the "
            "--locale, --exclude or --all options.")
    # Hook points so subclasses can substitute their own file abstractions.
    translatable_file_class = TranslatableFile
    build_file_class = BuildFile
    requires_system_checks = False
    leave_locale_alone = True
    # Default extra options passed to the corresponding GNU gettext tools.
    msgmerge_options = ['-q', '--previous']
    msguniq_options = ['--to-code=utf-8']
    msgattrib_options = ['--no-obsolete']
    xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
    def add_arguments(self, parser):
        """Register makemessages' command-line options."""
        parser.add_argument('--locale', '-l', default=[], dest='locale', action='append',
            help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
                 'Can be used multiple times.')
        parser.add_argument('--exclude', '-x', default=[], dest='exclude', action='append',
            help='Locales to exclude. Default is none. Can be used multiple times.')
        parser.add_argument('--domain', '-d', default='django', dest='domain',
            help='The domain of the message files (default: "django").')
        parser.add_argument('--all', '-a', action='store_true', dest='all',
            default=False, help='Updates the message files for all existing locales.')
        parser.add_argument('--extension', '-e', dest='extensions',
            help='The file extension(s) to examine (default: "html,txt,py", or "js" '
                 'if the domain is "djangojs"). Separate multiple extensions with '
                 'commas, or use -e multiple times.',
            action='append')
        parser.add_argument('--symlinks', '-s', action='store_true', dest='symlinks',
            default=False, help='Follows symlinks to directories when examining '
                                'source code and templates for translation strings.')
        parser.add_argument('--ignore', '-i', action='append', dest='ignore_patterns',
            default=[], metavar='PATTERN',
            help='Ignore files or directories matching this glob-style pattern. '
                 'Use multiple times to ignore more.')
        parser.add_argument('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
            default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.")
        parser.add_argument('--no-wrap', action='store_true', dest='no_wrap',
            default=False, help="Don't break long message lines into several lines.")
        parser.add_argument('--no-location', action='store_true', dest='no_location',
            default=False, help="Don't write '#: filename:line' lines.")
        parser.add_argument('--no-obsolete', action='store_true', dest='no_obsolete',
            default=False, help="Remove obsolete message strings.")
        parser.add_argument('--keep-pot', action='store_true', dest='keep_pot',
            default=False, help="Keep .pot file after making messages. Useful when debugging.")
    def handle(self, *args, **options):
        """Validate options, build POT files, then write the PO catalogs."""
        locale = options.get('locale')
        exclude = options.get('exclude')
        self.domain = options.get('domain')
        self.verbosity = options.get('verbosity')
        process_all = options.get('all')
        extensions = options.get('extensions')
        self.symlinks = options.get('symlinks')
        # Need to ensure that the i18n framework is enabled
        if settings.configured:
            settings.USE_I18N = True
        else:
            settings.configure(USE_I18N=True)
        ignore_patterns = options.get('ignore_patterns')
        if options.get('use_default_ignore_patterns'):
            ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
        self.ignore_patterns = list(set(ignore_patterns))
        # Avoid messing with mutable class variables
        if options.get('no_wrap'):
            self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
            self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
            self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
            self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
        if options.get('no_location'):
            self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
            self.msguniq_options = self.msguniq_options[:] + ['--no-location']
            self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
            self.xgettext_options = self.xgettext_options[:] + ['--no-location']
        self.no_obsolete = options.get('no_obsolete')
        self.keep_pot = options.get('keep_pot')
        if self.domain not in ('django', 'djangojs'):
            raise CommandError("currently makemessages only supports domains "
                               "'django' and 'djangojs'")
        if self.domain == 'djangojs':
            exts = extensions if extensions else ['js']
        else:
            exts = extensions if extensions else ['html', 'txt', 'py']
        self.extensions = handle_extensions(exts)
        if (locale is None and not exclude and not process_all) or self.domain is None:
            raise CommandError("Type '%s help %s' for usage information." % (
                os.path.basename(sys.argv[0]), sys.argv[1]))
        if self.verbosity > 1:
            self.stdout.write('examining files with the extensions: %s\n'
                              % get_text_list(list(self.extensions), 'and'))
        self.invoked_for_django = False
        self.locale_paths = []
        self.default_locale_path = None
        if os.path.isdir(os.path.join('conf', 'locale')):
            # Running inside the Django source tree itself.
            self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
            self.default_locale_path = self.locale_paths[0]
            self.invoked_for_django = True
        else:
            self.locale_paths.extend(settings.LOCALE_PATHS)
            # Allow to run makemessages inside an app dir
            if os.path.isdir('locale'):
                self.locale_paths.append(os.path.abspath('locale'))
            if self.locale_paths:
                self.default_locale_path = self.locale_paths[0]
                if not os.path.exists(self.default_locale_path):
                    os.makedirs(self.default_locale_path)
        # Build locale list
        locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
        all_locales = map(os.path.basename, locale_dirs)
        # Account for excluded locales
        if process_all:
            locales = all_locales
        else:
            locales = locale or all_locales
        locales = set(locales) - set(exclude)
        if locales:
            check_programs('msguniq', 'msgmerge', 'msgattrib')
        check_programs('xgettext')
        try:
            potfiles = self.build_potfiles()
            # Build po files for each selected locale
            for locale in locales:
                if self.verbosity > 0:
                    self.stdout.write("processing locale %s\n" % locale)
                for potfile in potfiles:
                    self.write_po_file(potfile, locale)
        finally:
            if not self.keep_pot:
                self.remove_potfiles()
    @cached_property
    def gettext_version(self):
        """The installed xgettext version as a tuple of 2 or 3 ints."""
        # Gettext tools will output system-encoded bytestrings instead of UTF-8,
        # when looking up the version. It's especially a problem on Windows.
        out, err, status = gettext_popen_wrapper(
            ['xgettext', '--version'],
            stdout_encoding=DEFAULT_LOCALE_ENCODING,
        )
        m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out)
        if m:
            return tuple(int(d) for d in m.groups() if d is not None)
        else:
            raise CommandError("Unable to get gettext version. Is it installed?")
    def build_potfiles(self):
        """
        Build pot files and apply msguniq to them.
        """
        file_list = self.find_files(".")
        self.remove_potfiles()
        self.process_files(file_list)
        potfiles = []
        for path in self.locale_paths:
            potfile = os.path.join(path, '%s.pot' % str(self.domain))
            if not os.path.exists(potfile):
                continue
            args = ['msguniq'] + self.msguniq_options + [potfile]
            msgs, errors, status = gettext_popen_wrapper(args)
            if errors:
                if status != STATUS_OK:
                    raise CommandError(
                        "errors happened while running msguniq\n%s" % errors)
                elif self.verbosity > 0:
                    self.stdout.write(errors)
            with io.open(potfile, 'w', encoding='utf-8') as fp:
                fp.write(msgs)
            potfiles.append(potfile)
        return potfiles
    def remove_potfiles(self):
        # Delete any leftover POT file in each known locale path.
        for path in self.locale_paths:
            pot_path = os.path.join(path, '%s.pot' % str(self.domain))
            if os.path.exists(pot_path):
                os.unlink(pot_path)
    def find_files(self, root):
        """
        Helper method to get all files in the given root. Also check that there
        is a matching locale dir for each file.
        """
        def is_ignored(path, ignore_patterns):
            """
            Check if the given path should be ignored or not.
            """
            filename = os.path.basename(path)
            ignore = lambda pattern: (fnmatch.fnmatchcase(filename, pattern) or
                                      fnmatch.fnmatchcase(path, pattern))
            return any(ignore(pattern) for pattern in ignore_patterns)
        ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns]
        dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}
        norm_patterns = []
        # Normalize directory patterns like 'foo/*' down to just 'foo'.
        for p in ignore_patterns:
            for dir_suffix in dir_suffixes:
                if p.endswith(dir_suffix):
                    norm_patterns.append(p[:-len(dir_suffix)])
                    break
            else:
                norm_patterns.append(p)
        all_files = []
        ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p]
        for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
            for dirname in dirnames[:]:
                if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or
                        os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots):
                    dirnames.remove(dirname)
                    if self.verbosity > 1:
                        self.stdout.write('ignoring directory %s\n' % dirname)
                elif dirname == 'locale':
                    # 'locale' dirs hold catalogs, not sources; record them.
                    dirnames.remove(dirname)
                    self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
            for filename in filenames:
                file_path = os.path.normpath(os.path.join(dirpath, filename))
                file_ext = os.path.splitext(filename)[1]
                if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns):
                    if self.verbosity > 1:
                        self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
                else:
                    locale_dir = None
                    for path in self.locale_paths:
                        if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
                            locale_dir = path
                            break
                    if not locale_dir:
                        locale_dir = self.default_locale_path
                    if not locale_dir:
                        locale_dir = NO_LOCALE_DIR
                    all_files.append(self.translatable_file_class(dirpath, filename, locale_dir))
        return sorted(all_files)
    def process_files(self, file_list):
        """
        Group translatable files by locale directory and run pot file build
        process for each group.
        """
        file_groups = {}
        for translatable in file_list:
            file_group = file_groups.setdefault(translatable.locale_dir, [])
            file_group.append(translatable)
        for locale_dir, files in file_groups.items():
            self.process_locale_dir(locale_dir, files)
    def process_locale_dir(self, locale_dir, files):
        """
        Extract translatable literals from the specified files, creating or
        updating the POT file for a given locale directory.

        Uses the xgettext GNU gettext utility.
        """
        build_files = []
        for translatable in files:
            if self.verbosity > 1:
                self.stdout.write('processing file %s in %s\n' % (
                    translatable.file, translatable.dirpath
                ))
            if self.domain not in ('djangojs', 'django'):
                continue
            build_file = self.build_file_class(self, self.domain, translatable)
            try:
                build_file.preprocess()
            except UnicodeDecodeError as e:
                self.stdout.write(
                    'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
                        translatable.file, translatable.dirpath, e,
                    )
                )
                continue
            build_files.append(build_file)
        if self.domain == 'djangojs':
            # NOTE(review): uses the is_templatized flag of the last file
            # from the loop above for the whole invocation.
            is_templatized = build_file.is_templatized
            args = [
                'xgettext',
                '-d', self.domain,
                '--language=%s' % ('C' if is_templatized else 'JavaScript',),
                '--keyword=gettext_noop',
                '--keyword=gettext_lazy',
                '--keyword=ngettext_lazy:1,2',
                '--keyword=pgettext:1c,2',
                '--keyword=npgettext:1c,2,3',
                '--output=-',
            ]
        elif self.domain == 'django':
            args = [
                'xgettext',
                '-d', self.domain,
                '--language=Python',
                '--keyword=gettext_noop',
                '--keyword=gettext_lazy',
                '--keyword=ngettext_lazy:1,2',
                '--keyword=ugettext_noop',
                '--keyword=ugettext_lazy',
                '--keyword=ungettext_lazy:1,2',
                '--keyword=pgettext:1c,2',
                '--keyword=npgettext:1c,2,3',
                '--keyword=pgettext_lazy:1c,2',
                '--keyword=npgettext_lazy:1c,2,3',
                '--output=-',
            ]
        else:
            return
        input_files = [bf.work_path for bf in build_files]
        with NamedTemporaryFile(mode='w+') as input_files_list:
            input_files_list.write(force_str('\n'.join(input_files), encoding=DEFAULT_LOCALE_ENCODING))
            input_files_list.flush()
            args.extend(['--files-from', input_files_list.name])
            args.extend(self.xgettext_options)
            msgs, errors, status = gettext_popen_wrapper(args)
        if errors:
            if status != STATUS_OK:
                for build_file in build_files:
                    build_file.cleanup()
                raise CommandError(
                    'errors happened while running xgettext on %s\n%s' %
                    ('\n'.join(input_files), errors)
                )
            elif self.verbosity > 0:
                # Print warnings
                self.stdout.write(errors)
        if msgs:
            if locale_dir is NO_LOCALE_DIR:
                file_path = os.path.normpath(build_files[0].path)
                raise CommandError(
                    'Unable to find a locale path to store translations for '
                    'file %s' % file_path
                )
            for build_file in build_files:
                msgs = build_file.postprocess_messages(msgs)
            potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))
            write_pot_file(potfile, msgs)
        for build_file in build_files:
            build_file.cleanup()
    def write_po_file(self, potfile, locale):
        """
        Creates or updates the PO file for self.domain and :param locale:.
        Uses contents of the existing :param potfile:.

        Uses msgmerge, and msgattrib GNU gettext utilities.
        """
        basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        pofile = os.path.join(basedir, '%s.po' % str(self.domain))
        if os.path.exists(pofile):
            args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile]
            msgs, errors, status = gettext_popen_wrapper(args)
            if errors:
                if status != STATUS_OK:
                    raise CommandError(
                        "errors happened while running msgmerge\n%s" % errors)
                elif self.verbosity > 0:
                    self.stdout.write(errors)
        else:
            with io.open(potfile, 'r', encoding='utf-8') as fp:
                msgs = fp.read()
            if not self.invoked_for_django:
                msgs = self.copy_plural_forms(msgs, locale)
        msgs = msgs.replace(
            "#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
        with io.open(pofile, 'w', encoding='utf-8') as fp:
            fp.write(msgs)
        if self.no_obsolete:
            args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile]
            msgs, errors, status = gettext_popen_wrapper(args)
            if errors:
                if status != STATUS_OK:
                    raise CommandError(
                        "errors happened while running msgattrib\n%s" % errors)
                elif self.verbosity > 0:
                    self.stdout.write(errors)
    def copy_plural_forms(self, msgs, locale):
        """
        Copies plural forms header contents from a Django catalog of locale to
        the msgs string, inserting it at the right place. msgs should be the
        contents of a newly created .po file.
        """
        django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__))))
        if self.domain == 'djangojs':
            domains = ('djangojs', 'django')
        else:
            domains = ('django',)
        for domain in domains:
            django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
            if os.path.exists(django_po):
                with io.open(django_po, 'r', encoding='utf-8') as fp:
                    m = plural_forms_re.search(fp.read())
                if m:
                    plural_form_line = force_str(m.group('value'))
                    if self.verbosity > 1:
                        self.stdout.write("copying plural forms: %s\n" % plural_form_line)
                    lines = []
                    found = False
                    for line in msgs.split('\n'):
                        if not found and (not line or plural_forms_re.search(line)):
                            line = '%s\n' % plural_form_line
                            found = True
                        lines.append(line)
                    msgs = '\n'.join(lines)
                    break
        return msgs
| mit |
ministryofjustice/cla_public | cla_public/apps/base/views.py | 1 | 7832 | # coding: utf-8
"""Base app views"""
import os
import logging
import datetime
import re
from urlparse import urlparse, urljoin
from flask import abort, current_app, jsonify, redirect, render_template, session, url_for, request, views
from flask.ext.babel import lazy_gettext as _
import cla_public.apps.base.filters # noqa: F401
import cla_public.apps.base.extensions # noqa: F401
from cla_public.apps.base import base, healthchecks
from cla_public.apps.base.forms import FeedbackForm, ReasonsForContactingForm
from cla_public.apps.checker.api import post_reasons_for_contacting
from cla_public.libs import zendesk
from cla_public.libs.views import AjaxOrNormalMixin, EnsureSessionExists, HasFormMixin
log = logging.getLogger(__name__)
@base.route("/")
def index():
    """Landing page.

    Clears any stale session data first. On production the canonical
    entry point is the GOV.UK start page, so redirect there instead of
    rendering the local template.
    """
    session.clear()
    on_production = current_app.config["CLA_ENV"] == "production"
    if on_production:
        return redirect(current_app.config.get("GOV_UK_START_PAGE"))
    return render_template("index.html")
@base.route("/cookies")
def cookies():
    """Render the static cookies information page."""
    template_name = "cookies.html"
    return render_template(template_name)
@base.route("/cookie-settings")
def cookie_settings():
    """Render the page where users manage their cookie preferences."""
    template_name = "cookie-settings.html"
    return render_template(template_name)
@base.route("/privacy")
def privacy():
    """Render the static privacy policy page."""
    template_name = "privacy.html"
    return render_template(template_name)
@base.route("/online-safety")
def online_safety():
    """Render the static online-safety guidance page."""
    template_name = "online-safety.html"
    return render_template(template_name)
@base.route("/accessibility-statement")
def accessibility():
    """Render the static accessibility statement page."""
    template_name = "accessibility-statement.html"
    return render_template(template_name)
class AbstractFeedbackView(AjaxOrNormalMixin, HasFormMixin, views.MethodView):
    """
    Abstract view for feedback forms.

    Concrete subclasses must set ``form_class`` (consumed by
    ``HasFormMixin`` -- TODO confirm, the mixin is not visible here),
    ``template`` and ``redirect_to``, and implement :meth:`post`.
    """
    # Template rendered by render_form(); set by concrete subclasses.
    template = None
    # Endpoint name passed to url_for() after a successful submission.
    redirect_to = None
    def __init__(self):
        # Fail fast if a subclass forgot to configure its class attributes.
        if not self.form_class or not self.template or not self.redirect_to:
            raise NotImplementedError
        super(AbstractFeedbackView, self).__init__()
    @property
    def default_form_data(self):
        # Pre-populate the form with the page the user came from.
        return {"referrer": request.referrer or "Unknown"}
    def get(self, *args, **kwargs):
        # A plain GET just shows the (possibly re-rendered) form.
        return self.render_form(*args, **kwargs)
    def post(self):
        raise NotImplementedError
    def render_form(self, *args, **kwargs):
        # Only the first non-field error (if any) is surfaced to the template.
        # NOTE(review): kwargs appear to be supplied by the mixins'
        # return_form_errors path -- confirm against HasFormMixin.
        non_field_errors = kwargs.get("non_field_errors")
        non_field_error = non_field_errors[0] if non_field_errors else None
        return render_template(self.template, form=self.form, non_field_error=non_field_error)
    def success_redirect(self):
        return self.redirect(url_for(self.redirect_to))
class Feedback(AbstractFeedbackView):
    """General feedback form whose submissions become Zendesk tickets."""
    form_class = FeedbackForm
    template = "feedback.html"
    redirect_to = "base.feedback_confirmation"
    def post(self):
        """Validate the form and raise a Zendesk ticket from its payload."""
        extra = {}
        if self.form.validate_on_submit():
            ticket_response = zendesk.create_ticket(self.form.api_payload())
            if ticket_response.status_code < 300:
                return self.success_redirect()
            # Ticket creation failed; surface a generic non-field error.
            extra["non_field_errors"] = [_("Something went wrong. Please try again.")]
        return self.return_form_errors(**extra)
# Register the feedback view: GET shows the form, POST submits it.
base.add_url_rule("/feedback", view_func=Feedback.as_view("feedback"), methods=("GET", "POST"))
@base.route("/feedback/confirmation")
def feedback_confirmation():
    """Thank-you page shown after feedback has been submitted."""
    template_name = "feedback-confirmation.html"
    return render_template(template_name)
class ReasonsForContacting(EnsureSessionExists, AbstractFeedbackView):
    """
    Interstitial form to ascertain why users are dropping out of
    the checker service.
    """
    # Session key holding the backend reference returned when the reasons
    # are stored via the API.
    MODEL_REF_SESSION_KEY = "reason_for_contact"
    # Session key holding the comma-joined reasons (presumably read later
    # for Google Analytics tracking -- confirm with the reporting code).
    GA_SESSION_KEY = "reason_for_contact_ga"
    form_class = ReasonsForContactingForm
    template = "reasons-for-contacting.html"
    redirect_to = "contact.get_in_touch"
    def render_form(self):
        # Strip "scheme://host[:port]/" so only a same-site relative path is
        # echoed back into the template.
        referrer = re.sub(r"^(.*:)//([A-Za-z0-9-.]+)(:[0-9]+)?/", "/", request.referrer or "")
        return render_template(self.template, form=self.form, referrer=referrer)
    def post(self):
        if self.form.validate_on_submit():
            if len(self.form.reasons.data) == 0:
                # allows skipping form if nothing is selected
                return self.success_redirect()
            session[self.GA_SESSION_KEY] = ", ".join(self.form.reasons.data)
            response = post_reasons_for_contacting(form=self.form)
            # ignore if reasons not saved as they're not vital
            if response and "reference" in response:
                session[self.MODEL_REF_SESSION_KEY] = response["reference"]
            return self.success_redirect()
        return self.return_form_errors()
# Register the drop-out interstitial: GET shows the form, POST records it.
base.add_url_rule(
    "/reasons-for-contacting",
    view_func=ReasonsForContacting.as_view("reasons_for_contacting"),
    methods=("GET", "POST"),
)
@base.route("/session")
def show_session():
    """Debug helper: dump the current session as JSON (404 outside debug)."""
    if not current_app.debug:
        abort(404)
    return jsonify(session)
@base.route("/session-expired")
def session_expired():
    """Wipe the session and tell the user it has timed out."""
    session.clear()
    template_name = "session-expired.html"
    return render_template(template_name)
@base.route("/session_keep_alive")
def session_keep_alive():
    """AJAX endpoint that marks an active, non-permanent session permanent."""
    needs_upgrade = bool(session) and not session.permanent
    if needs_upgrade:
        session.permanent = True
    return jsonify({"session": "OK"})
@base.route("/session_end")
def session_end():
    """Schedule the current session to expire roughly 20 seconds from now."""
    if session:
        # Only touch .permanent when needed, to avoid marking the session
        # modified unnecessarily.
        if not session.permanent:
            session.permanent = True
        expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=20)
        session.expires_override = expiry
    return jsonify({"session": "CLEAR"})
@base.route("/start")
def get_started():
    """
    Start a fresh checker session; honours the CONTACT_ONLY kill switch
    by sending users straight to the contact page when the checker is
    disabled.
    """
    session.clear()
    session.checker["started"] = datetime.datetime.now()
    # Forward the Google Analytics linker token, when present.
    forwarded = {}
    ga_token = request.args.get("_ga")
    if ga_token is not None:
        forwarded["_ga"] = ga_token
    if current_app.config.get("CONTACT_ONLY"):
        session.checker["contact_only"] = "yes"
        return redirect(url_for("contact.get_in_touch", **forwarded))
    return redirect(url_for("scope.diagnosis", **forwarded))
def is_safe_url(url, host_url=None):
    """
    Return True if ``url`` is a safe redirect target.

    A URL is safe when, resolved against ``host_url``, it is an http(s)
    URL on the same host -- this is the guard against open-redirect
    attacks via user-supplied ``next`` parameters.

    :param url: candidate redirect target (absolute or relative).
    :param host_url: base URL to compare against. Defaults to the current
        Flask request's host URL, so existing callers are unaffected;
        passing it explicitly decouples the check from the request
        context (and makes it unit-testable).
    """
    if host_url is None:
        host_url = request.host_url
    ref_url = urlparse(host_url)
    test_url = urlparse(urljoin(host_url, url))
    return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
def next_url():
    """Pick a safe URL to return the user to: ?next=, then referrer, then index."""
    candidates = (request.values.get("next"), request.referrer)
    for candidate in candidates:
        if candidate and is_safe_url(candidate):
            return candidate
    return url_for(".index")
@base.route("/locale/<locale>")
def set_locale(locale):
    """Persist the user's language choice in a 30-day ``locale`` cookie."""
    known_codes = [code for code, label in current_app.config["LANGUAGES"]]
    # Only the 2-letter language prefix is validated (e.g. "en" of "en_GB").
    if locale[:2] not in known_codes:
        abort(404)
    response = redirect(next_url())
    expires = datetime.datetime.now() + datetime.timedelta(days=30)
    response.set_cookie("locale", locale, expires=expires)
    return response
@base.route("/call-me-back")
def redirect_to_contact():
    """Forward the old /call-me-back URL to the contact page."""
    return redirect(url_for("contact.get_in_touch"))
@base.route("/status.json")
def smoke_tests():
    """
    Run smoke tests and return results as JSON datastructure
    """
    # Imported lazily so the test machinery is only loaded when this
    # monitoring endpoint is actually hit.
    from cla_common.smoketest import smoketest
    from cla_public.apps.checker.tests.smoketests import SmokeTests
    return jsonify(smoketest(SmokeTests))
@base.route("/ping.json")
def ping():
    """Expose build metadata (version, date, commit, tag) for monitoring."""
    build_info = {
        # APPVERSION takes precedence over the older APP_VERSION variable.
        "version_number": os.environ.get("APPVERSION", os.environ.get("APP_VERSION")),
        "build_date": os.environ.get("APP_BUILD_DATE"),
        "commit_id": os.environ.get("APP_GIT_COMMIT"),
        "build_tag": os.environ.get("APP_BUILD_TAG"),
    }
    return jsonify(build_info)
@base.route("/healthcheck.json")
def healthcheck():
    """Report disk and backend-API health; 503 unless all checks pass."""
    checks = {"disk": healthchecks.check_disk(), "Backend API test": healthchecks.check_backend_api()}
    all_healthy = all(item["status"] == healthchecks.HEALTHY for _key, item in checks.iteritems())
    result = jsonify(checks)
    result.status_code = 200 if all_healthy else 503
    return result
@base.route("/maintenance")
def maintenance_page():
    """Static page shown while the service is down for maintenance."""
    template_name = "maintenance.html"
    return render_template(template_name)
| mit |
daymer/xWIKI_Karma | CustomModules/mysql-connector-python-2.1.7/tests/test_abstracts.py | 1 | 10372 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for mysql.connector.abstracts
"""
from decimal import Decimal
from operator import attrgetter
import unittest
import tests
from tests import PY2, foreach_cnx
from mysql.connector.connection import MySQLConnection
from mysql.connector.constants import RefreshOption
from mysql.connector import errors
try:
from mysql.connector.connection_cext import CMySQLConnection
except ImportError:
# Test without C Extension
CMySQLConnection = None
class ConnectionSubclasses(tests.MySQLConnectorTests):
"""Tests for any subclass of MySQLConnectionAbstract
"""
    def asEq(self, exp, *cases):
        """Assert that every value in *cases* equals *exp*."""
        for case in cases:
            self.assertEqual(exp, case)
@foreach_cnx()
def test_properties_getter(self):
properties = [
(self.config['user'], 'user'),
(self.config['host'], 'server_host'),
(self.config['port'], 'server_port'),
(self.config['unix_socket'], 'unix_socket'),
(self.config['database'], 'database')
]
for exp, property in properties:
f = attrgetter(property)
self.asEq(exp, f(self.cnx))
@foreach_cnx()
def test_time_zone(self):
orig = self.cnx.info_query("SELECT @@session.time_zone")[0]
self.assertEqual(orig, self.cnx.time_zone)
self.cnx.time_zone = "+02:00"
self.assertEqual("+02:00", self.cnx.time_zone)
@foreach_cnx()
def test_sql_mode(self):
orig = self.cnx.info_query("SELECT @@session.sql_mode")[0]
self.assertEqual(orig, self.cnx.sql_mode)
try:
self.cnx.sql_mode = 'SPAM'
except errors.ProgrammingError:
pass # excepted
else:
self.fail("ProgrammingError not raises")
# Set SQL Mode to a list of modes
if tests.MYSQL_VERSION[0:3] < (5, 7, 4):
exp = ('STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,'
'NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,'
'NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION')
else:
exp = ('STRICT_TRANS_TABLES,STRICT_ALL_TABLES,TRADITIONAL,'
'NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION')
try:
self.cnx.sql_mode = exp
except errors.Error as err:
self.fail("Failed setting SQL Mode with multiple "
"modes: {0}".format(str(err)))
self.assertEqual(exp, self.cnx._sql_mode)
# SQL Modes must be empty
self.cnx.sql_mode = ''
self.assertEqual('', self.cnx.sql_mode)
# Set SQL Mode and check
sql_mode = exp = 'STRICT_ALL_TABLES'
self.cnx.sql_mode = sql_mode
self.assertEqual(exp, self.cnx.sql_mode)
# Unset the SQL Mode again
self.cnx.sql_mode = ''
self.assertEqual('', self.cnx.sql_mode)
@foreach_cnx()
def test_in_transaction(self):
self.cnx.cmd_query('START TRANSACTION')
self.assertTrue(self.cnx.in_transaction)
self.cnx.cmd_query('ROLLBACK')
self.assertFalse(self.cnx.in_transaction)
# AUTO_COMMIT turned ON
self.cnx.autocommit = True
self.assertFalse(self.cnx.in_transaction)
self.cnx.cmd_query('START TRANSACTION')
self.assertTrue(self.cnx.in_transaction)
@foreach_cnx()
def test_disconnect(self):
self.cnx.disconnect()
self.assertFalse(self.cnx.is_connected())
@foreach_cnx()
def test_is_connected(self):
"""Check connection to MySQL Server"""
self.assertEqual(True, self.cnx.is_connected())
self.cnx.disconnect()
self.assertEqual(False, self.cnx.is_connected())
@foreach_cnx()
def test_info_query(self):
queries = [
("SELECT 1",
(1,)),
("SELECT 'ham', 'spam'",
((u'ham', u'spam')))
]
for query, exp in queries:
self.assertEqual(exp, self.cnx.info_query(query))
@foreach_cnx()
def test_cmd_init_db(self):
self.assertRaises(errors.ProgrammingError,
self.cnx.cmd_init_db, 'unknown_database')
self.cnx.cmd_init_db(u'INFORMATION_SCHEMA')
self.assertEqual('INFORMATION_SCHEMA', self.cnx.database.upper())
self.cnx.cmd_init_db('mysql')
self.assertEqual(u'mysql', self.cnx.database)
self.cnx.cmd_init_db('myconnpy')
self.assertEqual(u'myconnpy', self.cnx.database)
@foreach_cnx()
def test_reset_session(self):
exp = [True, u'STRICT_ALL_TABLES', u'-09:00', 33]
self.cnx.autocommit = exp[0]
self.cnx.sql_mode = exp[1]
self.cnx.time_zone = exp[2]
self.cnx.set_charset_collation(exp[3])
user_variables = {'ham': '1', 'spam': '2'}
session_variables = {'wait_timeout': 100000}
self.cnx.reset_session(user_variables, session_variables)
self.assertEqual(exp, [self.cnx.autocommit, self.cnx.sql_mode,
self.cnx.time_zone, self.cnx._charset_id])
exp_user_variables = {'ham': '1', 'spam': '2'}
exp_session_variables = {'wait_timeout': 100000}
for key, value in exp_user_variables.items():
row = self.cnx.info_query("SELECT @{0}".format(key))
self.assertEqual(value, row[0])
for key, value in exp_session_variables.items():
row = self.cnx.info_query("SELECT @@session.{0}".format(key))
self.assertEqual(value, row[0])
    @unittest.skipIf(tests.MYSQL_VERSION > (5, 7, 10),
                     "As of MySQL 5.7.11, mysql_refresh() is deprecated")
    @foreach_cnx()
    def test_cmd_refresh(self):
        """cmd_refresh() should succeed and actually flush server status."""
        refresh = RefreshOption.LOG | RefreshOption.THREADS
        exp = {'insert_id': 0, 'affected_rows': 0,
               'field_count': 0, 'warning_count': 0,
               'status_flag': 0}
        result = self.cnx.cmd_refresh(refresh)
        # Symmetric difference: drop any key present in only one of the two
        # dicts, so the comparison only covers fields both connection
        # implementations report.
        for key in set(result.keys()) ^ set(exp.keys()):
            try:
                del result[key]
            except KeyError:
                del exp[key]
        self.assertEqual(exp, result)
        query = "SHOW GLOBAL STATUS LIKE 'Uptime_since_flush_status'"
        pre_flush = int(self.cnx.info_query(query)[1])
        self.cnx.cmd_refresh(RefreshOption.STATUS)
        post_flush = int(self.cnx.info_query(query)[1])
        # Flushing resets the uptime-since-flush counter, so the second
        # reading can only be lower (or equal within the same second).
        self.assertTrue(post_flush <= pre_flush)
@foreach_cnx()
def test_cmd_quit(self):
self.cnx.cmd_quit()
self.assertFalse(self.cnx.is_connected())
@unittest.skipIf(tests.MYSQL_VERSION >= (8, 0, 1),
"As of MySQL 8.0.1, CMD_SHUTDOWN is not recognized.")
@foreach_cnx()
def test_cmd_shutdown(self):
server = tests.MYSQL_SERVERS[0]
# We make sure the connection is re-established.
self.cnx = self.cnx.__class__(**self.config)
self.cnx.cmd_shutdown()
if not server.wait_down():
self.fail("[{0}] ".format(self.cnx.__class__.__name__) +
"MySQL not shut down after cmd_shutdown()")
self.assertRaises(errors.Error, self.cnx.cmd_shutdown)
server.start()
if not server.wait_up():
self.fail("Failed restarting MySQL server after test")
@foreach_cnx()
def test_cmd_statistics(self):
exp = {
'Uptime': int,
'Open tables': int,
'Queries per second avg': Decimal,
'Slow queries': int,
'Threads': int,
'Questions': int,
'Flush tables': int,
'Opens': int
}
stat = self.cnx.cmd_statistics()
self.assertEqual(len(exp), len(stat))
for key, type_ in exp.items():
self.assertTrue(key in stat)
self.assertTrue(isinstance(stat[key], type_))
@foreach_cnx()
def test_cmd_process_info(self):
self.assertRaises(errors.NotSupportedError,
self.cnx.cmd_process_info)
@foreach_cnx()
def test_cmd_process_kill(self):
other_cnx = self.cnx.__class__(**self.config)
pid = other_cnx.connection_id
self.cnx.cmd_process_kill(pid)
self.assertFalse(other_cnx.is_connected())
@foreach_cnx()
def test_start_transaction(self):
self.cnx.start_transaction()
self.assertTrue(self.cnx.in_transaction)
self.cnx.rollback()
self.cnx.start_transaction(consistent_snapshot=True)
self.assertTrue(self.cnx.in_transaction)
self.assertRaises(errors.ProgrammingError,
self.cnx.start_transaction)
self.cnx.rollback()
levels = ['READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ',
'SERIALIZABLE',
'READ-UNCOMMITTED', 'READ-COMMITTED', 'REPEATABLE-READ',
'SERIALIZABLE']
for level in levels:
level = level.replace(' ', '-')
self.cnx.start_transaction(isolation_level=level)
self.assertTrue(self.cnx.in_transaction)
self.cnx.rollback()
self.assertRaises(ValueError,
self.cnx.start_transaction,
isolation_level='spam')
| apache-2.0 |
synergeticsedx/deployment-wipro | common/djangoapps/enrollment/tests/test_api.py | 16 | 12613 | """
Tests for student enrollment.
"""
from mock import patch, Mock
import ddt
from nose.tools import raises
import unittest
from django.test.utils import override_settings
from django.conf import settings
from course_modes.models import CourseMode
from enrollment import api
from enrollment.errors import EnrollmentApiLoadError, EnrollmentNotFoundError, CourseModeNotFoundError
from enrollment.tests import fake_data_api
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
@override_settings(ENROLLMENT_DATA_API="enrollment.tests.fake_data_api")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EnrollmentTest(CacheIsolationTestCase):
"""
Test student enrollment, especially with different course modes.
"""
USERNAME = "Bob"
COURSE_ID = "some/great/course"
ENABLED_CACHES = ['default']
def setUp(self):
super(EnrollmentTest, self).setUp()
fake_data_api.reset()
@ddt.data(
# Default (no course modes in the database)
# Expect automatically being enrolled as "honor".
([], 'honor'),
# Audit / Verified / Honor
# We should always go to the "choose your course" page.
# We should also be enrolled as "honor" by default.
(['honor', 'verified', 'audit'], 'honor'),
# Check for professional ed happy path.
(['professional'], 'professional'),
(['no-id-professional'], 'no-id-professional')
)
@ddt.unpack
def test_enroll(self, course_modes, mode):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode=mode)
self.assertIsNotNone(result)
self.assertEquals(result['student'], self.USERNAME)
self.assertEquals(result['course']['course_id'], self.COURSE_ID)
self.assertEquals(result['mode'], mode)
get_result = api.get_enrollment(self.USERNAME, self.COURSE_ID)
self.assertEquals(result, get_result)
@ddt.data(
([CourseMode.DEFAULT_MODE_SLUG, 'verified', 'credit'], CourseMode.DEFAULT_MODE_SLUG),
(['audit', 'verified', 'credit'], 'audit'),
(['honor', 'verified', 'credit'], 'honor'),
)
@ddt.unpack
def test_enroll_no_mode_success(self, course_modes, expected_mode):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
with patch('enrollment.api.CourseMode.modes_for_course') as mock_modes_for_course:
mock_course_modes = [Mock(slug=mode) for mode in course_modes]
mock_modes_for_course.return_value = mock_course_modes
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, self.COURSE_ID)
self.assertIsNotNone(result)
self.assertEquals(result['student'], self.USERNAME)
self.assertEquals(result['course']['course_id'], self.COURSE_ID)
self.assertEquals(result['mode'], expected_mode)
@ddt.data(
['professional'],
['verified'],
['verified', 'professional'],
)
@raises(CourseModeNotFoundError)
def test_enroll_no_mode_error(self, course_modes):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
# Enroll in the course and verify that we raise CourseModeNotFoundError
api.add_enrollment(self.USERNAME, self.COURSE_ID)
@raises(CourseModeNotFoundError)
def test_prof_ed_enroll(self):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=['professional'])
# Enroll in the course and verify the URL we get sent to
api.add_enrollment(self.USERNAME, self.COURSE_ID, mode='verified')
@ddt.data(
# Default (no course modes in the database)
# Expect that users are automatically enrolled as "honor".
([], 'honor'),
# Audit / Verified / Honor
# We should always go to the "choose your course" page.
# We should also be enrolled as "honor" by default.
(['honor', 'verified', 'audit'], 'honor'),
# Check for professional ed happy path.
(['professional'], 'professional'),
(['no-id-professional'], 'no-id-professional')
)
@ddt.unpack
def test_unenroll(self, course_modes, mode):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode=mode)
self.assertIsNotNone(result)
self.assertEquals(result['student'], self.USERNAME)
self.assertEquals(result['course']['course_id'], self.COURSE_ID)
self.assertEquals(result['mode'], mode)
self.assertTrue(result['is_active'])
result = api.update_enrollment(self.USERNAME, self.COURSE_ID, mode=mode, is_active=False)
self.assertIsNotNone(result)
self.assertEquals(result['student'], self.USERNAME)
self.assertEquals(result['course']['course_id'], self.COURSE_ID)
self.assertEquals(result['mode'], mode)
self.assertFalse(result['is_active'])
@raises(EnrollmentNotFoundError)
def test_unenroll_not_enrolled_in_course(self):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=['honor'])
api.update_enrollment(self.USERNAME, self.COURSE_ID, mode='honor', is_active=False)
@ddt.data(
# Simple test of honor and verified.
([
{'course_id': 'the/first/course', 'course_modes': [], 'mode': 'honor'},
{'course_id': 'the/second/course', 'course_modes': ['honor', 'verified'], 'mode': 'verified'}
]),
# No enrollments
([]),
# One Enrollment
([
{'course_id': 'the/third/course', 'course_modes': ['honor', 'verified', 'audit'], 'mode': 'audit'}
]),
)
def test_get_all_enrollments(self, enrollments):
for enrollment in enrollments:
fake_data_api.add_course(enrollment['course_id'], course_modes=enrollment['course_modes'])
api.add_enrollment(self.USERNAME, enrollment['course_id'], enrollment['mode'])
result = api.get_enrollments(self.USERNAME)
self.assertEqual(len(enrollments), len(result))
for result_enrollment in result:
self.assertIn(
result_enrollment['course']['course_id'],
[enrollment['course_id'] for enrollment in enrollments]
)
def test_update_enrollment(self):
# Add fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit'])
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode='audit')
get_result = api.get_enrollment(self.USERNAME, self.COURSE_ID)
self.assertEquals(result, get_result)
result = api.update_enrollment(self.USERNAME, self.COURSE_ID, mode='honor')
self.assertEquals('honor', result['mode'])
result = api.update_enrollment(self.USERNAME, self.COURSE_ID, mode='verified')
self.assertEquals('verified', result['mode'])
def test_update_enrollment_attributes(self):
# Add fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit', 'credit'])
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode='audit')
get_result = api.get_enrollment(self.USERNAME, self.COURSE_ID)
self.assertEquals(result, get_result)
enrollment_attributes = [
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
}
]
result = api.update_enrollment(
self.USERNAME, self.COURSE_ID, mode='credit', enrollment_attributes=enrollment_attributes
)
self.assertEquals('credit', result['mode'])
attributes = api.get_enrollment_attributes(self.USERNAME, self.COURSE_ID)
self.assertEquals(enrollment_attributes[0], attributes[0])
def test_get_course_details(self):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit'])
result = api.get_course_enrollment_details(self.COURSE_ID)
self.assertEquals(result['course_id'], self.COURSE_ID)
self.assertEquals(3, len(result['course_modes']))
@override_settings(ENROLLMENT_DATA_API='foo.bar.biz.baz')
@raises(EnrollmentApiLoadError)
def test_data_api_config_error(self):
# Enroll in the course and verify the URL we get sent to
api.add_enrollment(self.USERNAME, self.COURSE_ID, mode='audit')
    def test_caching(self):
        """Enrollment details must be served from cache once fetched."""
        # Add fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit'])
        # Hit the fake data API.
        details = api.get_course_enrollment_details(self.COURSE_ID)
        # Reset the fake data API; the second call must rely on the cache,
        # as the backing store no longer has the course.
        fake_data_api.reset()
        cached_details = api.get_course_enrollment_details(self.COURSE_ID)
        # The data matches
        self.assertEqual(len(details['course_modes']), 3)
        self.assertEqual(details, cached_details)
def test_update_enrollment_expired_mode_with_error(self):
""" Verify that if verified mode is expired and include expire flag is
false then enrollment cannot be updated. """
self.assert_add_modes_with_enrollment('audit')
# On updating enrollment mode to verified it should the raise the error.
with self.assertRaises(CourseModeNotFoundError):
self.assert_update_enrollment(mode='verified', include_expired=False)
def test_update_enrollment_with_expired_mode(self):
""" Verify that if verified mode is expired then enrollment can be
updated if include_expired flag is true."""
self.assert_add_modes_with_enrollment('audit')
# enrollment in verified mode will work fine with include_expired=True
self.assert_update_enrollment(mode='verified', include_expired=True)
@ddt.data(True, False)
def test_unenroll_with_expired_mode(self, include_expired):
""" Verify that un-enroll will work fine for expired courses whether include_expired
is true or false."""
self.assert_add_modes_with_enrollment('verified')
self.assert_update_enrollment(mode='verified', is_active=False, include_expired=include_expired)
    def assert_add_modes_with_enrollment(self, enrollment_mode):
        """ Dry method for adding fake course enrollment information to fake
        data API and enroll the student in the course. Afterwards the
        course's 'verified' mode is marked as expired. """
        fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit'])
        result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode=enrollment_mode)
        get_result = api.get_enrollment(self.USERNAME, self.COURSE_ID)
        self.assertEquals(result, get_result)
        # set the course verify mode as expire.
        fake_data_api.set_expired_mode(self.COURSE_ID)
def assert_update_enrollment(self, mode, is_active=True, include_expired=False):
""" Dry method for updating enrollment."""
result = api.update_enrollment(
self.USERNAME, self.COURSE_ID, mode=mode, is_active=is_active, include_expired=include_expired
)
self.assertEquals(mode, result['mode'])
self.assertIsNotNone(result)
self.assertEquals(result['student'], self.USERNAME)
self.assertEquals(result['course']['course_id'], self.COURSE_ID)
self.assertEquals(result['mode'], mode)
if is_active:
self.assertTrue(result['is_active'])
else:
self.assertFalse(result['is_active'])
| agpl-3.0 |
captain-pool/GSOC | E3_Distill_ESRGAN/main.py | 1 | 5562 | """
Compressing GANs using Knowledge Distillation.
Teacher GAN: ESRGAN (https://github.com/captain-pool/E2_ESRGAN)
Citation:
@article{DBLP:journals/corr/abs-1902-00159,
author = {Angeline Aguinaldo and
Ping{-}Yeh Chiang and
Alexander Gain and
Ameya Patil and
Kolten Pearson and
Soheil Feizi},
title = {Compressing GANs using Knowledge Distillation},
journal = {CoRR},
volume = {abs/1902.00159},
year = {2019},
url = {http://arxiv.org/abs/1902.00159},
archivePrefix = {arXiv},
eprint = {1902.00159},
timestamp = {Tue, 21 May 2019 18:03:39 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1902-00159},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
import os
from absl import logging
import argparse
from libs import lazy_loader
from libs import model
from libs import settings
import tensorflow as tf
def train_and_export(**kwargs):
  """ Train and Export Compressed ESRGAN
      Args:
        config: path to student config file.
        logdir: path to logging directory.
        modeldir: Path to store the checkpoints and exported model.
        datadir: Path to custom data directory (sharded TFRecords).
        tpu: Name / address of the TPU to train on (optional).
        type: training phase, "comparative" or "adversarial".
        export_only: if True, skip training and only export the model.
  """
  lazy = lazy_loader.LazyLoader()
  student_settings = settings.Settings(
      kwargs["config"], use_student_settings=True)
  # Lazy importing dependencies from teacher
  lazy.import_("teacher_imports", parent="libs", return_=False)
  lazy.import_("teacher", parent="libs.models", return_=False)
  lazy.import_("train", parent="libs", return_=False)
  lazy.import_("utils", parent="libs", return_=False)
  # Inject the lazily imported modules (teacher, train, utils, ...) into
  # module globals so the code below can refer to them by bare name.
  globals().update(lazy.import_dict)
  tf.random.set_seed(10)
  teacher_settings = settings.Settings(
      student_settings["teacher_config"], use_student_settings=False)
  # Persistent record of which training phases have already completed.
  stats = settings.Stats(os.path.join(student_settings.path, "stats.yaml"))
  strategy = utils.SingleDeviceStrategy()
  if kwargs["tpu"]:
    cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        kwargs["tpu"])
    tf.config.experimental_connect_to_host(cluster_resolver.get_master())
    tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
    strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
  device_name = utils.assign_to_worker(kwargs["tpu"])
  with tf.device(device_name), strategy.scope():
    summary_writer = tf.summary.create_file_writer(
        os.path.join(kwargs["logdir"], "student"))
    teacher_summary_writer = tf.summary.create_file_writer(
        os.path.join(kwargs["logdir"], "teacher"))
    teacher_generator = teacher.generator(out_channel=3, first_call=False)
    teacher_discriminator = teacher.discriminator(
        batch_size=teacher_settings["batch_size"])
    student_generator = (
        model.Registry
        .models[student_settings["student_network"]]())
    # LR input is the HR size scaled down 4x on height and width.
    hr_size = tf.cast(tf.convert_to_tensor([1] + student_settings['hr_size']), tf.float32)
    lr_size = tf.cast(hr_size * tf.convert_to_tensor([1, 1/4, 1/4, 1]), tf.int32)
    logging.debug("Initializing Convolutions")
    # Run one forward pass so variables are created before training.
    student_generator.unsigned_call(tf.random.normal(lr_size))
    trainer = train.Trainer(
        teacher_generator,
        teacher_discriminator,
        summary_writer,
        summary_writer_2=teacher_summary_writer,
        model_dir=kwargs["modeldir"],
        data_dir=kwargs["datadir"],
        strategy=strategy)
    # NOTE(review): an unrecognised --type silently skips training and
    # falls through to export; confirm this is intended.
    if kwargs["type"].lower().startswith("comparative"):
      trainer.train_comparative(
          student_generator,
          export_only=stats.get("comparative") or kwargs["export_only"])
      if not kwargs["export_only"]:
        stats["comparative"] = True
    elif kwargs["type"].lower().startswith("adversarial"):
      trainer.train_adversarial(
          student_generator,
          export_only=stats.get("adversarial") or kwargs["export_only"])
      if not kwargs["export_only"]:
        stats["adversarial"] = True
  # Tracing Graph to put input signature
  _ = student_generator.predict(
      tf.random.normal([1, 180, 320, 3]))
  tf.saved_model.save(
      student_generator,
      os.path.join(
          kwargs["modeldir"],
          "compressed_esrgan"))
if __name__ == "__main__":
  # Command line entry point: parse flags, configure logging verbosity,
  # then hand everything to train_and_export().
  parser = argparse.ArgumentParser()
  parser.add_argument("--tpu", default=None, help="Name of the TPU to use")
  parser.add_argument("--logdir", default=None, help="Path to log directory")
  parser.add_argument(
      "--export_only",
      default=False,
      action="store_true",
      help="Do not train, only export the model")
  parser.add_argument(
      "--config",
      default="config/config.yaml",
      help="path to config file")
  parser.add_argument(
      "--datadir",
      default=None,
      help="Path to custom data directory containing sharded TFRecords")
  parser.add_argument(
      "--modeldir",
      default=None,
      help="directory to store checkpoints and SavedModel")
  parser.add_argument(
      "--type",
      default=None,
      help="Train Student 'adversarial'-ly / 'comparative'-ly")
  parser.add_argument(
      "--verbose",
      "-v",
      default=0,
      action="count",
      help="Increases Verbosity. Repeat to increase more")
  # Unknown flags are tolerated (parse_known_args) and simply ignored.
  FLAGS, unparsed = parser.parse_known_args()
  # Clamp the repeat count of -v to the highest available log level.
  log_levels = [logging.FATAL, logging.WARNING, logging.INFO, logging.DEBUG]
  log_level = log_levels[min(FLAGS.verbose, len(log_levels) - 1)]
  logging.set_verbosity(log_level)
  train_and_export(**vars(FLAGS))
| mit |
pigeonflight/strider-plone | docker/appengine/lib/django-1.4/tests/runtests.py | 25 | 12973 | #!/usr/bin/env python
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from django import contrib
# databrowse is deprecated, but we still want to run its tests
warnings.filterwarnings('ignore', "The Databrowse contrib app is deprecated",
                        PendingDeprecationWarning, 'django.contrib.databrowse')
# Dotted-path / directory-name constants for the three locations that
# contain test applications.
CONTRIB_DIR_NAME = 'django.contrib'
MODEL_TESTS_DIR_NAME = 'modeltests'
REGRESSION_TESTS_DIR_NAME = 'regressiontests'
TEST_TEMPLATE_DIR = 'templates'
RUNTESTS_DIR = os.path.dirname(__file__)
CONTRIB_DIR = os.path.dirname(contrib.__file__)
MODEL_TEST_DIR = os.path.join(RUNTESTS_DIR, MODEL_TESTS_DIR_NAME)
REGRESSION_TEST_DIR = os.path.join(RUNTESTS_DIR, REGRESSION_TESTS_DIR_NAME)
# Scratch directory for the test run; exported through the environment so
# settings modules and subprocesses can find it.  Removed in teardown().
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
# Subdirectories of regressiontests/ that are not test applications.
REGRESSION_SUBDIRS_TO_SKIP = ['locale']
# Apps installed for every test run, regardless of which labels were
# requested on the command line.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'django.contrib.redirects',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.comments',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.databrowse',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'regressiontests.staticfiles_tests',
    'regressiontests.staticfiles_tests.apps.test',
    'regressiontests.staticfiles_tests.apps.no_label',
]
def geodjango(settings):
    """Return True when every configured database has a spatial backend.

    GeoDjango's tests can only run if *all* entries in
    ``settings.DATABASES`` use a ``django.contrib.gis`` engine.
    """
    spatial_aliases = [
        alias
        for alias, conf in settings.DATABASES.items()
        if conf['ENGINE'].startswith('django.contrib.gis')
    ]
    return len(spatial_aliases) == len(settings.DATABASES)
def get_test_modules():
    """Discover candidate test apps.

    Returns a list of ``(location_label, module_name)`` pairs gathered from
    the model-test, regression-test and contrib directories.
    """
    discovered = []
    locations = (
        (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR),
        (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR),
        (CONTRIB_DIR_NAME, CONTRIB_DIR),
    )
    for label, path in locations:
        for entry in os.listdir(path):
            # Skip package markers, hidden files, SQL fixture directories
            # and the explicitly blacklisted regression subdirectories.
            skip = (entry.startswith('__init__') or
                    entry.startswith('.') or
                    entry.startswith('sql') or
                    os.path.basename(entry) in REGRESSION_SUBDIRS_TO_SKIP)
            if skip:
                continue
            discovered.append((label, entry))
    return discovered
def setup(verbosity, test_labels):
    """Redirect Django settings for the test run and install test apps.

    Returns a dict of the original setting values so teardown() can
    restore them afterwards.  Python 2 code (uses the print statement).
    """
    from django.conf import settings
    # Snapshot every setting we are about to overwrite.
    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
        'USE_I18N': settings.USE_I18N,
        'LOGIN_URL': settings.LOGIN_URL,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
    }
    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
    settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
    settings.USE_I18N = True
    settings.LANGUAGE_CODE = 'en'
    settings.LOGIN_URL = '/accounts/login/'
    settings.MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.common.CommonMiddleware',
    )
    settings.SITE_ID = 1
    # For testing comment-utils, we require the MANAGERS attribute
    # to be set, so that a test email is sent out which we catch
    # in our tests.
    settings.MANAGERS = ("admin@djangoproject.com",)
    # Load all the ALWAYS_INSTALLED_APPS.
    # (This import statement is intentionally delayed until after we
    # access settings because of the USE_I18N dependency.)
    from django.db.models.loading import get_apps, load_app
    get_apps()
    # Load all the test model apps.
    # Only the first dotted component of each label selects an app.
    test_labels_set = set([label.split('.')[0] for label in test_labels])
    test_modules = get_test_modules()
    # If GeoDjango, then we'll want to add in the test applications
    # that are a part of its test suite.
    if geodjango(settings):
        from django.contrib.gis.tests import geo_apps
        test_modules.extend(geo_apps(runtests=True))
        settings.INSTALLED_APPS.extend(['django.contrib.gis', 'django.contrib.sitemaps'])
    for module_dir, module_name in test_modules:
        module_label = '.'.join([module_dir, module_name])
        # if the module was named on the command line, or
        # no modules were named (i.e., run all), import
        # this module and add it to the list to test.
        if not test_labels or module_name in test_labels_set:
            if verbosity >= 2:
                print "Importing application %s" % module_name
            mod = load_app(module_label)
            if mod:
                if module_label not in settings.INSTALLED_APPS:
                    settings.INSTALLED_APPS.append(module_label)
    return state
def teardown(state):
    """Delete the scratch directory and restore the redirected settings."""
    from django.conf import settings
    # Removing the temporary TEMP_DIR. Ensure we pass in unicode
    # so that it will successfully remove temp trees containing
    # non-ASCII filenames on Windows. (We're assuming the temp dir
    # name itself does not contain non-ASCII characters.)
    shutil.rmtree(unicode(TEMP_DIR))
    # Put back every setting exactly as setup() recorded it.
    for setting_name in state:
        setattr(settings, setting_name, state[setting_name])
def django_tests(verbosity, interactive, failfast, test_labels):
    """Run the Django test suite and return the number of failures."""
    from django.conf import settings
    state = setup(verbosity, test_labels)
    extra_tests = []
    # If GeoDjango is used, add it's tests that aren't a part of
    # an application (e.g., GEOS, GDAL, Distance objects).
    if geodjango(settings) and (not test_labels or 'gis' in test_labels):
        from django.contrib.gis.tests import geodjango_suite
        extra_tests.append(geodjango_suite(apps=False))
    # Run the test suite, including the extra validation tests.
    from django.test.utils import get_runner
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
    TestRunner = get_runner(settings)
    test_runner = TestRunner(verbosity=verbosity, interactive=interactive,
        failfast=failfast)
    failures = test_runner.run_tests(test_labels, extra_tests=extra_tests)
    teardown(state)
    return failures
def bisect_tests(bisection_label, options, test_labels):
    """Binary-search the suite for a test that fails when combined with
    ``bisection_label``, running each half in a subprocess.
    """
    state = setup(int(options.verbosity), test_labels)
    if not test_labels:
        # Get the full list of test labels to use for bisection
        from django.db.models.loading import get_apps
        test_labels = [app.__name__.split('.')[-2] for app in get_apps()]
    print '***** Bisecting test suite:',' '.join(test_labels)
    # Make sure the bisection point isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [bisection_label, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass
    # Base argv re-invoking this script with the same options.
    subprocess_args = [
        sys.executable, __file__, '--settings=%s' % options.settings]
    if options.failfast:
        subprocess_args.append('--failfast')
    if options.verbosity:
        subprocess_args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        subprocess_args.append('--noinput')
    iteration = 1
    while len(test_labels) > 1:
        midpoint = len(test_labels)/2
        # The suspect label is appended to both halves so each run still
        # exercises the potentially conflicting combination.
        test_labels_a = test_labels[:midpoint] + [bisection_label]
        test_labels_b = test_labels[midpoint:] + [bisection_label]
        print '***** Pass %da: Running the first half of the test suite' % iteration
        print '***** Test labels:',' '.join(test_labels_a)
        failures_a = subprocess.call(subprocess_args + test_labels_a)
        print '***** Pass %db: Running the second half of the test suite' % iteration
        print '***** Test labels:',' '.join(test_labels_b)
        print
        failures_b = subprocess.call(subprocess_args + test_labels_b)
        if failures_a and not failures_b:
            print "***** Problem found in first half. Bisecting again..."
            iteration = iteration + 1
            test_labels = test_labels_a[:-1]
        elif failures_b and not failures_a:
            print "***** Problem found in second half. Bisecting again..."
            iteration = iteration + 1
            test_labels = test_labels_b[:-1]
        elif failures_a and failures_b:
            print "***** Multiple sources of failure found"
            break
        else:
            print "***** No source of failure found... try pair execution (--pair)"
            break
    if len(test_labels) == 1:
        print "***** Source of error:",test_labels[0]
    teardown(state)
def paired_tests(paired_test, options, test_labels):
    """Run every test label paired with ``paired_test`` in a subprocess to
    find a label that conflicts with it.
    """
    state = setup(int(options.verbosity), test_labels)
    if not test_labels:
        print ""
        # Get the full list of test labels to use for bisection
        from django.db.models.loading import get_apps
        test_labels = [app.__name__.split('.')[-2] for app in get_apps()]
    print '***** Trying paired execution'
    # Make sure the constant member of the pair isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [paired_test, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass
    # Base argv re-invoking this script with the same options.
    subprocess_args = [
        sys.executable, __file__, '--settings=%s' % options.settings]
    if options.failfast:
        subprocess_args.append('--failfast')
    if options.verbosity:
        subprocess_args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        subprocess_args.append('--noinput')
    for i, label in enumerate(test_labels):
        print '***** %d of %d: Check test pairing with %s' % (
            i+1, len(test_labels), label)
        failures = subprocess.call(subprocess_args + [label, paired_test])
        if failures:
            print '***** Found problem pair with',label
            # NOTE(review): returning here skips teardown(state), leaving
            # TEMP_DIR on disk and the settings redirected -- confirm
            # whether that is intentional.
            return
    print '***** No problem pair found'
    teardown(state)
if __name__ == "__main__":
    # Command-line entry point: parse options, resolve the settings
    # module, then dispatch to normal / bisect / paired test execution.
    from optparse import OptionParser
    usage = "%prog [options] [module module module ...]"
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-v','--verbosity', action='store', dest='verbosity', default='1',
        type='choice', choices=['0', '1', '2', '3'],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=all '
             'output')
    parser.add_option(
        '--noinput', action='store_false', dest='interactive', default=True,
        help='Tells Django to NOT prompt the user for input of any kind.')
    parser.add_option(
        '--failfast', action='store_true', dest='failfast', default=False,
        help='Tells Django to stop running the test suite after first failed '
             'test.')
    parser.add_option(
        '--settings',
        help='Python path to settings module, e.g. "myproject.settings". If '
             'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
             'variable will be used.')
    parser.add_option(
        '--bisect', action='store', dest='bisect', default=None,
        help='Bisect the test suite to discover a test that causes a test '
             'failure when combined with the named test.')
    parser.add_option(
        '--pair', action='store', dest='pair', default=None,
        help='Run the test suite in pairs with the named test to find problem '
             'pairs.')
    parser.add_option(
        '--liveserver', action='store', dest='liveserver', default=None,
        help='Overrides the default address where the live server (used with '
             'LiveServerTestCase) is expected to run from. The default value '
             'is localhost:8081.'),
    # NOTE(review): the trailing comma above turns the statement into a
    # one-element tuple expression; harmless, but looks accidental.
    options, args = parser.parse_args()
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    elif "DJANGO_SETTINGS_MODULE" not in os.environ:
        parser.error("DJANGO_SETTINGS_MODULE is not set in the environment. "
                     "Set it or use --settings.")
    else:
        options.settings = os.environ['DJANGO_SETTINGS_MODULE']
    if options.liveserver is not None:
        os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
    if options.bisect:
        bisect_tests(options.bisect, options, args)
    elif options.pair:
        paired_tests(options.pair, options, args)
    else:
        failures = django_tests(int(options.verbosity), options.interactive,
                                options.failfast, args)
        if failures:
            sys.exit(bool(failures))
| mit |
bharadwajyarlagadda/korona | tests/test_html_tags/test_col_tag.py | 1 | 1308 | # -*- coding: utf-8 -*-
import pytest
from ..fixtures import parametrize
from korona.html.tags import Col
from korona.templates.html.tags import col
from korona.exceptions import TagAttributeError
# Each dict is one attribute combination for the <col> tag under test.
@parametrize('attributes', [
    ({'align': 'char'}),
    ({'align': 'char', 'char': '.'}),
    ({'align': 'char', 'char': '.', 'charoff': '2'}),
    ({'align': 'left', 'span': '2'}),
    ({'align': 'right', 'valign': 'top'}),
    ({'width': '130'})
])
def test_construct_col_tag(attributes):
    """Test for validating whether the col tag is constructed correctly or
    not.
    """
    # The Col builder's output must match the reference template rendered
    # with the same attributes.
    column = Col(**attributes)
    assert column.construct() == col.render(attributes)
# Invalid attribute combinations: each tuple is (attributes, expected
# exception type, substring expected in the error message).
@parametrize('attributes,exception,error_msg', [
    ({'char': '.'}, AttributeError, 'The char attribute can only be used'),
    ({'charoff': '2'}, AttributeError, 'The charoff attribute can only be'),
    ({'char': '.', 'charoff': '2'},
     AttributeError,
     'The char attribute can only be used'),
    ({'align': 'left', 'charoff': '2'},
     AttributeError,
     'The charoff attribute can only be')
])
def test_construct_col_tag_error(attributes, exception, error_msg):
    """Test for validating col tag's attributes."""
    with pytest.raises(exception) as exc:
        Col(**attributes)
    # NOTE(review): str(exc) stringifies the ExceptionInfo wrapper; modern
    # pytest recommends str(exc.value) for the message itself -- confirm
    # this still matches under the pinned pytest version.
    assert error_msg in str(exc)
| mit |
tlangerak/Multi-Agent-Systems | setup.py | 1 | 1846 | #!/usr/bin/env python
import sys
import subprocess
from setuptools import setup, Extension
from runspade import __version__
# bdist_mpkg provides the Mac OS X .mpkg installer command; everywhere
# else the import fails and we simply build without it.
try:
    import bdist_mpkg
except ImportError:
    # This is not a mac
    pass
# The win32prng C extension (tlslite's Windows entropy source) only
# builds on Windows, where it links against advapi32.
if sys.platform == "win32":
    ext = Extension("tlslite.utils.win32prng",
                    sources=["tlslite/utils/win32prng.c"],
                    libraries=["advapi32"])
    exts = [ext]
else:
    exts = []
# Use the README contents as the PyPI long description.
# (Renamed from `file` to avoid shadowing the Python 2 builtin.)
with open('README') as readme_file:
    long_description = readme_file.read()
deps = [
    "SPARQLWrapper",
    "unittest-xml-reporting"]
# pywin32 is only needed (and only installable) on Windows.  Test
# sys.platform directly, matching the check above, instead of the
# Python 2-only subprocess.mswindows attribute.
if sys.platform == "win32":
    deps.append('pywin32')
setup(name='SPADE',
      version=__version__,
      license="LGPL",
      description='Smart Python multi-Agent Development Environment',
      long_description=long_description,
      author='Javier Palanca, Gustavo Aranda, Miguel Escriva and others',
      author_email='jpalanca@gmail.com',
      url='https://github.com/javipalanca/SPADE',
      package_dir={'spade': 'spade'},
      packages=['spade','spade.mtps', 'xmpp', 'xmppd', 'xmppd.modules', 'xmppd.socker', 'tlslite', 'tlslite.utils', 'tlslite.integration'],
      scripts=['runspade.py','configure.py'],#,"tlslite/scripts/tls.py", "tlslite/scripts/tlsdb.py"],
      package_data={'spade':['templates/*.*', 'templates/images/*.*'],},
      include_package_data=True,
      ext_modules=exts,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2.7',
          'Topic :: Adaptive Technologies',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
      ],
      install_requires = deps
      )
| lgpl-2.1 |
lmorchard/django | django/core/management/commands/makemigrations.py | 38 | 12571 | import os
import sys
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
    """Management command implementing ``manage.py makemigrations``."""
    help = "Creates new migration(s) for apps."
    def add_arguments(self, parser):
        """Register the command-line options for makemigrations."""
        parser.add_argument('args', metavar='app_label', nargs='*',
            help='Specify the app label(s) to create migrations for.')
        parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
            help="Just show what migrations would be made; don't actually write them.")
        parser.add_argument('--merge', action='store_true', dest='merge', default=False,
            help="Enable fixing of migration conflicts.")
        parser.add_argument('--empty', action='store_true', dest='empty', default=False,
            help="Create an empty migration.")
        parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.')
        parser.add_argument('-n', '--name', action='store', dest='name', default=None,
            help="Use this name for migration file(s).")
        parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
            help='Exit with error code 1 if no changes needing migrations are found.')
    def handle(self, *app_labels, **options):
        """Detect model changes and write (or merge) migration files."""
        # Cache parsed options on the instance for the helper methods.
        self.verbosity = options.get('verbosity')
        self.interactive = options.get('interactive')
        self.dry_run = options.get('dry_run', False)
        self.merge = options.get('merge', False)
        self.empty = options.get('empty', False)
        self.migration_name = options.get('name')
        self.exit_code = options.get('exit_code', False)
        # Make sure the app they asked for exists
        app_labels = set(app_labels)
        bad_app_labels = set()
        for app_label in app_labels:
            try:
                apps.get_app_config(app_label)
            except LookupError:
                bad_app_labels.add(app_label)
        if bad_app_labels:
            for app_label in bad_app_labels:
                self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
            sys.exit(2)
        # Load the current graph state. Pass in None for the connection so
        # the loader doesn't try to resolve replaced migrations from DB.
        loader = MigrationLoader(None, ignore_no_migrations=True)
        # Before anything else, see if there's conflicting apps and drop out
        # hard if there are any and they don't want to merge
        conflicts = loader.detect_conflicts()
        # If app_labels is specified, filter out conflicting migrations for unspecified apps
        if app_labels:
            conflicts = {
                app_label: conflict for app_label, conflict in iteritems(conflicts)
                if app_label in app_labels
            }
        if conflicts and not self.merge:
            name_str = "; ".join(
                "%s in %s" % (", ".join(names), app)
                for app, names in conflicts.items()
            )
            raise CommandError(
                "Conflicting migrations detected; multiple leaf nodes in the "
                "migration graph: (%s).\nTo fix them run "
                "'python manage.py makemigrations --merge'" % name_str
            )
        # If they want to merge and there's nothing to merge, then politely exit
        if self.merge and not conflicts:
            self.stdout.write("No conflicts detected to merge.")
            return
        # If they want to merge and there is something to merge, then
        # divert into the merge code
        if self.merge and conflicts:
            return self.handle_merge(loader, conflicts)
        # The questioner decides how ambiguous changes (renames, defaults
        # for new non-null fields, ...) are resolved.
        if self.interactive:
            questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
        else:
            questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
        # Set up autodetector
        autodetector = MigrationAutodetector(
            loader.project_state(),
            ProjectState.from_apps(apps),
            questioner,
        )
        # If they want to make an empty migration, make one for each app
        if self.empty:
            if not app_labels:
                raise CommandError("You must supply at least one app label when using --empty.")
            # Make a fake changes() result we can pass to arrange_for_graph
            changes = {
                app: [Migration("custom", app)]
                for app in app_labels
            }
            changes = autodetector.arrange_for_graph(
                changes=changes,
                graph=loader.graph,
                migration_name=self.migration_name,
            )
            self.write_migration_files(changes)
            return
        # Detect changes
        changes = autodetector.changes(
            graph=loader.graph,
            trim_to_apps=app_labels or None,
            convert_apps=app_labels or None,
            migration_name=self.migration_name,
        )
        if not changes:
            # No changes? Tell them.
            if self.verbosity >= 1:
                if len(app_labels) == 1:
                    self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
                elif len(app_labels) > 1:
                    self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
                else:
                    self.stdout.write("No changes detected")
            # --exit turns "nothing to do" into a non-zero exit status.
            if self.exit_code:
                sys.exit(1)
            else:
                return
        self.write_migration_files(changes)
    def write_migration_files(self, changes):
        """
        Takes a changes dict and writes them out as migration files.
        """
        # Tracks which apps already had their migrations/ package created
        # so the directory/__init__.py check runs once per app.
        directory_created = {}
        for app_label, app_migrations in changes.items():
            if self.verbosity >= 1:
                self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
            for migration in app_migrations:
                # Describe the migration
                writer = MigrationWriter(migration)
                if self.verbosity >= 1:
                    self.stdout.write("  %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
                    for operation in migration.operations:
                        self.stdout.write("    - %s\n" % operation.describe())
                if not self.dry_run:
                    # Write the migrations file to the disk.
                    migrations_directory = os.path.dirname(writer.path)
                    if not directory_created.get(app_label):
                        if not os.path.isdir(migrations_directory):
                            os.mkdir(migrations_directory)
                        init_path = os.path.join(migrations_directory, "__init__.py")
                        if not os.path.isfile(init_path):
                            open(init_path, "w").close()
                        # We just do this once per app
                        directory_created[app_label] = True
                    migration_string = writer.as_string()
                    with open(writer.path, "wb") as fh:
                        fh.write(migration_string)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --dry-run --verbosity 3
                    # will output the migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(self.style.MIGRATE_HEADING(
                        "Full migrations file '%s':" % writer.filename) + "\n"
                    )
                    self.stdout.write("%s\n" % writer.as_string())
    def handle_merge(self, loader, conflicts):
        """
        Handles merging together conflicted migrations interactively,
        if it's safe; otherwise, advises on how to fix it.
        """
        if self.interactive:
            questioner = InteractiveMigrationQuestioner()
        else:
            questioner = MigrationQuestioner(defaults={'ask_merge': True})
        for app_label, migration_names in conflicts.items():
            # Grab out the migrations in question, and work out their
            # common ancestor.
            merge_migrations = []
            for migration_name in migration_names:
                migration = loader.get_migration(app_label, migration_name)
                migration.ancestry = [
                    mig for mig in loader.graph.forwards_plan((app_label, migration_name))
                    if mig[0] == migration.app_label
                ]
                merge_migrations.append(migration)
            # Ancestry lists agree element-wise up to the fork point; count
            # the leading generations on which every branch agrees.
            all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
            merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
            common_ancestor_count = sum(1 for common_ancestor_generation
                                        in takewhile(all_items_equal, merge_migrations_generations))
            if not common_ancestor_count:
                raise ValueError("Could not find common ancestor of %s" % migration_names)
            # Now work out the operations along each divergent branch
            for migration in merge_migrations:
                migration.branch = migration.ancestry[common_ancestor_count:]
                migrations_ops = (loader.get_migration(node_app, node_name).operations
                                  for node_app, node_name in migration.branch)
                migration.merged_operations = sum(migrations_ops, [])
            # In future, this could use some of the Optimizer code
            # (can_optimize_through) to automatically see if they're
            # mergeable. For now, we always just prompt the user.
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
                for migration in merge_migrations:
                    self.stdout.write(self.style.MIGRATE_LABEL("  Branch %s" % migration.name))
                    for operation in migration.merged_operations:
                        self.stdout.write("    - %s\n" % operation.describe())
            if questioner.ask_merge(app_label):
                # If they still want to merge it, then write out an empty
                # file depending on the migrations needing merging.
                numbers = [
                    MigrationAutodetector.parse_number(migration.name)
                    for migration in merge_migrations
                ]
                try:
                    biggest_number = max(x for x in numbers if x is not None)
                except ValueError:
                    biggest_number = 1
                subclass = type("Migration", (Migration, ), {
                    "dependencies": [(app_label, migration.name) for migration in merge_migrations],
                })
                new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
                writer = MigrationWriter(new_migration)
                if not self.dry_run:
                    # Write the merge migrations file to the disk
                    with open(writer.path, "wb") as fh:
                        fh.write(writer.as_string())
                    if self.verbosity > 0:
                        self.stdout.write("\nCreated new merge migration %s" % writer.path)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --merge --dry-run --verbosity 3
                    # will output the merge migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(self.style.MIGRATE_HEADING(
                        "Full merge migrations file '%s':" % writer.filename) + "\n"
                    )
                    self.stdout.write("%s\n" % writer.as_string())
| bsd-3-clause |
gabyx/GRSFramework | simulations/python/modules/GRSFTools/Transformations/Transformations.py | 2 | 65541 |
# -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2014, Christoph Gohlke
# Copyright (c) 2006-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2013.06.29
Requirements
------------
* `CPython 2.7 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Transformations.c 2013.01.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2013.06.29'
__docformat__ = 'restructuredtext en'
__all__ = []
def identity_matrix():
    """Return the 4x4 homogeneous identity (unit) matrix.

    >>> I = identity_matrix()
    >>> numpy.allclose(I, numpy.identity(4))
    True
    >>> numpy.sum(I), numpy.trace(I)
    (4.0, 4.0)

    """
    result = numpy.zeros((4, 4))
    numpy.fill_diagonal(result, 1.0)
    return result
def translation_matrix(direction):
    """Return 4x4 homogeneous matrix translating by *direction*.

    Only the first three components of *direction* are used.

    >>> v = numpy.random.random(3) - 0.5
    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
    True

    """
    T = numpy.identity(4)
    # The translation vector occupies the last column of the upper 3 rows.
    T[0:3, 3] = direction[:3]
    return T
def translation_from_matrix(matrix):
    """Return the translation vector from a 4x4 transformation matrix.

    The result is an independent copy of the matrix's last column
    (upper three entries).

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = translation_from_matrix(translation_matrix(v0))
    >>> numpy.allclose(v0, v1)
    True

    """
    M = numpy.asarray(matrix)
    # numpy.array(...) copies by default, so mutating the result does not
    # touch the input matrix.
    return numpy.array(M[:3, 3])
def reflection_matrix(point, normal):
    """Return 4x4 matrix mirroring at the plane through *point* with *normal*.

    A Householder reflection extended to homogeneous coordinates: points on
    the plane are fixed, everything else is mirrored across it.
    """
    n = unit_vector(normal[:3])
    M = numpy.identity(4)
    # Householder part: I - 2*n*n^T flips the component along the normal.
    M[:3, :3] -= 2.0 * numpy.outer(n, n)
    # Translation term keeps the plane (through *point*) pointwise fixed.
    M[:3, 3] = (2.0 * numpy.dot(point[:3], n)) * n
    return M
def reflection_from_matrix(matrix):
    """Return mirror-plane point and normal vector from a reflection matrix.

    Inverse of reflection_matrix.  Raise ValueError if *matrix* does not
    have the eigenstructure of a reflection.
    """
    M = numpy.asarray(matrix, dtype=numpy.float64)
    # The plane normal is the eigenvector of the 3x3 part whose eigenvalue
    # is -1 (the direction that gets flipped).
    eigvals, eigvecs = numpy.linalg.eig(M[:3, :3])
    hits = numpy.where(abs(numpy.real(eigvals) + 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(eigvecs[:, hits[0]]).squeeze()
    # Any fixed point of the full 4x4 transform (eigenvalue 1) lies on the
    # mirror plane; dehomogenize it.
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if len(hits) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return 4x4 matrix rotating by *angle* about an axis.

    The axis runs along *direction* through *point* (through the origin if
    *point* is None).  Implements Rodrigues' rotation formula.

    >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
    >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
    True

    """
    c = math.cos(angle)
    s = math.sin(angle)
    axis = unit_vector(direction[:3])
    # Rodrigues: R = c*I + (1-c)*a*a^T + s*[a]_x
    R = numpy.identity(3) * c
    R += numpy.outer(axis, axis) * (1.0 - c)
    ax, ay, az = axis * s
    R += numpy.array([[0.0, -az, ay],
                      [az, 0.0, -ax],
                      [-ay, ax, 0.0]])
    M = numpy.identity(4)
    M[:3, :3] = R
    if point is not None:
        # Conjugate with translations so the axis passes through *point*
        # instead of the origin.
        p = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = p - numpy.dot(R, p)
    return M
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    The results satisfy ``rotation_matrix(angle, direction, point)`` ==
    *matrix* (up to the usual angle/axis sign ambiguity).

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True

    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]
    # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
    # (the rotation axis is the direction left unchanged by the rotation)
    w, W = numpy.linalg.eig(R33.T)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: eigenvector of the full 4x4 matrix R corresponding to
    # eigenvalue 1, dehomogenized -- a fixed point on the rotation axis
    w, Q = numpy.linalg.eig(R)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(Q[:, i[-1]]).squeeze()
    point /= point[3]
    # rotation angle depending on direction
    # cos(angle) comes from the trace; sin(angle) is recovered from an
    # off-diagonal term, dividing by the largest direction component to
    # avoid division by a near-zero value.
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix to scale by *factor* around *origin* in *direction*.

    Without *direction* the scaling is uniform (use factor -1 for point
    symmetry); with *direction* only that axis is scaled.
    """
    if direction is None:
        # Uniform scaling about *origin* (origin stays fixed).
        M = numpy.diag([factor, factor, factor, 1.0])
        if origin is not None:
            M[:3, 3] = origin[:3]
            M[:3, 3] *= 1.0 - factor
    else:
        # Scale only along the (normalized) direction vector.
        d = unit_vector(direction[:3])
        shrink = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= shrink * numpy.outer(d, d)
        if origin is not None:
            M[:3, 3] = (shrink * numpy.dot(origin[:3], d)) * d
    return M
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from a scaling matrix.

    Inverse of scale_matrix; *direction* is None for uniform scaling.
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    upper = M[:3, :3]
    factor = numpy.trace(upper) - 2.0
    try:
        # Non-uniform case: the scaled axis is the eigenvector whose
        # eigenvalue equals the factor.  If no eigenvalue matches, the
        # [0][0] lookup raises IndexError and we fall through to uniform.
        w, V = numpy.linalg.eig(upper)
        i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
        direction = numpy.real(V[:, i]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # Uniform scaling: all three eigenvalues equal the factor.
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any fixed point of the full transform (eigenvalue 1).
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(V[:, i[-1]]).squeeze()
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.

    Using either perspective point, projection direction, or none of both.

    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).

    point : point on the projection plane (first 3 components used).
    normal : plane normal; normalized internally, need not be unit length.
    direction : optional direction vector for a parallel projection.
    perspective : optional eye point for a perspective projection.
    pseudo : for perspective projections, preserve relative depth.

    >>> P = projection_matrix([0, 0, 0], [1, 0, 0])
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3-v1[1])
    True

    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection through the eye point `perspective`;
        # the bottom row makes w proportional to the distance from the eye
        # plane, so results need dehomogenization.
        perspective = numpy.array(perspective[:3], dtype=numpy.float64,
                                  copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along `direction` (oblique when direction is
        # not parallel to the plane normal)
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection along the plane normal
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.

    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.

    Pseudo perspective matrices cannot be distinguished from plain ones by
    their eigenstructure alone, so the caller must pass the same *pseudo*
    flag that was used to build the matrix.

    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    # Parallel/orthogonal projections fix the points of the plane
    # (eigenvalue 1); perspective matrices generally do not.
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        # (everything is collapsed along the projection direction)
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # bottom row of the matrix stores -normal (see projection_matrix)
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix to obtain normalized device coordinates from frustum.

    The frustum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far).

    Normalized device coordinates are in range [-1, 1] if coordinates are
    inside the frustum.

    If perspective is True the frustum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box).

    Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (divided by w coordinate).

    Raise ValueError for a degenerate frustum (min >= max on any axis), or
    for a perspective frustum whose near plane is not in front of the origin.

    >>> frustum = numpy.random.rand(6)
    >>> frustum[1] += frustum[0]
    >>> frustum[3] += frustum[2]
    >>> frustum[5] += frustum[4]
    >>> M = clip_matrix(perspective=False, *frustum)
    >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
    array([ 1.,  1.,  1.,  1.])
    >>> M = clip_matrix(perspective=True, *frustum)
    >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    >>> v / v[3]
    array([-1., -1., -1.,  1.])
    >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
    >>> v / v[3]
    array([ 1.,  1., -1.,  1.])
    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustum")
    if perspective:
        if near <= _EPS:
            raise ValueError("invalid frustum: near <= 0")
        t = 2.0 * near
        # Perspective frustum: the last row sets w' = -z, so x/y/z map to
        # [-1, 1] only after division by w (see doctests above for signs).
        M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
             [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
             [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
             [0.0, 0.0, -1.0, 0.0]]
    else:
        # Orthographic: independent scale and translate per axis mapping
        # [min, max] onto [-1, 1].
        M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
             [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
             [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
             [0.0, 0.0, 0.0, 1.0]]
    return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
    """Return 4x4 shear matrix.

    Points are displaced along *direction* in proportion to their signed
    distance from the plane through *point* with normal *normal*; *angle*
    is the shear angle.  *direction* must lie in the shear plane
    (orthogonal to *normal*), otherwise ValueError is raised.
    """
    n = unit_vector(normal[:3])
    d = unit_vector(direction[:3])
    if abs(numpy.dot(n, d)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    t = math.tan(angle)
    M = numpy.identity(4)
    # Rank-one update d*n^T shears by tan(angle) per unit distance from
    # the plane; the translation keeps the plane itself fixed.
    M[:3, :3] += t * numpy.outer(d, n)
    M[:3, 3] = -t * numpy.dot(point[:3], n) * d
    return M
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    Inverse of shear_matrix.  Raise ValueError if *matrix* does not have
    the eigenstructure of a shear (a plane of fixed points).

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    # (the shear plane is the 2D eigenspace of eigenvalue 1; its normal is
    # the cross product of two independent eigenvectors in it)
    w, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("no two linear independent eigenvectors found %s" % w)
    V = numpy.real(V[:, i]).squeeze().T
    lenorm = -1.0
    # pick the eigenvector pair with the largest cross product, i.e. the
    # most linearly independent pair, for numerical stability
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        w = vector_norm(n)
        if w > lenorm:
            lenorm = w
            normal = n
    normal /= lenorm
    # direction and angle
    # (M - I) maps the normal to tan(angle) * direction
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1
    # (any fixed point of the full transform lies on the shear plane)
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.

    matrix : array_like
        Non-degenerative homogeneous transformation matrix

    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    Raise ValueError if matrix is of wrong type or degenerative.

    >>> T0 = translation_matrix([1, 2, 3])
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True

    """
    # work on a normalized, transposed copy (rows are easier to index)
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]
    P = M.copy()
    P[:, 3] = 0.0, 0.0, 0.0, 1.0
    if not numpy.linalg.det(P):
        raise ValueError("matrix is singular")
    scale = numpy.zeros((3, ))
    shear = [0.0, 0.0, 0.0]
    angles = [0.0, 0.0, 0.0]
    # strip the perspective partition first (solved against the
    # non-perspective part P), then the translation
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0.0, 0.0, 0.0, 1.0
    else:
        perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
    translate = M[3, :3].copy()
    M[3, :3] = 0.0
    # Gram-Schmidt on the remaining 3x3 part: the row norms give the
    # scales, the projections removed along the way give the shears,
    # and the resulting orthonormal rows form the rotation.
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # numpy scalar division broadcasts over the list slice here
    shear[1:] /= scale[2]
    # flip everything if the basis is left-handed (negative determinant)
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        numpy.negative(scale, scale)
        numpy.negative(row, row)
    # extract static-frame x, y, z Euler angles from the rotation rows
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.  Any argument
    may be None to skip that component.  The result is the product
    Perspective . Translate . Rotate . Shear . Scale.

    scale : vector of 3 scaling factors
    shear : list of shear factors for x-y, x-z, y-z axes
    angles : list of Euler angles about static x, y, z axes
    translate : translation vector along x, y, z axes
    perspective : perspective partition of matrix
    """
    factors = []
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        factors.append(P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        factors.append(T)
    if angles is not None:
        factors.append(euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[0, 1] = shear[0]
        Z[0, 2] = shear[1]
        Z[1, 2] = shear[2]
        factors.append(Z)
    if scale is not None:
        S = numpy.identity(4)
        S[0, 0] = scale[0]
        S[1, 1] = scale[1]
        S[2, 2] = scale[2]
        factors.append(S)
    # multiply the factors in order, then renormalize the homogeneous part
    M = numpy.identity(4)
    for F in factors:
        M = numpy.dot(M, F)
    M /= M[3, 3]
    return M
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    *lengths* are the cell edges a, b, c; *angles* (alpha, beta, gamma)
    are expected in degrees.  The de-orthogonalization matrix is the
    inverse.

    >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True

    """
    a, b, c = lengths
    rad = numpy.radians(angles)
    sina, sinb, _ = numpy.sin(rad)
    cosa, cosb, cosg = numpy.cos(rad)
    co = (cosa * cosb - cosg) / (sina * sinb)
    O = numpy.zeros((4, 4))
    O[0, 0] = a * sinb * math.sqrt(1.0 - co*co)
    O[1, 0] = -a * sinb * co
    O[1, 1] = b * sina
    O[2, 0] = a * cosb
    O[2, 1] = b * cosa
    O[2, 2] = c
    O[3, 3] = 1.0
    return O
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
    """Return affine transform matrix to register two point sets.

    v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
    coordinates, where ndims is the dimensionality of the coordinate space.

    If shear is False, a similarity transformation matrix is returned.
    If also scale is False, a rigid/Euclidean transformation matrix
    is returned.

    By default the algorithm by Hartley and Zissermann [15] is used.
    If usesvd is True, similarity and Euclidean transformation matrices
    are calculated by minimizing the weighted sum of squared deviations
    (RMSD) according to the algorithm by Kabsch [8].
    Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
    is used, which is slower when using this Python implementation.

    The returned matrix performs rotation, translation and uniform scaling
    (if specified).

    >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
    >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
    >>> affine_matrix_from_points(v0, v1)
    array([[   0.14549,    0.00062,  675.50008],
           [   0.00048,    0.14094,   53.24971],
           [   0.     ,    0.     ,    1.     ]])
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> S = scale_matrix(random.random())
    >>> M = concatenate_matrices(T, R, S)
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
    >>> M = affine_matrix_from_points(v0[:3], v1[:3])
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True

    More examples in superimposition_matrix()

    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
    ndims = v0.shape[0]
    if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
        raise ValueError("input arrays are of wrong shape or type")
    # move centroids to origin
    # (M0 and M1 record the centering translations so they can be undone
    # at the end)
    t0 = -numpy.mean(v0, axis=1)
    M0 = numpy.identity(ndims+1)
    M0[:ndims, ndims] = t0
    v0 += t0.reshape(ndims, 1)
    t1 = -numpy.mean(v1, axis=1)
    M1 = numpy.identity(ndims+1)
    M1[:ndims, ndims] = t1
    v1 += t1.reshape(ndims, 1)
    if shear:
        # Affine transformation
        # (Hartley & Zissermann: solve for the linear map from the null
        # space structure of the stacked coordinates)
        A = numpy.concatenate((v0, v1), axis=0)
        u, s, vh = numpy.linalg.svd(A.T)
        vh = vh[:ndims].T
        B = vh[:ndims]
        C = vh[ndims:2*ndims]
        t = numpy.dot(C, numpy.linalg.pinv(B))
        t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
        M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
    elif usesvd or ndims != 3:
        # Rigid transformation via SVD of covariance matrix (Kabsch)
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system
            R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(ndims+1)
        M[:ndims, :ndims] = R
    else:
        # Rigid transformation matrix via quaternion (Horn, 3D only)
        # compute symmetric matrix N
        # (only the lower triangle is filled; eigh reads the lower
        # triangle by default)
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = [[xx+yy+zz, 0.0, 0.0, 0.0],
             [yz-zy, xx-yy-zz, 0.0, 0.0],
             [zx-xz, xy+yx, yy-xx-zz, 0.0],
             [xy-yx, zx+xz, yz+zy, zz-xx-yy]]
        # quaternion: eigenvector corresponding to most positive eigenvalue
        w, V = numpy.linalg.eigh(N)
        q = V[:, numpy.argmax(w)]
        q /= vector_norm(q)  # unit quaternion
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    if scale and not shear:
        # Affine transformation; scale is ratio of RMS deviations from centroid
        v0 *= v0
        v1 *= v1
        M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
    # move centroids back
    M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
    M /= M[ndims, ndims]
    return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
    """Return matrix to transform given 3D point set into second point set.

    v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points;
    only the first three rows (x, y, z) are used.

    Thin wrapper around affine_matrix_from_points with shear disabled, so
    the result is a similarity or Euclidean transformation matrix.  The
    parameters scale and usesvd are explained there.
    """
    src = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    dst = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
    return affine_matrix_from_points(src, dst, shear=False,
                                     scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    The string form is accepted case-insensitively, consistent with
    euler_from_matrix and quaternion_from_euler.

    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)

    """
    try:
        # Accept case-insensitive string keys, matching the sibling
        # functions euler_from_matrix and quaternion_from_euler; tuple
        # input raises AttributeError on .lower() and is handled below.
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # decode the axis sequence into array indices i, j, k
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # rotating frame: swap first and third angles
        ai, ak = ak, ai
    if parity:
        # odd permutation of axes: negate all angles
        ai, aj, ak = -ai, -aj, -ak
    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk
    M = numpy.identity(4)
    if repetition:
        # sequences where the first axis repeats (e.g. 'sxyx')
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        # sequences over three distinct axes (e.g. 'sxyz')
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple

    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")

    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # decode the axis sequence into array indices i, j, k
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        # first axis repeated (e.g. 'sxyx'); sy near zero means the
        # gimbal-lock case, where the third angle is fixed at 0
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j],  M[i, k])
            ay = math.atan2( sy,       M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2( sy,       M[i, i])
            az = 0.0
    else:
        # three distinct axes (e.g. 'sxyz'); cy near zero is gimbal lock
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j],  M[k, k])
            ay = math.atan2(-M[k, i],  cy)
            az = math.atan2( M[j, i],  M[i, i])
        else:
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2(-M[k, i],  cy)
            az = 0.0
    # undo the parity/frame adjustments applied by euler_matrix
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.

    Converts via the equivalent rotation matrix.

    >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True

    """
    # Go through the rotation-matrix representation.
    M = quaternion_matrix(quaternion)
    return euler_from_matrix(M, axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    The quaternion is returned in (w, x, y, z) order.

    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
    True

    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # decode axis sequence; +1 offsets skip the scalar (w) component at q[0]
    i = firstaxis + 1
    j = _NEXT_AXIS[i+parity-1] + 1
    k = _NEXT_AXIS[i-parity] + 1
    if frame:
        # rotating frame: swap first and third angles
        ai, ak = ak, ai
    if parity:
        aj = -aj
    # quaternions use half angles
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    cc = ci*ck
    cs = ci*sk
    sc = si*ck
    ss = si*sk
    q = numpy.empty((4, ))
    if repetition:
        # first axis repeated (e.g. 'sxyx')
        q[0] = cj*(cc - ss)
        q[i] = cj*(cs + sc)
        q[j] = sj*(cc + ss)
        q[k] = sj*(cs - sc)
    else:
        # three distinct axes (e.g. 'sxyz')
        q[0] = cj*cc + sj*ss
        q[i] = cj*sc - sj*cs
        q[j] = cj*ss + sj*cc
        q[k] = cj*cs - sj*sc
    if parity:
        q[j] *= -1.0
    return q
def quaternion_about_axis(angle, axis):
    """Return quaternion (w, x, y, z) for rotation of *angle* about *axis*.

    >>> q = quaternion_about_axis(0.123, [1, 0, 0])
    >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
    True

    """
    half = angle/2.0
    q = numpy.array([0.0, axis[0], axis[1], axis[2]])
    length = vector_norm(q)
    # Scale the vector part to sin(angle/2) * unit axis; leave it as-is
    # for a degenerate (near-zero) axis.
    if length > _EPS:
        q *= math.sin(half) / length
    q[0] = math.cos(half)
    return q
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion (w, x, y, z).

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    nsq = numpy.dot(q, q)
    if nsq < _EPS:
        # Degenerate (near-zero) quaternion: identity rotation.
        return numpy.identity(4)
    # Scale so that the outer product directly yields the doubled terms
    # of the standard quaternion-to-matrix formula.
    q *= math.sqrt(2.0 / nsq)
    Q = numpy.outer(q, q)
    M = numpy.identity(4)
    M[0, 0] = 1.0 - Q[2, 2] - Q[3, 3]
    M[0, 1] = Q[1, 2] - Q[3, 0]
    M[0, 2] = Q[1, 3] + Q[2, 0]
    M[1, 0] = Q[1, 2] + Q[3, 0]
    M[1, 1] = 1.0 - Q[1, 1] - Q[3, 3]
    M[1, 2] = Q[2, 3] - Q[1, 0]
    M[2, 0] = Q[1, 3] - Q[2, 0]
    M[2, 1] = Q[2, 3] + Q[1, 0]
    M[2, 2] = 1.0 - Q[1, 1] - Q[2, 2]
    return M
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.
    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.
    >>> q = quaternion_from_matrix(numpy.identity(4), True)
    >>> numpy.allclose(q, [1, 0, 0, 0])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R, True)
    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
    True
    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
    True
    >>> R = random_rotation_matrix()
    >>> q = quaternion_from_matrix(R)
    >>> is_same_transform(R, quaternion_matrix(q))
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        # Fast path: extract the quaternion directly from the trace and
        # off-diagonal differences; only valid for a true rotation matrix.
        q = numpy.empty((4, ))
        t = numpy.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # Trace is small; pick the largest diagonal element to keep the
            # square root argument well away from zero.
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        # Robust path: works for imprecise (e.g. noisy or non-orthonormal)
        # rotation matrices at the cost of an eigendecomposition.
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
                         [m01+m10, m11-m00-m22, 0.0, 0.0],
                         [m02+m20, m12+m21, m22-m00-m11, 0.0],
                         [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = numpy.linalg.eigh(K)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    # Normalize the sign so the scalar part is non-negative (q and -q encode
    # the same rotation).
    if q[0] < 0.0:
        numpy.negative(q, q)
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0.

    Quaternions are in (w, x, y, z) order.

    >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
    >>> numpy.allclose(q, [28, -44, -14, 48])
    True
    """
    w0, x0, y0, z0 = quaternion0
    w1, x1, y1, z1 = quaternion1
    return numpy.array(
        [w1*w0 - x1*x0 - y1*y0 - z1*z0,
         w1*x0 + x1*w0 + y1*z0 - z1*y0,
         w1*y0 - x1*z0 + y1*w0 + z1*x0,
         w1*z0 + x1*y0 - y1*x0 + z1*w0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion (vector part negated).

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
    True
    """
    conj = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conj[1:] *= -1.0
    return conj
def quaternion_inverse(quaternion):
    """Return inverse of quaternion: conjugate divided by squared norm.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True
    """
    conj = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conj[1:] *= -1.0
    return conj / numpy.dot(conj, conj)
def quaternion_real(quaternion):
    """Return the scalar (real) part of a quaternion as a Python float.

    >>> quaternion_real([3, 0, 1, 2])
    3.0
    """
    scalar_part = quaternion[0]
    return float(scalar_part)
def quaternion_imag(quaternion):
    """Return the vector (imaginary) part of a quaternion as a float64 array.

    >>> quaternion_imag([3, 0, 1, 2])
    array([ 0.,  1.,  2.])
    """
    vector_part = quaternion[1:4]
    return numpy.array(vector_part, dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.
    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
    True
    """
    # unit_vector returns fresh normalized copies, so the in-place updates
    # below never touch the caller's arrays.
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    # Exact endpoints: return the inputs unchanged.
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    # d is the cosine of the angle between the two unit quaternions.
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # Quaternions are (anti)parallel; nothing to interpolate.
        return q0
    if shortestpath and d < 0.0:
        # invert rotation: q1 and -q1 represent the same rotation, so negate
        # to interpolate along the shorter arc.
        d = -d
        numpy.negative(q1, q1)
    # spin adds whole extra half-revolutions to the interpolation arc.
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    # Standard slerp: sin((1-f)*a)/sin(a) * q0 + sin(f*a)/sin(a) * q1,
    # accumulated in place into q0.
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return uniform random unit quaternion.

    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1.

    >>> q = random_quaternion()
    >>> numpy.allclose(1, vector_norm(q))
    True
    >>> q = random_quaternion(numpy.random.random(3))
    >>> len(q.shape), q.shape[0]==4
    (1, True)
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    # Shoemake's subgroup algorithm for uniform sampling on S^3.
    root_one_minus = numpy.sqrt(1.0 - rand[0])
    root = numpy.sqrt(rand[0])
    theta1 = 2.0 * math.pi * rand[1]
    theta2 = 2.0 * math.pi * rand[2]
    return numpy.array([numpy.cos(theta2) * root,
                        numpy.sin(theta1) * root_one_minus,
                        numpy.cos(theta1) * root_one_minus,
                        numpy.sin(theta2) * root])
def random_rotation_matrix(rand=None):
    """Return uniform random rotation matrix.
    rand: array like
        Three independent random variables that are uniformly distributed
        between 0 and 1 for each returned quaternion.
    >>> R = random_rotation_matrix()
    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
    True
    """
    # Sample a uniform unit quaternion and convert it to a 4x4 homogeneous
    # rotation matrix; uniformity follows from random_quaternion.
    return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
    """Virtual Trackball Control.
    Maps 2D window drags onto rotations of a virtual sphere; the current
    orientation is kept as a unit quaternion.
    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[1, 0, 0, 0])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1, 1, 0], [-1, 1, 0])
    >>> ball.constrain = True
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.
        initial : quaternion or rotation matrix
        """
        self._axis = None  # constraint axis picked at mouse-down, or None
        self._axes = None  # candidate constraint axes set via setaxes()
        self._radius = 1.0  # trackball radius in window coordinates
        self._center = [0.0, 0.0]  # trackball center in window coordinates
        self._vdown = numpy.array([0.0, 0.0, 1.0])  # sphere point at mouse-down
        self._constrain = False  # whether drags are constrained to an axis
        if initial is None:
            self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix")
        # _qdown: orientation at mouse-down; _qnow: current orientation;
        # _qpre: previous orientation (used by next() to extrapolate).
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.
        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    @property
    def constrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    @constrain.setter
    def constrain(self, value):
        """Set state of constrain to axis mode."""
        self._constrain = bool(value)
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # Snap the starting point to the candidate axis nearest to it.
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # Rotation from the down-point to the current point: the axis is the
        # cross product, the scalar part the dot product of the two vectors.
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # Degenerate drag (points coincide): keep the down orientation.
            self._qnow = self._qdown
        else:
            q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
            self._qnow = quaternion_multiply(q, self._qdown)
    # NOTE: the name `next` shadows the Python builtin / iterator protocol
    # method; kept for backward compatibility with existing callers.
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # Extrapolate past _qnow along the _qpre -> _qnow arc (fraction > 1).
        # False is passed positionally for `spin` (False == 0).
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates for a window-coordinate point.

    Points inside the trackball circle are lifted onto the front hemisphere;
    points outside are projected onto the sphere's silhouette (z == 0).
    """
    vx = (point[0] - center[0]) / radius
    # Window y grows downward; flip so sphere y grows upward.
    vy = (center[1] - point[1]) / radius
    length2 = vx * vx + vy * vy
    if length2 > 1.0:
        # position outside of sphere: project onto the equator circle
        norm = math.sqrt(length2)
        return numpy.array([vx / norm, vy / norm, 0.0])
    return numpy.array([vx, vy, math.sqrt(1.0 - length2)])
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis.
    Projects *point* onto the great circle whose plane is perpendicular to
    *axis*, preferring the front (z >= 0) hemisphere.
    """
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    v -= a * numpy.dot(a, v)  # project v onto the plane perpendicular to a
    n = vector_norm(v)
    if n > _EPS:
        # Keep the result on the visible hemisphere, then normalize.
        if v[2] < 0.0:
            numpy.negative(v, v)
        v /= n
        return v
    # Degenerate case: point is (anti)parallel to axis, so any perpendicular
    # unit vector works; pick a deterministic one.
    if a[2] == 1.0:
        return numpy.array([1.0, 0.0, 0.0])
    return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
    """Return the axis from *axes* whose constraint arc is nearest to *point*.

    Nearness is measured by the dot product of the point with its projection
    onto each axis' arc; returns None when *axes* is empty.
    """
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        score = numpy.dot(arcball_constrain_to_axis(point, candidate), point)
        if score > best_score:
            best_score = score
            best_axis = candidate
    return best_axis
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# reverse lookup; also used to validate axis sequences given in tuple form
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    When *out* is given the norms are written into it and None is returned.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1])
    1.0
    """
    squared = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is not None:
        # In-place variant: square, reduce into the caller's buffer, sqrt.
        squared *= squared
        numpy.sum(squared, axis=axis, out=out)
        numpy.sqrt(out, out)
        return None
    if squared.ndim == 1:
        # Fast scalar path for flat vectors.
        return math.sqrt(numpy.dot(squared, squared))
    squared *= squared
    totals = numpy.atleast_1d(numpy.sum(squared, axis=axis))
    numpy.sqrt(totals, totals)
    return totals
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by its Euclidean norm along *axis*.

    When *out* is given the normalized values are written into it and None
    is returned; otherwise a new array is returned.

    >>> v0 = numpy.random.random(3)
    >>> v1 = unit_vector(v0)
    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1]))
    [1.0]
    """
    if out is not None:
        # Normalize into the caller's buffer (copying the input first when
        # the buffer is a different array).
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        vectors = out
    else:
        vectors = numpy.array(data, dtype=numpy.float64, copy=True)
        if vectors.ndim == 1:
            # Fast path for flat vectors.
            vectors /= math.sqrt(numpy.dot(vectors, vectors))
            return vectors
    norms = numpy.atleast_1d(numpy.sum(vectors * vectors, axis))
    numpy.sqrt(norms, norms)
    if axis is not None:
        # Reinsert the reduced axis so the division broadcasts correctly.
        norms = numpy.expand_dims(norms, axis)
    vectors /= norms
    return vectors if out is None else None
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0).

    >>> v = random_vector(10000)
    >>> numpy.all(v >= 0) and numpy.all(v < 1)
    True
    """
    samples = numpy.random.random(size)
    return samples
def vector_product(v0, v1, axis=0):
    """Return vector(s) perpendicular to *v0* and *v1* (cross product).

    >>> v = vector_product([2, 0, 0], [0, 3, 0])
    >>> numpy.allclose(v, [0, 0, 6])
    True
    """
    cross = numpy.cross(v0, v1, axis=axis)
    return cross
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors.

    If directed is False, the input vectors are interpreted as undirected
    axes, i.e. the maximum angle is pi/2.

    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
    >>> numpy.allclose(a, math.pi)
    True
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
    >>> numpy.allclose(a, 0)
    True
    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
    # Cosine of the angle: dot product normalized by both vector lengths.
    cosine = numpy.sum(v0 * v1, axis=axis)
    cosine /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
    if not directed:
        # Undirected axes: fold the angle into [0, pi/2].
        cosine = numpy.fabs(cosine)
    return numpy.arccos(cosine)
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix.

    >>> M0 = random_rotation_matrix()
    >>> M1 = inverse_matrix(M0.T)
    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
    True
    """
    inverted = numpy.linalg.inv(matrix)
    return inverted
def concatenate_matrices(*matrices):
    """Return the matrix product of a series of transformation matrices.

    With no arguments, returns the 4x4 identity.

    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    """
    result = numpy.identity(4)
    for matrix in matrices:
        result = numpy.dot(result, matrix)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two homogeneous matrices perform the same transform.

    Each matrix is normalized by its [3, 3] entry before comparison, so
    matrices that differ only by an overall scale compare equal.

    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.

    Return True on successful import, None otherwise.
    """
    import warnings
    from importlib import import_module
    try:
        if not package:
            module = import_module(name)
        else:
            module = import_module('.' + name, package=package)
    except ImportError:
        if warn:
            warnings.warn("failed to import module %s" % name)
        # BUG FIX: previously `return True` sat at function level, so this
        # function reported success even when the import failed; failure now
        # falls through and returns None, matching the docstring contract.
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                # Preserve the pure-Python implementation under a prefixed
                # name before it is shadowed by the imported one.
                if attr in globals():
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("no Python implementation of " + attr)
            globals()[attr] = getattr(module, attr)
        return True
# Replace the pure-Python implementations above with the optional fast C
# extension module when it is available (failure only emits a warning).
_import_module('_transformations')
if __name__ == "__main__":
    import doctest
    import random # used in doctests
    numpy.set_printoptions(suppress=True, precision=5)
    doctest.testmod()
| gpl-3.0 |
nnethercote/servo | tests/wpt/web-platform-tests/webdriver/tests/perform_actions/pointer_origin.py | 20 | 4728 | import pytest
from webdriver import MoveTargetOutOfBoundsException
from tests.perform_actions.support.mouse import get_inview_center, get_viewport_rect
from tests.support.inline import inline
def origin_doc(inner_style, outer_style=""):
return inline("""
<div id="outer" style="{1}"
onmousemove="window.coords = {{x: event.clientX, y: event.clientY}}">
<div id="inner" style="{0}"></div>
</div>
""".format(inner_style, outer_style))
def get_click_coordinates(session):
    """Return the last mouse position recorded by the test page as an
    ``{x, y}`` mapping (set by the page's onmousemove handler)."""
    return session.execute_script("return window.coords;")
def test_viewport_inside(session, mouse_chain):
    """A move with origin="viewport" targets absolute viewport coordinates."""
    point = {"x": 50, "y": 50}
    session.url = origin_doc("width: 100px; height: 50px; background: green;")
    mouse_chain \
        .pointer_move(point["x"], point["y"], origin="viewport") \
        .perform()
    # Consistency fix: use the module's get_click_coordinates helper instead
    # of re-inlining the same script (matches the other tests here).
    click_coords = get_click_coordinates(session)
    assert click_coords["x"] == pytest.approx(point["x"], abs=1.0)
    assert click_coords["y"] == pytest.approx(point["y"], abs=1.0)
def test_viewport_outside(session, mouse_chain):
    """Moving to negative viewport coordinates raises an out-of-bounds error."""
    with pytest.raises(MoveTargetOutOfBoundsException):
        chain = mouse_chain.pointer_move(-50, -50, origin="viewport")
        chain.perform()
def test_pointer_inside(session, mouse_chain):
    """A move with origin="pointer" is relative to the pointer's position."""
    start_point = {"x": 50, "y": 50}
    offset = {"x": 10, "y": 5}
    session.url = origin_doc("width: 100px; height: 50px; background: green;")
    mouse_chain \
        .pointer_move(start_point["x"], start_point["y"]) \
        .pointer_move(offset["x"], offset["y"], origin="pointer") \
        .perform()
    # Consistency fix: read coordinates through the shared helper instead of
    # re-inlining the script (matches the other tests here).
    click_coords = get_click_coordinates(session)
    assert click_coords["x"] == pytest.approx(start_point["x"] + offset["x"], abs=1.0)
    assert click_coords["y"] == pytest.approx(start_point["y"] + offset["y"], abs=1.0)
def test_pointer_outside(session, mouse_chain):
    """A relative move to negative coordinates raises an out-of-bounds error."""
    with pytest.raises(MoveTargetOutOfBoundsException):
        chain = mouse_chain.pointer_move(-50, -50, origin="pointer")
        chain.perform()
def test_element_center_point(session, mouse_chain):
    """An element origin with zero offset targets the element's in-view center."""
    session.url = origin_doc("width: 100px; height: 50px; background: green;")
    target = session.find.css("#inner", all=False)
    expected = get_inview_center(target.rect, get_viewport_rect(session))
    mouse_chain.pointer_move(0, 0, origin=target).perform()
    observed = get_click_coordinates(session)
    assert observed["x"] == pytest.approx(expected["x"], abs=1.0)
    assert observed["y"] == pytest.approx(expected["y"], abs=1.0)
def test_element_center_point_with_offset(session, mouse_chain):
    """Offsets with an element origin are measured from the in-view center."""
    session.url = origin_doc("width: 100px; height: 50px; background: green;")
    target = session.find.css("#inner", all=False)
    center = get_inview_center(target.rect, get_viewport_rect(session))
    mouse_chain.pointer_move(10, 15, origin=target).perform()
    observed = get_click_coordinates(session)
    assert observed["x"] == pytest.approx(center["x"] + 10, abs=1.0)
    assert observed["y"] == pytest.approx(center["y"] + 15, abs=1.0)
def test_element_in_view_center_point_partly_visible(session, mouse_chain):
    """For an element that is only partly inside the viewport, the target is
    the center of the visible portion, not of the full element box."""
    session.url = origin_doc("""width: 100px; height: 50px; background: green;
                               position: relative; left: -50px; top: -25px;""")
    elem = session.find.css("#inner", all=False)
    # get_inview_center clips the element rect to the viewport first.
    center = get_inview_center(elem.rect, get_viewport_rect(session))
    mouse_chain \
        .pointer_move(0, 0, origin=elem) \
        .perform()
    click_coords = get_click_coordinates(session)
    assert click_coords["x"] == pytest.approx(center["x"], abs = 1.0)
    assert click_coords["y"] == pytest.approx(center["y"], abs = 1.0)
def test_element_larger_than_viewport(session, mouse_chain):
    """For an element larger than the viewport, the target is the center of
    the visible (clipped) region rather than the element's own center."""
    session.url = origin_doc("width: 300vw; height: 300vh; background: green;")
    elem = session.find.css("#inner", all=False)
    center = get_inview_center(elem.rect, get_viewport_rect(session))
    mouse_chain \
        .pointer_move(0, 0, origin=elem) \
        .perform()
    click_coords = get_click_coordinates(session)
    assert click_coords["x"] == pytest.approx(center["x"], abs = 1.0)
    assert click_coords["y"] == pytest.approx(center["y"], abs = 1.0)
def test_element_outside_of_view_port(session, mouse_chain):
    """Targeting an element positioned fully outside the viewport raises an
    out-of-bounds error."""
    session.url = origin_doc("""width: 100px; height: 50px; background: green;
                               position: relative; left: -200px; top: -100px;""")
    elem = session.find.css("#inner", all=False)
    with pytest.raises(MoveTargetOutOfBoundsException):
        mouse_chain \
            .pointer_move(0, 0, origin=elem) \
            .perform()
| mpl-2.0 |
rdipietro/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops_test.py | 8 | 33515 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import test_util
class Base(test_util.Base):
  """Shared fixture: a 7x3x4x11 LabeledTensor with axes x/channel/z/probs.

  The 'z' axis is unlabeled (name only); the others carry tick labels.
  Also prepares pre-sliced views (x_probs_lt, channel_probs_lt) for tests.
  """

  def setUp(self):
    super(Base, self).setUp()
    self.x_size = 7
    self.channel_size = 3
    self.z_size = 4
    self.probs_size = 11
    # Flat range reshaped to 4D so every element has a unique value.
    tensor = tf.range(0, self.x_size * self.channel_size * self.z_size *
                      self.probs_size)
    tensor = tf.reshape(tensor, [self.x_size, self.channel_size, self.z_size,
                                 self.probs_size])
    a0 = ('x', range(self.x_size))
    a1 = ('channel', ['red', 'green', 'blue'])
    a2 = 'z'
    a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))
    self.tensor = tensor
    self.a0 = a0
    self.a1 = a1
    self.a2 = a2
    # 'z' with its size resolved, for axis-equality assertions.
    self.a2_resolved = ('z', self.z_size)
    self.a3 = a3
    self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])
    self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0})
    self.x_probs_lt = ops.select(self.x_probs_lt, {'channel': 'red'})
    self.channel_probs_lt = core.slice_function(self.original_lt, {'x': 3,
                                                                   'z': 0})
class SelectTest(Base):
  """Tests for ops.select: label-based indexing with scalars, slices, lists."""

  def test_name(self):
    select_lt = ops.select(self.original_lt, {'channel': 'green'})
    self.assertIn('lt_select', select_lt.name)
  def test_scalar(self):
    # Selecting a scalar label drops the 'channel' axis entirely.
    select_lt = ops.select(self.original_lt, {'channel': 'green'})
    golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :], [self.a0, self.a2,
                                                             self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_slice(self):
    # Label slices are inclusive of the stop label, unlike Python slices.
    select_lt = ops.select(self.original_lt, {'channel': slice('red', 'green')})
    a1_sliced = ('channel', ['red', 'green'])
    golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
                                   [self.a0, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_slices(self):
    select_lt = ops.select(self.original_lt, {'x': slice(1, 4),
                                              'channel': slice('green', None)})
    a0_sliced = ('x', range(1, 5))
    a1_sliced = ('channel', ['green', 'blue'])
    golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
                                   [a0_sliced, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_list(self):
    select_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
    a1_sliced = ('channel', ['red', 'green'])
    golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
                                   [self.a0, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_list_one_item(self):
    # A one-element list keeps the axis (size 1), unlike a scalar label.
    select_lt = ops.select(self.original_lt, {'channel': ['red']})
    a1_sliced = ('channel', ['red'])
    golden_lt = core.LabeledTensor(self.tensor[:, :1, :, :],
                                   [self.a0, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_list_zero_items(self):
    select_lt = ops.select(self.original_lt, {'channel': []})
    golden_lt = core.LabeledTensor(self.tensor[:, :0, :, :],
                                   [self.a0, 'channel', self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_scalars(self):
    select_lt = ops.select(self.original_lt, {'x': 1, 'channel': 'green'})
    golden_lt = core.LabeledTensor(self.tensor[1, 1, :, :],
                                   [self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)
  def test_invalid_input(self):
    # Unknown axis name.
    with self.assertRaises(ValueError):
      ops.select(self.original_lt, {'foo': 1})
    # 'z' has no tick labels, so label-based selection is invalid.
    with self.assertRaises(ValueError):
      ops.select(self.original_lt, {'z': 1})
    with self.assertRaises(KeyError):
      ops.select(self.original_lt, {'channel': 'purple'})
    with self.assertRaises(KeyError):
      ops.select(self.original_lt, {'channel': ['red', 'purple']})
    # Mixing list selection with other selections is not implemented.
    with self.assertRaises(NotImplementedError):
      ops.select(self.original_lt, {'channel': ['red'], 'x': [1]})
    with self.assertRaises(NotImplementedError):
      ops.select(self.original_lt, {'channel': ['red'], 'x': 1})
    # Strided label slices are not implemented.
    with self.assertRaises(NotImplementedError):
      ops.select(self.original_lt, {'channel': slice('red', 'green', 2)})
class ConcatTest(Base):
  """Tests for ops.concat: joining LabeledTensors along an existing axis."""

  def setUp(self):
    super(ConcatTest, self).setUp()
    # Single-channel slices used as concat inputs.
    self.red_lt = ops.select(self.original_lt, {'channel': ['red']})
    self.green_lt = ops.select(self.original_lt, {'channel': ['green']})
    self.blue_lt = ops.select(self.original_lt, {'channel': ['blue']})
  def test_name(self):
    concat_lt = ops.concat([self.red_lt, self.blue_lt], 'channel')
    self.assertIn('lt_concat', concat_lt.name)
  def test(self):
    concat_lt = ops.concat([self.red_lt, self.green_lt], 'channel')
    golden_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
    self.assertLabeledTensorsEqual(concat_lt, golden_lt)
  def test_transposed(self):
    # Inputs must share the same axis order.
    green_transposed = core.transpose(self.green_lt,
                                      ['probs', 'channel', 'z', 'x'])
    with self.assertRaises(ValueError):
      ops.concat([self.red_lt, green_transposed], 'channel')
  def test_invalid_input(self):
    with self.assertRaises(ValueError):
      ops.concat([], 'channel')
    # Duplicate tick labels on the concat axis are rejected.
    with self.assertRaises(ValueError):
      ops.concat([self.red_lt, self.red_lt], 'channel')
    with self.assertRaises(ValueError):
      ops.concat([self.red_lt, self.red_lt], 'foo')
class PackTest(Base):
  """Tests for ops.pack: stacking LabeledTensors along a new axis."""

  def test_name(self):
    pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
    self.assertIn('lt_pack', pack_lt.name)
  def test(self):
    # Default: the new axis is prepended at position 0.
    pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
    golden_lt = core.LabeledTensor(
        tf.stack([self.original_lt.tensor, self.original_lt.tensor]),
        ['batch', self.a0, self.a1, self.a2, self.a3])
    self.assertLabeledTensorsEqual(pack_lt, golden_lt)
  def test_axis(self):
    # The new axis can be inserted at an arbitrary position.
    pack_lt = ops.pack([self.original_lt, self.original_lt],
                       new_axis='batch',
                       axis_position=4)
    golden_lt = core.LabeledTensor(
        tf.stack(
            [self.original_lt.tensor, self.original_lt.tensor], axis=4),
        [self.a0, self.a1, self.a2, self.a3, 'batch'])
    self.assertLabeledTensorsEqual(pack_lt, golden_lt)
  def test_invalid_input(self):
    # The new axis name must not collide with an existing axis.
    with self.assertRaises(ValueError):
      ops.pack([self.original_lt, self.original_lt], 'channel')
class UnpackTest(Base):
  """Tests for ops.unpack: splitting a LabeledTensor along one axis."""

  def test_name(self):
    unpack_lts = ops.unpack(self.original_lt)
    for t in unpack_lts:
      self.assertIn('lt_unpack', t.name)
  def test(self):
    # Default: unpack along the first axis.
    unpack_lt = ops.unpack(self.original_lt)[0]
    golden_lt = core.LabeledTensor(
        tf.unstack(self.original_lt.tensor)[0], [self.a1, self.a2, self.a3])
    self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
  def test_axis(self):
    # Unpack along a named axis ('z' is the third dimension).
    unpack_lt = ops.unpack(self.original_lt, axis_name='z')[0]
    golden_lt = core.LabeledTensor(
        tf.unstack(
            self.original_lt.tensor, axis=2)[0], [self.a0, self.a1, self.a3])
    self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
  def test_invalid_input(self):
    with self.assertRaises(ValueError):
      ops.unpack(self.original_lt, axis_name='not_found')
class ReshapeTest(Base):
  """Tests for ops.reshape: collapsing trailing axes into new ones."""

  def test_name(self):
    reshape_lt = ops.reshape(self.original_lt, ['channel'], ['foo'])
    self.assertIn('lt_reshape', reshape_lt.name)
  def test_identity(self):
    reshape_lt = ops.reshape(self.original_lt, self.original_lt.axes.keys(),
                             self.original_lt.axes.values())
    self.assertLabeledTensorsEqual(reshape_lt, self.original_lt)
  def test_known_size(self):
    # New axis given with an explicit size.
    new_dim_size = self.channel_size * self.z_size * self.probs_size
    reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
                             [('new_dim', new_dim_size)])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.original_lt.tensor, [self.x_size, -1]),
        [self.original_lt.axes['x'], 'new_dim'])
    self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
  def test_unknown_size(self):
    # New axis given by name only; its size is inferred.
    reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
                             ['new_dim'])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.original_lt.tensor, [self.x_size, -1]),
        [self.original_lt.axes['x'], 'new_dim'])
    self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
  def test_unknown_dimension(self):
    # A statically-unknown dimension stays unknown after reshape.
    orig_lt = core.LabeledTensor(tf.placeholder(tf.float32, [None]), ['x'])
    reshape_lt = ops.reshape(orig_lt, ['x'], ['y', ('z', 1)])
    self.assertEqual(reshape_lt.axes, core.Axes([('y', None), ('z', 1)]))
    with self.test_session() as sess:
      result = sess.run(reshape_lt, feed_dict={orig_lt.tensor: [1, 2]})
      np.testing.assert_array_equal(result, [[1], [2]])
  def test_with_labels(self):
    new_dim_size = self.channel_size * self.z_size * self.probs_size
    reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
                             [('new_dim', range(new_dim_size))])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.original_lt.tensor, [self.x_size, -1]),
        [self.original_lt.axes['x'], ('new_dim', range(new_dim_size))])
    self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
  def test_invalid_input(self):
    with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
      ops.reshape(self.original_lt, ['foo'], ['bar'])
    # The existing axes to collapse must be contiguous and in order.
    with self.assertRaisesRegexp(core.AxisOrderError,
                                 'not a slice of axis names'):
      ops.reshape(self.original_lt, ['probs', 'z'], ['bar'])
    with self.assertRaisesRegexp(ValueError, 'at most one axis in new_axes'):
      ops.reshape(self.original_lt, ['probs'], ['foo', 'bar'])
class RenameAxisTest(Base):
  """Tests for ops.rename_axis: renaming one axis, preserving its labels."""

  def test_name(self):
    rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo')
    self.assertIn('lt_rename_axis', rename_axis_lt.name)
  def test_identity(self):
    rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'channel')
    self.assertLabeledTensorsEqual(rename_axis_lt, self.original_lt)
  def test_new_name(self):
    rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo')
    # Only the axis name changes; labels and order are preserved.
    expected_axes = [(name if name != 'channel' else 'foo', axis.value)
                     for name, axis in self.original_lt.axes.items()]
    expected_lt = core.LabeledTensor(self.original_lt.tensor, expected_axes)
    self.assertLabeledTensorsEqual(rename_axis_lt, expected_lt)
  def test_invalid_input(self):
    with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
      ops.rename_axis(self.original_lt, 'foo', 'bar')
class BatchTest(Base):
  """Tests for ops.batch: queue-based batching of LabeledTensors."""

  def setUp(self):
    super(BatchTest, self).setUp()
    # Ten distinguishable examples (original tensor + 0..9) packed along a
    # 'batch' axis.
    tensors = []
    for i in range(10):
      offset_lt = core.LabeledTensor(tf.constant(i), [])
      tensors.append(core.add(self.original_lt, offset_lt))
    self.pack_lt = ops.pack(tensors, 'batch')
  def test_name(self):
    batch_ops = ops.batch([self.pack_lt, self.pack_lt],
                          batch_size=2,
                          enqueue_many=True)
    for bo in batch_ops:
      self.assertIn('lt_batch', bo.name)
  def test_enqueue_many(self):
    # Re-batching 2-at-a-time then 10-at-a-time reproduces the original pack
    # because ops.batch preserves order (no shuffling).
    [batch_2_op] = ops.batch([self.pack_lt], batch_size=2, enqueue_many=True)
    self.assertEqual(len(batch_2_op.axes['batch']), 2)
    [batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
    self.assertLabeledTensorsEqual(self.pack_lt, batch_10_op)
  def test_no_enqueue_many(self):
    # Without enqueue_many each input is a single example, so batching
    # repeats the same example.
    [batch_2_op] = ops.batch([self.original_lt], batch_size=2)
    self.assertEqual(len(batch_2_op.axes['batch']), 2)
    [batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
    self.assertLabeledTensorsEqual(
        ops.pack(10 * [self.original_lt], 'batch'), batch_10_op)
  def test_invalid_input(self):
    # enqueue_many requires the inputs to already have a batch axis.
    with self.assertRaises(ValueError):
      ops.batch([self.original_lt], 3, enqueue_many=True)
  def test_allow_smaller_final_batch(self):
    # With a possibly-smaller final batch, the batch size is statically
    # unknown.
    [batch_2_op] = ops.batch([self.original_lt], batch_size=2,
                             allow_smaller_final_batch=True)
    self.assertEqual(batch_2_op.axes['batch'].size, None)
class ShuffleBatchTest(Base):
  """Tests for ops.shuffle_batch."""

  def setUp(self):
    super(ShuffleBatchTest, self).setUp()
    # Stack ten offset copies of the fixture tensor along a new 'batch' axis.
    tensors = []
    for i in range(10):
      offset_lt = core.LabeledTensor(tf.constant(i), [])
      tensors.append(core.add(self.original_lt, offset_lt))
    self.pack_lt = ops.pack(tensors, 'batch')

  def test_name(self):
    batch_lts = ops.shuffle_batch([self.pack_lt, self.pack_lt],
                                  batch_size=2,
                                  enqueue_many=True)
    for blt in batch_lts:
      self.assertIn('lt_shuffle_batch', blt.name)

  def test_enqueue_many(self):
    [batch_2_lt] = ops.shuffle_batch([self.pack_lt],
                                     batch_size=2,
                                     enqueue_many=True,
                                     min_after_dequeue=8,
                                     seed=0)
    self.assertEqual(len(batch_2_lt.axes['batch']), 2)
    [batch_10_lt] = ops.batch([batch_2_lt], batch_size=10, enqueue_many=True)
    self.assertEqual(batch_10_lt.axes, self.pack_lt.axes)
    # Axes match, but element order should differ because of shuffling.
    [batch_10, pack] = self.eval([batch_10_lt.tensor, self.pack_lt.tensor])
    self.assertFalse((batch_10 == pack).all())

  def test_allow_smaller_final_batch(self):
    # A possibly-short final batch makes the batch size statically unknown.
    [batch_2_op] = ops.shuffle_batch([self.original_lt], batch_size=2,
                                     allow_smaller_final_batch=True)
    self.assertEqual(batch_2_op.axes['batch'].size, None)
class RandomCropTest(Base):
  """Tests for ops.random_crop."""

  def test_name(self):
    crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
    self.assertIn('lt_random_crop', crop_lt.name)

  def test_single(self):
    # Cropping one axis shrinks only that axis.
    crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
    self.assertEqual(
        core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 3)]),
        crop_lt.axes)

  def test_double(self):
    crop_lt = ops.random_crop(self.original_lt, {'probs': 3, 'channel': 2})
    self.assertEqual(
        core.Axes([self.a0, ('channel', 2), self.a2_resolved, ('probs', 3)]),
        crop_lt.axes)

  def test_size1(self):
    crop_lt = ops.random_crop(self.original_lt, {'probs': 1})
    self.assertEqual(
        core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 1)]),
        crop_lt.axes)

  def test_different_seeds(self):
    # Different seeds should (with overwhelming probability) crop differently.
    crop_0_lt = ops.random_crop(self.original_lt, {'probs': 3,
                                                   'channel': 2},
                                seed=0)
    crop_1_lt = ops.random_crop(self.original_lt, {'probs': 3,
                                                   'channel': 2},
                                seed=1)
    self.assertEqual(crop_0_lt.axes, crop_1_lt.axes)
    [crop_0, crop_1] = self.eval([crop_0_lt.tensor, crop_1_lt.tensor])
    self.assertFalse((crop_0 == crop_1).all())

  def test_identical_seeds(self):
    # The same seed must reproduce the same crop.
    crop_0_lt = ops.random_crop(self.original_lt, {'probs': 3,
                                                   'channel': 2},
                                seed=0)
    crop_1_lt = ops.random_crop(self.original_lt, {'probs': 3,
                                                   'channel': 2},
                                seed=0)
    self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)

  def test_crop_idempotent(self):
    # Cropping to the same sizes again cannot change the result.
    crop_0_lt = ops.random_crop(self.original_lt, {'probs': 3,
                                                   'channel': 2},
                                seed=0)
    crop_1_lt = ops.random_crop(crop_0_lt, {'probs': 3, 'channel': 2}, seed=1)
    self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)

  def test_invalid_input(self):
    with self.assertRaises(ValueError):
      ops.random_crop(self.original_lt, {'foobar': 2})
class MapFnTest(Base):
  """Tests for ops.map_fn."""

  def test_name(self):
    map_lt = ops.map_fn(core.identity, self.original_lt)
    self.assertIn('lt_map_fn', map_lt.name)

  def test_identity(self):
    map_lt = ops.map_fn(core.identity, self.original_lt)
    self.assertLabeledTensorsEqual(map_lt, self.original_lt)

  def test_callable_object(self):
    # map_fn should accept any callable, not just plain functions.
    class Identity(object):

      def __call__(self, other):
        return other

    map_lt = ops.map_fn(Identity(), self.original_lt)
    self.assertLabeledTensorsEqual(map_lt, self.original_lt)

  def test_slice(self):
    # Mapping a slice over the leading axis equals slicing the whole tensor.
    map_lt = ops.map_fn(lambda t: core.slice_function(t, {'channel': 1}),
                        self.original_lt)
    slice_lt = core.slice_function(self.original_lt, {'channel': 1})
    self.assertLabeledTensorsEqual(map_lt, slice_lt)
class SqueezeTest(Base):
  """Tests for ops.squeeze."""

  def setUp(self):
    super(SqueezeTest, self).setUp()
    # Make 'channel' and 'probs' size-1 so they are squeezable.
    self.squeezable_lt = core.slice_function(self.original_lt,
                                             {'channel': slice(0, 1),
                                              'probs': slice(0, 1)})

  def test_name(self):
    squeeze_lt = ops.squeeze(self.squeezable_lt)
    self.assertIn('lt_squeeze', squeeze_lt.name)

  def test_none(self):
    # axes=None should squeeze every size-1 axis.
    none_lt = ops.squeeze(self.squeezable_lt, None)
    axes_lt = ops.squeeze(self.squeezable_lt, ['channel', 'probs'])
    self.assertLabeledTensorsEqual(none_lt, axes_lt)

  def test(self):
    squeeze_lt = ops.squeeze(self.squeezable_lt, ['probs'])
    golden_lt = core.slice_function(self.squeezable_lt, {'probs': 0})
    self.assertLabeledTensorsEqual(squeeze_lt, golden_lt)

  def test_invalid_input(self):
    # Non-size-1 axes and unknown axes must be rejected.
    with self.assertRaises(ValueError):
      ops.squeeze(self.original_lt, ['channel'])
    with self.assertRaises(ValueError):
      ops.squeeze(self.squeezable_lt, ['foo'])
class MatMulTest(Base):
  """Tests for ops.matmul over vector/matrix operand combinations."""

  def test_name(self):
    x_lt = core.LabeledTensor(tf.ones((3,)), ['x'])
    matmul_lt = ops.matmul(x_lt, x_lt)
    self.assertIn('lt_matmul', matmul_lt.name)

  def test_vector_vector(self):
    # vector . vector contracts to a scalar (0 + 1 + 4 = 5).
    x_lt = core.LabeledTensor(tf.range(3), ['x'])
    matmul_lt = ops.matmul(x_lt, x_lt)
    golden_lt = core.convert_to_labeled_tensor(5)
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)

  def test_matrix_vector(self):
    xy_lt = core.LabeledTensor(tf.reshape(tf.range(6), (2, 3)), ['x', 'y'])
    y_lt = core.LabeledTensor(tf.range(3), ['y'])
    matmul_lt = ops.matmul(xy_lt, y_lt)
    golden_lt = core.LabeledTensor(
        tf.matmul(xy_lt.tensor, tf.reshape(y_lt.tensor, (-1, 1)))[:, 0], ['x'])
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    # Argument order should not matter: the shared axis is contracted.
    matmul_lt = ops.matmul(y_lt, xy_lt)
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)

  def test_matrix_matrix(self):
    xy_lt = core.LabeledTensor(tf.reshape(tf.range(6), (2, 3)), ['x', 'y'])
    yz_lt = core.LabeledTensor(tf.reshape(tf.range(12), (3, 4)), ['y', 'z'])
    matmul_lt = ops.matmul(xy_lt, yz_lt)
    golden_lt = core.LabeledTensor(
        tf.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    # Transposing either operand must not change the (labeled) result.
    transpose = lambda x: core.transpose(x, list(x.axes.keys())[::-1])
    matmul_lt = ops.matmul(xy_lt, transpose(yz_lt))
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    matmul_lt = ops.matmul(transpose(xy_lt), yz_lt)
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    matmul_lt = ops.matmul(transpose(xy_lt), transpose(yz_lt))
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    # Swapping the operands transposes the labeled result.
    matmul_lt = ops.matmul(yz_lt, xy_lt)
    self.assertLabeledTensorsEqual(matmul_lt, transpose(golden_lt))

  def test_matrix_matrix_axis_order(self):
    xy_lt = core.LabeledTensor(tf.reshape(tf.range(6), (2, 3)), ['x', 'y'])
    yz_lt = core.LabeledTensor(tf.reshape(tf.range(12), (3, 4)), ['y', 'z'])
    golden_lt = core.LabeledTensor(
        tf.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
    # Inside an axis_order_scope, results are normalized to scope order.
    with core.axis_order_scope(['x', 'y', 'z']):
      matmul_lt = ops.matmul(xy_lt, yz_lt)
      self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
      matmul_lt = ops.matmul(yz_lt, xy_lt)
      self.assertLabeledTensorsEqual(matmul_lt, golden_lt)

  def test_invalid(self):
    scalar_lt = core.LabeledTensor(tf.ones(()), [])
    x_lt = core.LabeledTensor(tf.ones((2,)), ['x'])
    x2_lt = core.LabeledTensor(tf.ones((3,)), ['x'])
    y_lt = core.LabeledTensor(tf.ones((3,)), ['y'])
    xy_lt = core.LabeledTensor(tf.ones((2, 3)), ['x', 'y'])
    xyz_lt = core.LabeledTensor(tf.ones((2, 3, 1)), ['x', 'y', 'z'])
    with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'):
      ops.matmul(x_lt, scalar_lt)
    with self.assertRaises(NotImplementedError):
      ops.matmul(x_lt, xyz_lt)
    with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'):
      ops.matmul(x_lt, y_lt)
    with self.assertRaises(NotImplementedError):
      ops.matmul(xy_lt, xy_lt)
    with self.assertRaisesRegexp(ValueError, 'does not match'):
      ops.matmul(x_lt, x2_lt)
class ReduceSumTest(Base):
  """Tests for ops.reduce_sum, including keep-axis and scalar cases."""

  def test_name(self):
    sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_sum', sum_lt.name)

  def test_drop_axis(self):
    sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_sum(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_drop_scalar_axis(self):
    # A bare string is accepted as shorthand for a single axis.
    sum_lt = ops.reduce_sum(self.original_lt, 'channel')
    golden_lt = core.LabeledTensor(
        tf.reduce_sum(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_keep_axis(self):
    # An (axis, label) pair keeps the axis as size 1 with the given label.
    sum_lt = ops.reduce_sum(self.original_lt, {('channel', 'hihowareyou')})
    golden_lt = core.LabeledTensor(
        tf.reduce_sum(self.original_lt.tensor,
                      1, keep_dims=True),
        [self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_keep_scalar_axis(self):
    sum_lt = ops.reduce_sum(self.original_lt, ('channel', 'hihowareyou'))
    golden_lt = core.LabeledTensor(
        tf.reduce_sum(self.original_lt.tensor,
                      1, keep_dims=True),
        [self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_scalar(self):
    # Reducing a scalar over no axes is the identity.
    scalar_lt = core.LabeledTensor(tf.constant(42), [])
    reduce_lt = ops.reduce_sum(scalar_lt, [])
    self.assertLabeledTensorsEqual(reduce_lt, scalar_lt)

  def test_empty_list(self):
    reduce_lt = ops.reduce_sum(self.original_lt, [])
    self.assertLabeledTensorsEqual(reduce_lt, self.original_lt)

  def test_none(self):
    # axes=None reduces over everything, yielding a scalar.
    sum_lt = ops.reduce_sum(self.original_lt)
    golden_lt = core.LabeledTensor(tf.reduce_sum(self.original_lt.tensor), [])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_function_docstring_and_name(self):
    # The wrapper is generated; make sure metadata survives generation.
    self.assertIn('tf.reduce_sum', ops.reduce_sum.__doc__)
    self.assertEqual('reduce_sum', ops.reduce_sum.__name__)
class ReduceMeanTest(Base):
  """Tests for ops.reduce_mean."""

  def test_name(self):
    actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_mean', actual_lt.name)

  def test(self):
    actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_mean(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(actual_lt, golden_lt)
class ReduceProdTest(Base):
  """Tests for ops.reduce_prod."""

  def test_name(self):
    result_lt = ops.reduce_prod(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_prod', result_lt.name)

  def test(self):
    result_lt = ops.reduce_prod(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_prod(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceMinTest(Base):
  """Tests for ops.reduce_min."""

  def test_name(self):
    result_lt = ops.reduce_min(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_min', result_lt.name)

  def test(self):
    result_lt = ops.reduce_min(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_min(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceMaxTest(Base):
  """Tests for ops.reduce_max."""

  def test_name(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_max', result_lt.name)

  def test(self):
    result_lt = ops.reduce_max(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_max(self.original_lt.tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
class BaseReduceBoolean(Base):
  """Shared fixture: a boolean LabeledTensor for reduce_all/reduce_any tests."""

  def setUp(self):
    super(BaseReduceBoolean, self).setUp()
    # Element-wise "value > 5" mask with the same axes as the fixture tensor.
    self.bool_tensor = tf.cast(self.original_lt.tensor > 5, tf.bool)
    self.bool_lt = core.LabeledTensor(self.bool_tensor, self.original_lt.axes)
class ReduceAllTest(BaseReduceBoolean):
  """Tests for ops.reduce_all."""

  def test_name(self):
    result_lt = ops.reduce_all(self.bool_lt, {'channel'})
    self.assertIn('lt_reduce_all', result_lt.name)

  def test(self):
    result_lt = ops.reduce_all(self.bool_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_all(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceAnyTest(BaseReduceBoolean):
  """Tests for ops.reduce_any."""

  def test_name(self):
    result_lt = ops.reduce_any(self.bool_lt, {'channel'})
    self.assertIn('lt_reduce_any', result_lt.name)

  def test(self):
    result_lt = ops.reduce_any(self.bool_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
class TileTest(Base):
  """Tests for ops.tile."""

  def test_name(self):
    tile_lt = ops.tile(self.original_lt, {'z': 2})
    self.assertIn('lt_tile', tile_lt.name)

  def test(self):
    # The multiple may be a Python int or a scalar Tensor.
    for multiple in [2, tf.constant(2)]:
      tile_lt = ops.tile(self.original_lt, {'z': multiple})
      golden_op = tf.tile(self.original_lt.tensor, [1, 1, multiple, 1])
      golden_axes = ['z' if axis.name == 'z' else axis
                     for axis in self.original_lt.axes.values()]
      golden_lt = core.LabeledTensor(golden_op, golden_axes)
      self.assertLabeledTensorsEqual(tile_lt, golden_lt)

  def test_invalid_input(self):
    with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
      ops.tile(self.original_lt, {'foo': 5})
    # Axes with tick labels cannot be tiled (labels would be ambiguous).
    with self.assertRaisesRegexp(ValueError, 'axes with tick labels'):
      ops.tile(self.original_lt, {'x': 5})
class PadTest(Base):
  """Tests for ops.pad."""

  def test_name(self):
    pad_lt = ops.pad(self.original_lt, {'x': (1, 1),
                                        'channel': ([], ['alpha'])})
    self.assertIn('lt_pad', pad_lt.name)

  def test(self):
    # Pad 'x' by one on each side and append an 'alpha' tick to 'channel'.
    pad_lt = ops.pad(self.original_lt, {'x': (1, 1),
                                        'channel': ([], ['alpha'])})
    golden_op = tf.pad(self.original_lt.tensor, [[1, 1], [0, 1], [0, 0],
                                                 [0, 0]])
    golden_axes = [('x', self.x_size + 2),
                   ('channel', ['red', 'green', 'blue', 'alpha']), self.a2,
                   self.a3]
    golden_lt = core.LabeledTensor(golden_op, golden_axes)
    self.assertLabeledTensorsEqual(pad_lt, golden_lt)

  def test_invalid_input(self):
    with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
      ops.pad(self.original_lt, {'foo': (1, 1), 'channel': ([], ['alpha'])})
class ConstantTest(Base):
  """Tests for ops.constant."""

  def test_name(self):
    constant_lt = ops.constant(1)
    self.assertIn('lt_constant', constant_lt.name)

  def test_scalar(self):
    constant_lt = ops.constant(1)
    golden_lt = core.LabeledTensor(tf.constant(1), [])
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)

  def test_infer_shape(self):
    # Axis sizes may be inferred from the value when only names are given.
    constant_lt = ops.constant([1, 2], axes=['x'])
    golden_lt = core.LabeledTensor(tf.constant([1, 2]), ['x'])
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)

  def test_specify_shape(self):
    # An explicit (name, size) axis broadcasts a scalar to that shape.
    constant_lt = ops.constant(1, axes=[('x', 3)])
    golden_lt = core.LabeledTensor(tf.constant(1, shape=(3,)), ['x'])
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)

  def test_existing_axes(self):
    # An existing Axes object is accepted directly.
    golden_lt = core.LabeledTensor(tf.constant([1, 2]), ['x'])
    constant_lt = ops.constant([1, 2], axes=golden_lt.axes)
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)
class ZerosLikeTest(Base):
  """Tests for ops.zeros_like."""

  def test_name(self):
    result_lt = ops.zeros_like(self.original_lt)
    self.assertIn('lt_zeros_like', result_lt.name)

  def test(self):
    # Result should be all zeros while preserving the labeled axes.
    result_lt = ops.zeros_like(self.original_lt)
    expected_tensor = tf.zeros_like(self.original_lt.tensor)
    expected_lt = core.LabeledTensor(expected_tensor, self.original_lt.axes)
    self.assertLabeledTensorsEqual(result_lt, expected_lt)
class OnesLikeTest(Base):
  """Tests for ops.ones_like."""

  def test_name(self):
    result_lt = ops.ones_like(self.original_lt)
    self.assertIn('lt_ones_like', result_lt.name)

  def test(self):
    # Result should be all ones while preserving the labeled axes.
    result_lt = ops.ones_like(self.original_lt)
    expected_tensor = tf.ones_like(self.original_lt.tensor)
    expected_lt = core.LabeledTensor(expected_tensor, self.original_lt.axes)
    self.assertLabeledTensorsEqual(result_lt, expected_lt)
class CastTest(Base):
  """Tests for ops.cast."""

  def test_name(self):
    result_lt = ops.cast(self.original_lt, tf.float16)
    self.assertIn('lt_cast', result_lt.name)

  def test(self):
    # Casting changes the dtype but must preserve the labeled axes.
    result_lt = ops.cast(self.original_lt, tf.float16)
    expected_tensor = tf.cast(self.original_lt.tensor, tf.float16)
    expected_lt = core.LabeledTensor(expected_tensor, self.original_lt.axes)
    self.assertLabeledTensorsEqual(result_lt, expected_lt)
class VerifyTensorAllFiniteTest(Base):
  """Tests for ops.verify_tensor_all_finite."""

  def setUp(self):
    super(VerifyTensorAllFiniteTest, self).setUp()
    self.finite_lt = core.LabeledTensor(tf.constant(42.0), [])
    self.nan_lt = core.LabeledTensor(tf.constant(np.nan), [])
    self.checked_finite_lt = ops.verify_tensor_all_finite(self.finite_lt, '')
    self.checked_nan_lt = ops.verify_tensor_all_finite(self.nan_lt, '')

  def test_name(self):
    self.assertIn('lt_verify_tensor_all_finite', self.checked_finite_lt.name)
    self.assertIn('lt_verify_tensor_all_finite', self.checked_nan_lt.name)

  def test_finite(self):
    # The check is a pass-through for finite values.
    self.assertLabeledTensorsEqual(self.finite_lt, self.checked_finite_lt)

  def test_nan(self):
    # Evaluating the checked NaN tensor must raise at run time.
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 'Tensor had NaN values'):
      self.eval([self.checked_nan_lt])
class BooleanMaskTest(Base):
  """Tests for ops.boolean_mask."""

  def test_name(self):
    mask = core.LabeledTensor(tf.range(7) > 3, [self.a0])
    masked_lt = ops.boolean_mask(self.original_lt, mask)
    self.assertIn('lt_boolean_mask', masked_lt.name)

  def test(self):
    # Masking the leading axis replaces its labels with a plain 'x' axis
    # because the surviving length is only known at run time.
    mask = core.LabeledTensor(tf.range(7) > 3, [self.a0])
    masked_lt = ops.boolean_mask(self.original_lt, mask)
    golden_lt = core.LabeledTensor(
        tf.boolean_mask(self.original_lt.tensor, mask.tensor),
        ['x', self.a1, self.a2, self.a3])
    self.assertLabeledTensorsEqual(masked_lt, golden_lt)

  def test_invalid_rank(self):
    # Only 1-D masks are supported.
    mask = core.LabeledTensor(tf.ones((7, 3)) > 3, [self.a0, self.a1])
    with self.assertRaises(NotImplementedError):
      ops.boolean_mask(self.original_lt, mask)

  def test_mismatched_axis(self):
    mask = core.LabeledTensor(tf.range(7) > 3, ['foo'])
    with self.assertRaisesRegexp(ValueError, 'not equal'):
      ops.boolean_mask(self.original_lt, mask)
class WhereTest(Base):
  """Tests for ops.where."""

  def test_name(self):
    condition = core.LabeledTensor(tf.range(5) < 3, ['x'])
    where_lt = ops.where(condition, condition, condition)
    self.assertIn('lt_where', where_lt.name)

  def test(self):
    # Picks from x where the condition holds, otherwise from y.
    condition = core.LabeledTensor(tf.range(5) < 3, ['x'])
    x = core.LabeledTensor(tf.ones(5), ['x'])
    y = core.LabeledTensor(tf.zeros(5), ['x'])
    where_lt = ops.where(condition, x, y)
    golden_lt = core.LabeledTensor(
        tf.concat(0, [tf.ones(3), tf.zeros(2)]), ['x'])
    self.assertLabeledTensorsEqual(where_lt, golden_lt)

  def test_mismatched_axes(self):
    # All three inputs must share identical axes.
    condition = core.LabeledTensor(tf.range(5) < 3, ['x'])
    with self.assertRaisesRegexp(ValueError, 'equal axes'):
      ops.where(condition, condition[:3], condition)
    with self.assertRaisesRegexp(ValueError, 'equal axes'):
      ops.where(condition, condition, condition[:3])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
felix9064/python | Demo/liaoxf/listcompr.py | 1 | 1180 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# List comprehension examples.

# Example 1: use a list comprehension to lower-case the string elements of
# list1 while filtering out its non-string elements.
# isinstance is a Python builtin that tests whether an object has a given type.
list1 = ['Hello', 'World', 18, 'Apple', None]
list2 = [s.lower() for s in list1 if isinstance(s, str)]
print(list2)
if list2 == ['hello', 'world', 'apple']:
    print('测试通过!')
else:
    print('测试失败!')

# Example 2: build the list of 2n+1 for n from 3 to 11.
list3 = [2*n + 1 for n in range(3, 12)]
print(list3)

# Example 3: keep only the elements of a number list that are greater than 20.
L = [3, 7, 11, 14, 19, 33, 26, 57, 99]
list4 = [x for x in L if x > 20]
print(list4)

# Example 4: compute the Cartesian product of two lists, collecting the
# resulting pairs into a new list.
L1 = ['香蕉', '苹果', '橙子']
L2 = ['可乐', '牛奶']
list5 = [(x, y) for x in L1 for y in L2]
print(list5)

# Example 5: convert a dict into a list of (key, value) tuples.
D = {'Tom': 15, 'Jerry': 18, 'Peter': 13}
list6 = [(k, v) for k, v in D.items()]
print(list6)
| mit |
seem-sky/kbengine | kbe/src/lib/python/Lib/idlelib/SearchEngine.py | 70 | 7483 | '''Define SearchEngine for search dialogs.'''
import re
from tkinter import StringVar, BooleanVar, TclError
import tkinter.messagebox as tkMessageBox
def get(root):
    """Return the singleton SearchEngine instance for the process.

    The same engine object is handed out for every dialog, so search
    settings persist between dialog instances.  If no engine exists yet,
    create one and cache it on root.
    """
    try:
        return root._searchengine
    except AttributeError:
        engine = SearchEngine(root)
        # Storing the engine on root creates a reference cycle that
        # persists until root is deleted.
        root._searchengine = engine
        return engine
class SearchEngine:
    """Handles searching a text widget for Find, Replace, and Grep."""

    def __init__(self, root):
        '''Initialize Variables that save search state.

        The dialogs bind these to the UI elements present in the dialogs.
        '''
        self.root = root  # need for report_error()
        self.patvar = StringVar(root, '')    # search pattern
        self.revar = BooleanVar(root, False)   # regular expression?
        self.casevar = BooleanVar(root, False)   # match case?
        self.wordvar = BooleanVar(root, False)   # match whole word?
        self.wrapvar = BooleanVar(root, True)    # wrap around buffer?
        self.backvar = BooleanVar(root, False)   # search backwards?

    # Access methods

    def getpat(self):
        return self.patvar.get()

    def setpat(self, pat):
        self.patvar.set(pat)

    def isre(self):
        return self.revar.get()

    def iscase(self):
        return self.casevar.get()

    def isword(self):
        return self.wordvar.get()

    def iswrap(self):
        return self.wrapvar.get()

    def isback(self):
        return self.backvar.get()

    # Higher level access methods

    def setcookedpat(self, pat):
        "Set pattern after escaping if re."
        # called only in SearchDialog.py: 66
        if self.isre():
            pat = re.escape(pat)
        self.setpat(pat)

    def getcookedpat(self):
        # Return the pattern escaped (if not an re) and word-bounded
        # (if whole-word matching is on), ready for compilation.
        pat = self.getpat()
        if not self.isre():  # if True, see setcookedpat
            pat = re.escape(pat)
        if self.isword():
            pat = r"\b%s\b" % pat
        return pat

    def getprog(self):
        "Return compiled cooked search pattern."
        pat = self.getpat()
        if not pat:
            self.report_error(pat, "Empty regular expression")
            return None
        pat = self.getcookedpat()
        flags = 0
        if not self.iscase():
            flags = flags | re.IGNORECASE
        try:
            prog = re.compile(pat, flags)
        except re.error as what:
            # re.error args are (msg,) or (msg, col); report col if present.
            args = what.args
            msg = args[0]
            col = args[1] if len(args) >= 2 else -1
            self.report_error(pat, msg, col)
            return None
        return prog

    def report_error(self, pat, msg, col=-1):
        # Derived class could override this with something fancier
        msg = "Error: " + str(msg)
        if pat:
            msg = msg + "\nPattern: " + str(pat)
        if col >= 0:
            msg = msg + "\nOffset: " + str(col)
        tkMessageBox.showerror("Regular expression error",
                               msg, master=self.root)

    def search_text(self, text, prog=None, ok=0):
        '''Return (lineno, matchobj) or None for forward/backward search.

        This function calls the right function with the right arguments.
        It directly returns the result of that call.

        Text is a text widget. Prog is a precompiled pattern.
        The ok parameter is a bit complicated as it has two effects.

        If there is a selection, the search begin at either end,
        depending on the direction setting and ok, with ok meaning that
        the search starts with the selection. Otherwise, search begins
        at the insert mark.

        To aid progress, the search functions do not return an empty
        match at the starting position unless ok is True.
        '''
        if not prog:
            prog = self.getprog()
            if not prog:
                return None  # Compilation failed -- stop
        wrap = self.wrapvar.get()
        first, last = get_selection(text)
        if self.isback():
            if ok:
                start = last
            else:
                start = first
            line, col = get_line_col(start)
            res = self.search_backward(text, prog, line, col, wrap, ok)
        else:
            if ok:
                start = first
            else:
                start = last
            line, col = get_line_col(start)
            res = self.search_forward(text, prog, line, col, wrap, ok)
        return res

    def search_forward(self, text, prog, line, col, wrap, ok=0):
        # Scan line by line from (line, col); wrap to line 1 at most once.
        wrapped = 0
        startline = line
        chars = text.get("%d.0" % line, "%d.0" % (line+1))
        while chars:
            m = prog.search(chars[:-1], col)
            if m:
                if ok or m.end() > col:
                    return line, m
            line = line + 1
            if wrapped and line > startline:
                break
            col = 0
            ok = 1
            chars = text.get("%d.0" % line, "%d.0" % (line+1))
            if not chars and wrap:
                wrapped = 1
                wrap = 0
                line = 1
                chars = text.get("1.0", "2.0")
        return None

    def search_backward(self, text, prog, line, col, wrap, ok=0):
        # Scan line by line backwards from (line, col); wrap to the last
        # line at most once.
        wrapped = 0
        startline = line
        chars = text.get("%d.0" % line, "%d.0" % (line+1))
        while 1:
            m = search_reverse(prog, chars[:-1], col)
            if m:
                if ok or m.start() < col:
                    return line, m
            line = line - 1
            if wrapped and line < startline:
                break
            ok = 1
            if line <= 0:
                if not wrap:
                    break
                wrapped = 1
                wrap = 0
                pos = text.index("end-1c")
                line, col = map(int, pos.split("."))
            chars = text.get("%d.0" % line, "%d.0" % (line+1))
            col = len(chars) - 1
        return None
def search_reverse(prog, chars, col):
    '''Search backwards and return an re match object or None.

    This is done by scanning forwards and remembering the most recent
    match whose start is before col and whose end does not pass col.

    Prog: compiled re object with a search method returning a match.
    Chars: line of text, without \n.
    Col: stop index for the search; the limit for match.end().
    '''
    best = None
    match = prog.search(chars)
    while match:
        start, end = match.span()
        if start >= col or end > col:
            break
        best = match
        # Step past zero-width matches so the scan always makes progress.
        next_pos = end + 1 if start == end else end
        match = prog.search(chars, next_pos)
    return best
def get_selection(text):
    '''Return tuple of 'line.col' indexes from selection or insert mark.
    '''
    try:
        first = text.index("sel.first")
        last = text.index("sel.last")
    except TclError:
        # No selection in the widget.
        first = last = None
    if not first:
        first = text.index("insert")
    if not last:
        last = first
    return first, last
def get_line_col(index):
    '''Return (line, col) tuple of ints from 'line.col' string.'''
    line_str, _, col_str = index.partition(".")
    # int() raises ValueError on an invalid index, as before.
    return int(line_str), int(col_str)
if __name__ == "__main__":
import unittest
unittest.main('idlelib.idle_test.test_searchengine', verbosity=2, exit=False)
| lgpl-3.0 |
wbond/asn1crypto | tests/test_csr.py | 2 | 5871 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import unittest
import sys
import os
from asn1crypto import csr, util
from ._unittest_compat import patch
patch()
# Python 2/3 compatibility aliases for the byte-string and big-int types
# used in assertions below.
if sys.version_info < (3,):
    byte_cls = str
    num_cls = long  # noqa
else:
    byte_cls = bytes
    num_cls = int

# Directory holding the CSR fixture files, relative to this test module.
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
class CSRTests(unittest.TestCase):
    """Parsing tests for DER-encoded PKCS#10 certification requests."""

    def test_parse_csr(self):
        # CSR without any requested extensions.
        with open(os.path.join(fixtures_dir, 'test-inter-der.csr'), 'rb') as f:
            certification_request = csr.CertificationRequest.load(f.read())

        cri = certification_request['certification_request_info']

        self.assertEqual(
            'v1',
            cri['version'].native
        )

        self.assertEqual(
            util.OrderedDict([
                ('country_name', 'US'),
                ('state_or_province_name', 'Massachusetts'),
                ('locality_name', 'Newbury'),
                ('organization_name', 'Codex Non Sufficit LC'),
                ('organizational_unit_name', 'Testing Intermediate'),
                ('common_name', 'Will Bond'),
                ('email_address', 'will@codexns.io'),
            ]),
            cri['subject'].native
        )
        self.assertEqual(
            util.OrderedDict([
                ('algorithm', 'rsa'),
                ('parameters', None),
            ]),
            cri['subject_pk_info']['algorithm'].native
        )
        # 2048-bit RSA modulus expected from the fixture key.
        self.assertEqual(
            24141757533938720807477509823483015516687050697622322097001928034085434547050399731881871694642845241206788286795830006142635608141713689209738431462004600429798152826994774062467402648660593454536565119527837471261495586474194846971065722669734666949739228862107500673350843489920495869942508240779131331715037662761414997889327943217889802893638175792326783316531272170879284118280173511200768884738639370318760377047837471530387161553030663446359575963736475504659902898072137674205021477968813148345198711103071746476009234601299344030395455052526948041544669303473529511160643491569274897838845918784633403435929, # noqa
            cri['subject_pk_info']['public_key'].parsed['modulus'].native
        )
        self.assertEqual(
            65537,
            cri['subject_pk_info']['public_key'].parsed['public_exponent'].native
        )
        self.assertEqual(
            [],
            cri['attributes'].native
        )

    def test_parse_csr2(self):
        # CSR carrying an extension_request attribute (basic_constraints and
        # key_usage).
        with open(os.path.join(fixtures_dir, 'test-third-der.csr'), 'rb') as f:
            certification_request = csr.CertificationRequest.load(f.read())

        cri = certification_request['certification_request_info']

        self.assertEqual(
            'v1',
            cri['version'].native
        )

        self.assertEqual(
            util.OrderedDict([
                ('country_name', 'US'),
                ('state_or_province_name', 'Massachusetts'),
                ('locality_name', 'Newbury'),
                ('organization_name', 'Codex Non Sufficit LC'),
                ('organizational_unit_name', 'Test Third-Level Certificate'),
                ('common_name', 'Will Bond'),
                ('email_address', 'will@codexns.io'),
            ]),
            cri['subject'].native
        )
        self.assertEqual(
            util.OrderedDict([
                ('algorithm', 'rsa'),
                ('parameters', None),
            ]),
            cri['subject_pk_info']['algorithm'].native
        )
        # 2048-bit RSA modulus expected from the fixture key.
        self.assertEqual(
            24242772097421005542208203320016703216069397492249392798445262959177221203301502279838173203064357049006693856302147277901773700963054800321566171864477088538775137040886151390015408166478059887940234405152693144166884492162723776487601158833605063151869850475289834250129252480954724818505034734280077580919995584375189497366089269712298471489896645221362055822887892887126082288043106492130176555423739906252380437817155678204772878611148787130925042126257401487070141904017757131876614711613405231164930930771261221451019736883391322299033324412671768599041417705072563016759224152503535867541947310239343903761461, # noqa
            cri['subject_pk_info']['public_key'].parsed['modulus'].native
        )
        self.assertEqual(
            65537,
            cri['subject_pk_info']['public_key'].parsed['public_exponent'].native
        )
        self.assertEqual(
            [
                util.OrderedDict([
                    ('type', 'extension_request'),
                    (
                        'values',
                        [
                            [
                                util.OrderedDict([
                                    ('extn_id', 'basic_constraints'),
                                    ('critical', False),
                                    (
                                        'extn_value',
                                        util.OrderedDict([
                                            ('ca', False),
                                            ('path_len_constraint', None),
                                        ])
                                    ),
                                ]),
                                util.OrderedDict([
                                    ('extn_id', 'key_usage'),
                                    ('critical', False),
                                    (
                                        'extn_value',
                                        set(['digital_signature', 'non_repudiation', 'key_encipherment']),
                                    ),
                                ])
                            ]
                        ]
                    ),
                ]),
            ],
            cri['attributes'].native
        )
| mit |
ran5515/DeepDecision | tensorflow/python/training/sync_replicas_optimizer_test.py | 32 | 11519 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import training
# Creates the workers and return their sessions, graphs, train_ops.
# Creates the workers and return their sessions, graphs, train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
  """Build one replicated training graph per worker.

  Args:
    num_workers: number of worker replicas to build.
    replicas_to_aggregate: gradients to aggregate per global step.
    workers: worker server objects whose .target each session connects to.

  Returns:
    Tuple of lists (sessions, graphs, train_ops), one entry per worker.
  """
  sessions = []
  graphs = []
  train_ops = []
  for worker_id in range(num_workers):
    graph = ops.Graph()
    is_chief = (worker_id == 0)
    with graph.as_default():
      # Variables are partitioned across the two parameter servers.
      with ops.device("/job:ps/task:0"):
        global_step = variables.Variable(0, name="global_step", trainable=False)
        var_0 = variables.Variable(0.0, name="v0")
      with ops.device("/job:ps/task:1"):
        var_1 = variables.Variable(1.0, name="v1")
        var_sparse = variables.Variable([[3.0], [4.0]], name="v_sparse")

      with ops.device("/job:worker/task:" + str(worker_id)):
        # Each worker contributes slightly different constant gradients so
        # the aggregated average is easy to predict in the tests.
        grads_0 = constant_op.constant(0.1 + worker_id * 0.2)
        grads_1 = constant_op.constant(0.9 + worker_id * 0.2)
        # This is to test against sparse gradients.
        grads_sparse = ops.IndexedSlices(
            constant_op.constant(
                [0.1 + worker_id * 0.2], shape=[1, 1]),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))

        sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)
        sync_rep_opt = training.SyncReplicasOptimizer(
            sgd_opt,
            replicas_to_aggregate=replicas_to_aggregate,
            total_num_replicas=num_workers)
        train_op = [
            sync_rep_opt.apply_gradients(
                zip([grads_0, grads_1, grads_sparse],
                    [var_0, var_1, var_sparse]),
                global_step=global_step)
        ]
        sync_replicas_hook = sync_rep_opt.make_session_run_hook(
            is_chief, num_tokens=num_workers)

      # Creates MonitoredSession
      session = training.MonitoredTrainingSession(
          master=workers[worker_id].target,
          is_chief=is_chief,
          hooks=[sync_replicas_hook])

    sessions.append(session)
    graphs.append(graph)
    train_ops.append(train_op)

  return sessions, graphs, train_ops
class SyncReplicasOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
  """Execute train_op in the given session (small helper for the tests)."""
  sess.run(train_op)
def test2Workers(self):
num_workers = 2
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
self.assertAllEqual(0, sessions[0].run(local_step_0))
# Will just use session 1 to verify all the variables later.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))
# We have initial tokens in the queue so we can call this one by one. After
# the first step, this will no longer work as there will be no more extra
# tokens in the queue.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# The global step should have been updated and the variables should now have
# the new values after the average of the gradients are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
sessions[1].run(var_sparse_g_1))
# The local step for both workers should still be 0 because the initial
# tokens in the token queue are 0s. This means that the following
# computation of the gradients will be wasted as local_step is smaller than
# the current global step. However, this only happens once when the system
# just starts and this is necessary to make the system robust for the case
# when chief gets restarted by errors/preemption/...
self.assertAllEqual(0, sessions[0].run(local_step_0))
self.assertAllEqual(0, sessions[1].run(local_step_1))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. The variables are still the same.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[0].run(local_step_0))
self.assertAllEqual(1, sessions[1].run(local_step_1))
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
# At this step, the token queue is empty. So the 2 workers need to work
# together to proceed.
threads = []
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0])))
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1])))
# The two workers starts to execute the train op.
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# The global step should now be 2 and the gradients should have been
# applied twice.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
# 3 workers and one of them is backup.
def test3Workers1Backup(self):
num_workers = 3
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initilized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
# We have initial tokens in the queue so we can call this one by one. After
# the token queue becomes empty, they should be called concurrently.
# Here worker 0 and worker 2 finished first.
sessions[0].run(train_ops[0])
sessions[2].run(train_ops[2])
# The global step should have been updated since we only need to collect 2
# gradients. The variables should now have the new values after the average
# of the gradients from worker 0/2 are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))
# Worker 1 finished later and its gradients will now be dropped as it is
# stale.
sessions[1].run(train_ops[1])
# As shown in the previous test, the local_step for all workers should be
# still 0 so their next computation will also be dropped.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[2].run(train_ops[2])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. Just check worker 1 as an example.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[1].run(local_step_1))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
# Lets worker 0 execute first.
# It will wait as we need 2 workers to finish this step and the global step
# should be still 1.
thread_0.start()
self.assertAllEqual(1, sessions[1].run(global_step))
# Starts worker 1.
thread_1.start()
thread_1.join()
thread_0.join()
# The global step should now be 2 and the gradients should have been
# applied again.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
class SyncReplicasOptimizerHookTest(test.TestCase):
  """Tests for the SessionRunHook returned by make_session_run_hook()."""

  def testErrorIfUsedBeforeMinimizeCalled(self):
    # begin() requires apply_gradients()/minimize() to have populated the
    # optimizer's internal ops first; otherwise it must raise.
    opt = training.SyncReplicasOptimizer(
        opt=gradient_descent.GradientDescentOptimizer(1.0),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    hook = opt.make_session_run_hook(True)
    with self.assertRaisesRegexp(ValueError,
                                 "apply_gradient should be called"):
      hook.begin()

  def testCanCreatedBeforeMinimizeCalled(self):
    """This behavior is required to be integrated with Estimators."""
    # Creating the hook early, then calling minimize() before begin(),
    # must succeed (Estimators construct hooks before the train op).
    opt = training.SyncReplicasOptimizer(
        opt=gradient_descent.GradientDescentOptimizer(1.0),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    hook = opt.make_session_run_hook(True)
    v = variables.Variable([0.])
    global_step = variables.Variable(0, name="global_step", trainable=False)
    opt.minimize(v, global_step=global_step)
    hook.begin()
# Standard TF test-module entry point: discover and run the cases above.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
specialkevin/ansible-modules-core | cloud/openstack/os_security_group.py | 109 | 4265 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# shade is an optional dependency: its absence is reported at runtime by
# main() via module.fail_json instead of crashing at import time, so that
# ansible-doc can still introspect this module.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_security_group
short_description: Add/Delete security groups from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Add or Remove security groups from an OpenStack cloud.
options:
name:
description:
- Name that has to be given to the security group. This module
requires that security group names be unique.
required: true
description:
description:
- Long description of the purpose of the security group
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a security group
- os_security_group:
cloud=mordred
state=present
name=foo
description=security group for foo servers
# Update the existing 'foo' security group description
- os_security_group:
cloud=mordred
state=present
name=foo
description=updated description for the foo security group
'''
def _needs_update(module, secgroup):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
if secgroup['description'] != module.params['description']:
return True
return False
def _system_state_change(module, secgroup):
state = module.params['state']
if state == 'present':
if not secgroup:
return True
return _needs_update(module, secgroup)
if state == 'absent' and secgroup:
return True
return False
def main():
    """Entry point: reconcile the requested security-group state.

    Exits via module.exit_json (reporting `changed`, and the group on
    creation/update) or module.fail_json on any shade/OpenStack error.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    # Fail late so documentation introspection works without shade installed.
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    state = module.params['state']
    description = module.params['description']

    try:
        cloud = shade.openstack_cloud(**module.params)
        # Lookup is by name; the module documents that names must be unique.
        secgroup = cloud.get_security_group(name)

        # In check mode only predict the change; never touch the cloud.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, secgroup))

        changed = False
        if state == 'present':
            if not secgroup:
                secgroup = cloud.create_security_group(name, description)
                changed = True
            else:
                if _needs_update(module, secgroup):
                    secgroup = cloud.update_security_group(
                        secgroup['id'], description=description)
                    changed = True
            module.exit_json(
                changed=changed, id=secgroup['id'], secgroup=secgroup)

        if state == 'absent':
            if secgroup:
                cloud.delete_security_group(secgroup['id'])
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
# NOTE: the star imports are the historical Ansible convention -- the
# framework textually inlines these helpers into the module payload that is
# shipped to the managed host, so they must stay at the bottom of the file.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

# Ansible executes the module file directly, so main() runs unconditionally.
main()
| gpl-3.0 |
qwang2505/ssdb-source-comments | deps/cpy/engine.py | 26 | 13165 | # encoding=utf-8
#################################
# Author: ideawu
# Link: http://www.ideawu.net/
#################################
import sys, os, shutil, datetime
import antlr3
import antlr3.tree
from ExprLexer import ExprLexer
from ExprParser import ExprParser
class CpyEngine:
    """Drives .cpy -> .py compilation: discovers imports, then compiles.

    A compile() call transitively follows `import` statements in .cpy
    sources, compiles every dependency first, and finally compiles the
    requested file, returning the generated .py path.
    """

    # NOTE(review): class-level mutable set -- shared by *all* CpyEngine
    # instances in one process. Presumably intentional (dedup across runs),
    # but confirm before reusing an engine for unrelated compilations.
    found_files = set()

    def find_imports(self, srcfile, base_dir):
        """Recursively collect the real paths of srcfile and everything it
        imports; returns the accumulated found_files set."""
        #print '	file', srcfile
        srcfile = os.path.realpath(srcfile)
        if srcfile in self.found_files:
            return set()
        self.found_files.add(srcfile)
        fp = open(srcfile, 'rt')
        lines = fp.readlines()
        fp.close()
        imports = []
        for line in lines:
            # Cheap pre-filter before the stricter token check below.
            if line.find('import') == -1:
                continue
            line = line.strip().strip(';');
            ps = line.split();
            if ps[0] != 'import':
                continue
            # Everything after the keyword is a comma-separated module list.
            for p in ps[ 1 :]:
                p = p.strip(',')
                imports.append(p);
        for p in imports:
            #print 'import ' + p
            self.find_files(p, base_dir);
        return self.found_files

    def find_files(self, member, base_dir):
        """Resolve a dotted import (`a.b.c` or `a.b.*`) to .cpy files under
        base_dir and recurse into each one found."""
        ps = member.split('.')
        last = ps.pop(-1)
        path = base_dir + '/' + '/'.join(ps)
        if last == '*':
            # Wildcard: scan every .cpy file in the package directory.
            if os.path.isdir(path):
                fs = os.listdir(path)
                for f in fs:
                    if f.endswith('.cpy'):
                        file = os.path.realpath(path + '/' + f)
                        self.find_imports(file, path)
        else:
            file = path + '/' + last + '.cpy'
            if os.path.isfile(file):
                self.find_imports(file, path)

    def compile(self, srcfile, base_dir, output_dir):
        """Compile srcfile and all of its .cpy dependencies.

        Dependencies are emitted into output_dir mirroring their relative
        location under the source file's directory. Returns the generated
        .py path for srcfile itself.
        """
        srcfile = os.path.realpath(srcfile)
        base_dir = os.path.realpath(base_dir)
        output_dir = os.path.realpath(output_dir)
        files = self.find_imports(srcfile, base_dir)
        files.remove(srcfile)
        if len(files) > 0:
            files = list(files)
            files.sort()
            #print '    ' + '\n    '.join(files)
        shead, stail = os.path.split(srcfile)
        slen = len(shead)
        for f in files:
            head, tail = os.path.split(f)
            # Path of the dependency relative to the main source's directory.
            rel_dir = head[slen :]
            self._compile(f, base_dir, output_dir + rel_dir)
        dstfile = self._compile(srcfile, base_dir, output_dir)
        return dstfile

    def _compile(self, srcfile, base_dir, output_dir):
        """Parse one .cpy file with ANTLR and emit the .py translation.

        Skips work when the existing output is newer than the source
        (mtime-based incremental build). Returns the output path.
        """
        head, tail = os.path.split(srcfile)
        dstfile = os.path.normpath(output_dir + '/' + tail.split('.')[0] + '.py')
        if os.path.exists(dstfile):
            src_mtime = os.path.getmtime(srcfile)
            dst_mtime = os.path.getmtime(dstfile)
            #print src_mtime, dst_mtime
            if src_mtime < dst_mtime:
                return dstfile
        #print 'compile: %-30s=> %s' % (srcfile, dstfile)
        #print 'compile: %-30s=> %s' % (srcfile[len(base_dir)+1:], dstfile[len(base_dir)+1:])
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Make the output directory importable as a Python package.
        if not os.path.exists(output_dir + '/__init__.py'):
            fp = open(output_dir + '/__init__.py', 'w')
            fp.close()
        #fp = codecs.open(sys.argv[1], 'r', 'utf-8')
        fp = open(srcfile, 'r')
        char_stream = antlr3.ANTLRInputStream(fp)
        lexer = ExprLexer(char_stream)
        tokens = antlr3.CommonTokenStream(lexer)
        parser = ExprParser(tokens)
        r = parser.prog()
        # this is the root of the AST
        root = r.tree
        #print (root.toStringTree())
        #print '-------'
        nodes = antlr3.tree.CommonTreeNodeStream(root)
        nodes.setTokenStream(tokens)
        from Eval import Eval
        eval = Eval(nodes)
        #######################################
        # Walk the AST; Eval calls back into CpyBuilder which writes dstfile.
        cpy = CpyBuilder(dstfile, base_dir, output_dir)
        eval.prog(cpy)
        return dstfile
class CpyBuilder:
    """Code emitter driven by the ANTLR tree walker (Eval).

    Each op_* method is a callback for one cpy language construct and writes
    the equivalent *Python 2* source text (print statements, dict.iterkeys,
    `except X , e` syntax) to the output file. Indentation is tracked with
    block_depth; cpy's `switch` and `do/while` are lowered onto `while True`
    loops with synthetic flag variables.
    """

    # NOTE(review): class-level mutable set shared across all builder
    # instances -- presumably deliberate, to dedup .cpy compilation across
    # the several CpyBuilder objects created during one engine run.
    compiled_files = set()

    def __init__(self, dstfile, base_dir, output_dir):
        self.vars = -1                    # counter backing tmp_var()
        self.if_depth = 0
        self.block_depth = 0              # current indentation level
        self.switch_expr_stack = []       # nested switch subject expressions
        self.switch_continue_stack = []   # per-switch continue-flag var names
        self.class_stack = []             # per-class instance-field defs
        self.class_names = [];
        self.constructed = False;         # whether current class emitted __init__
        self.base_dir = base_dir
        self.output_dir = output_dir
        self.fp = open(dstfile, 'w')
        self.write('# encoding=utf-8\n')
        self.write('# Generated by cpy\n');
        self.write('# ' + datetime.datetime.now().isoformat(' ') + '\n');
        self.write('import os, sys\n')
        self.write('from sys import stdin, stdout\n\n')

    def tmp_var(self, name = ''):
        """Return a fresh collision-free temporary variable name."""
        self.vars += 1
        return '_cpy_%s_%d' %(name, self.vars)

    def close(self):
        self.fp.close()

    def write(self, text):
        # Output file is always UTF-8 encoded bytes.
        text = text.encode('utf-8')
        self.fp.write(text)
        # debug
        #sys.stdout.write(text)

    def indent(self):
        """Leading whitespace for the current block depth (tabs)."""
        return '\t' * self.block_depth

    def _compile_dir(self, rel_path):
        """Compile/copy every source in a package dir; return module names."""
        mods = []
        files = os.listdir(self.base_dir + '/' + rel_path)
        for f in files:
            if f.endswith('.cpy'):
                mods.append(f[0: -4])
            if f.endswith('.py'):
                mods.append(f[0: -3])
            self._compile(rel_path, f)
        return mods

    def _compile(self, rel_path, f):
        """Copy a .py verbatim, or (once) mark a .cpy for compilation."""
        base_dir = os.path.normpath(self.base_dir + '/' + rel_path)
        srcfile = os.path.normpath(base_dir + '/' + f)
        output_dir = os.path.normpath(self.output_dir + '/' + rel_path)
        if f.endswith('.py'):
            head, tail = os.path.split(f)
            #print 'copy: %-30s=> %s' % (srcfile, output_dir + '/' + tail)
            shutil.copy(srcfile, output_dir + '/' + tail)
        elif f.endswith('.cpy'):
            if srcfile in self.compiled_files:
                return
            self.compiled_files.add(srcfile)
            # NOTE(review): the actual recursive compile is commented out;
            # dependencies are compiled by CpyEngine.compile() instead, so
            # this branch now only records the file as seen.
            #e = CpyEngine()
            #d = e.compile(srcfile, base_dir, output_dir)

    def op_import(self, member, all):
        """Translate a cpy import, compiling the target as a side effect.

        Walks the dotted path segment by segment: a directory maps to a
        package import (expanded module-by-module for `*`), a .cpy/.py file
        to a module or `from mod import Class` import, and anything else is
        passed through as a plain Python import.
        """
        ps = member.split('.')
        package = []
        while True:
            if len(ps) == 0:
                break
            p = ps.pop(0)
            package.append(p)
            rel_path = '/'.join(package);
            path = self.base_dir + '/' + rel_path
            if os.path.isdir(path):
                if len(ps) == 0:
                    # Whole package imported; compile its contents now.
                    mods = self._compile_dir(rel_path)
                    if all == '*':
                        for m in mods:
                            self.write(self.indent())
                            self.write('from %s import %s\n' %(member, m))
                    else:
                        self.write(self.indent())
                        self.write('import %s\n' % member)
                    break
            elif os.path.isfile(path + '.cpy') or os.path.isfile(path + '.py'):
                filename = os.path.basename(path)
                rel_path = '/'.join(package[ : -1]);
                if os.path.isfile(path + '.cpy'):
                    self._compile(rel_path, filename + '.cpy')
                else:
                    self._compile(rel_path, filename + '.py')
                if len(ps) == 0:
                    if all == '*':
                        self.write(self.indent())
                        self.write('from %s import *\n' % member)
                    else:
                        self.write(self.indent())
                        self.write('import %s\n' % member)
                    break
                elif len(ps) == 1:
                    # One trailing segment left: treat it as a class/function
                    # inside the module just resolved.
                    mod = '.'.join(package)
                    cls = ps[-1]
                    self.write(self.indent())
                    self.write('from %s import %s\n' %(mod, cls))
                else:
                    # error
                    # NOTE(review): exits with status 0 on a hard error --
                    # callers/build scripts cannot detect the failure.
                    print ("Cpy error: invalid module '%s'" % member)
                    sys.exit(0)
                break
            else:
                # Not found under base_dir: assume a normal Python module.
                self.write(self.indent())
                if all == '*':
                    self.write('from %s import *\n' % member)
                else:
                    ps = member.split('.')
                    if len(ps) == 1:
                        self.write('import %s\n' % member)
                    else:
                        self.write('from %s import %s\n' %('.'.join(ps[0 : -1]), ps[-1]))
                break

    def block_enter(self):
        self.block_depth += 1
        # 'pass' keeps the generated block valid even if it stays empty.
        self.write(self.indent() + 'pass\n')

    def block_leave(self):
        self.block_depth -= 1

    def if_enter(self):
        self.write('\n')
        self.write(self.indent())
        self.if_depth += 1

    def if_leave(self):
        self.if_depth -= 1

    def op_if(self, expr):
        self.write('if %s:\n' % expr)

    def op_else(self):
        self.write(self.indent() + 'else:\n')

    def op_else_if(self):
        # Emits 'el'; the following op_if() completes it to 'elif ...'.
        self.write(self.indent() + 'el')

    def stmt(self, text):
        self.write(self.indent() + text + '\n')

    def op_assign(self, id, val, op):
        # Returns the text (caller decides where to place it), unlike the
        # other op_* methods which write directly.
        text = '%s %s %s' % (id, op, val)
        return text

    def op_inc(self, id):
        return id + ' += 1';

    def op_dec(self, id):
        return id + ' -= 1';

    def op_call(self, text):
        self.write(self.indent() + text + '\n')

    def op_print(self, text):
        # Generated code targets Python 2: print statement, not function.
        self.write(self.indent())
        self.write('print %s\n' % text)

    def op_printf(self, format, text):
        self.write(self.indent())
        if text == None:
            self.write('sys.stdout.write(%s)\n' % (format))
        else:
            self.write('sys.stdout.write(%s %% (%s))\n' % (format, text))

    def op_while(self, expr):
        self.write('\n')
        self.write(self.indent())
        self.write('while %s:\n' % expr)

    def op_do_while_enter(self):
        # do/while is lowered to 'while True' + conditional continue/break
        # appended by op_do_while_leave().
        self.write('\n')
        self.write(self.indent())
        self.write('while True:\n')

    def op_do_while_leave(self, expr):
        self.write('\n')
        self.block_depth += 1
        self.write(self.indent())
        self.write('if %s:\n' % expr)
        self.block_depth += 1
        self.write(self.indent())
        self.write('continue')
        self.block_depth -= 1
        self.write('\n')
        self.write(self.indent())
        self.write('break')
        self.block_depth -= 1

    def op_switch_enter(self, expr):
        """Lower switch to a one-shot 'while True' so 'break' works and a
        flag variable can emulate C's fall-through-free 'continue'."""
        self.write('\n')
        self.switch_expr_stack.append(expr)
        var = '_continue_%d' % len(self.switch_expr_stack)
        self.switch_continue_stack.append(var)
        self.write(self.indent() + '# {{{ switch: ' + expr + '\n')
        self.write(self.indent())
        self.write(var + ' = False\n')
        self.write(self.indent())
        self.write('while True:\n')
        self.block_depth += 1

    def op_switch_leave(self):
        self.write(self.indent() + 'break\n')
        var = self.switch_continue_stack[-1]
        self.write(self.indent())
        self.write('if %s:\n' % var)
        self.block_depth += 1
        self.write(self.indent())
        self.write('continue\n')
        self.block_depth -= 1
        self.block_depth -= 1
        self.write(self.indent() + '# }}} switch\n\n')
        self.switch_expr_stack.pop()
        self.switch_continue_stack.pop()

    def op_case_enter(self):
        # Starts 'if False'; each op_case_test() appends an 'or (...)' so
        # several case labels can share one body.
        self.write(self.indent())
        self.write('if False')
        self.block_depth += 1

    def op_case_test(self, expr):
        self.write(' or ((%s) == %s)' % (self.switch_expr_stack[-1], expr))

    def op_case(self):
        self.write(':\n')
        self.write(self.indent())
        self.write('pass\n')

    def op_case_leave(self):
        self.block_depth -= 1

    def op_break(self):
        self.write(self.indent())
        self.write('break\n')

    def op_continue(self):
        if self.switch_expr_stack:
            # Inside a switch, 'continue' must escape the synthetic loop and
            # re-enter the enclosing loop via the flag variable.
            var = self.switch_continue_stack[-1]
            self.write(self.indent())
            self.write(var + ' = True\n')
            self.write(self.indent())
            self.write('break\n')
        else:
            self.write(self.indent())
            self.write('continue\n')

    def op_return(self, expr):
        self.write(self.indent())
        if expr == None: expr = ''
        self.write('return %s\n' % expr)

    def op_default_enter(self):
        self.write(self.indent() + '### default\n')

    def op_default_leave(self):
        pass

    def op_function(self, id, params):
        """Emit a def; methods get an implicit leading 'this' parameter."""
        self.write('\n')
        if len(self.class_stack) > 0:
            # in class
            if params == None or params == '':
                params = 'this'
            else:
                params = 'this, ' + params
        else:
            if params == None:
                params = ''
        self.write(self.indent() + 'def ' + id + '(' + params + '):\n')

    def op_foreach(self, expr, k, vals):
        """Emit a foreach that iterates dicts as key->value and lists with
        an auto-incremented integer index when a key variable is given."""
        self.write('\n')
        tmp_var_ref = self.tmp_var('r')
        tmp_var_l = self.tmp_var('l')
        tmp_var_k = self.tmp_var('k')
        tmp_var_is_dict = self.tmp_var('b')
        self.write(self.indent())
        self.write('%s = %s = %s\n' %(tmp_var_ref, tmp_var_l, expr))
        self.write(self.indent())
        # Runtime type dispatch in the *generated* code (py2 iterkeys).
        self.write('if type(%s).__name__ == \'dict\': %s=True; %s=%s.iterkeys()\n' %(tmp_var_ref, tmp_var_is_dict, tmp_var_l, tmp_var_ref))
        self.write(self.indent())
        self.write('else: %s=False;' %tmp_var_is_dict)
        if k != None:
            # List case counts indices from -1 and pre-increments below.
            self.write('%s=-1' %k)
        self.write('\n')
        self.write(self.indent())
        self.write('for %s in %s:\n' %(tmp_var_k, tmp_var_l))
        if k == None:
            self.block_depth += 1
            self.write(self.indent())
            self.write('if %s: %s=%s[%s]\n' %(tmp_var_is_dict, vals, tmp_var_ref, tmp_var_k))
            self.write(self.indent())
            self.write('else: %s=%s\n' %(vals, tmp_var_k))
            self.block_depth -= 1
        else:
            self.block_depth += 1
            self.write(self.indent())
            self.write('if %s: %s=%s; %s=%s[%s]\n' %(tmp_var_is_dict, k, tmp_var_k, vals, tmp_var_ref, tmp_var_k))
            self.write(self.indent())
            self.write('else: %s += 1; %s=%s\n' %(k, vals, tmp_var_k))
            self.block_depth -= 1

    def op_throw(self, expr):
        self.write(self.indent())
        self.write('raise %s\n' % expr)

    def op_try(self):
        self.write(self.indent())
        self.write('try:\n')

    def op_catch(self, type, var):
        self.write(self.indent())
        if var == None:
            self.write('except %s:\n' % type)
        else:
            # Python 2 'except Type , var' syntax in the generated code.
            self.write('except %s , %s:\n' %(type, var))

    def op_finally(self):
        self.write(self.indent())
        self.write('finally:\n')

    def op_class_enter(self, name, parent):
        self.class_stack.append([])
        self.class_names.append(name)
        self.constructed = False;
        self.parent = parent;
        self.write(self.indent())
        if parent == None:
            self.write('class %s(object):\n' % name)
        else:
            self.write('class %s(%s):\n' % (name, parent))
        self.block_depth += 1
        self.write(self.indent())
        self.write('pass\n')

    def op_class_leave(self):
        # Guarantee every class has an __init__ that initializes its fields.
        if not self.constructed:
            self.op_construct('');
        self.class_stack.pop()
        self.class_names.pop()
        self.write('\n')
        self.block_depth -= 1

    def op_var_def(self, is_static, id, val):
        if is_static:
            # Static fields become class attributes, emitted immediately.
            self.write(self.indent())
            if val == None:
                s = '%s = None' % id
            else:
                s = '%s = %s' % (id, val)
            self.write(s)
        else:
            # Instance fields are deferred until __init__ (op_construct).
            if val == None:
                s = 'this.%s = None' % id
            else:
                s = 'this.%s = %s' % (id, val)
            self.class_stack[-1].append(s)

    def op_construct(self, params):
        """Emit __init__: chain to the parent, then assign deferred fields."""
        self.constructed = True;
        self.write('\n')
        self.op_function('__init__', params)
        self.block_depth += 1
        if self.parent and self.parent != 'object':
            self.write(self.indent())
            self.write('super(' + self.class_names[-1] + ', this).__init__(' + params + ')\n')
        for s in self.class_stack[-1]:
            self.write(self.indent())
            self.write(s + '\n')
        self.block_depth -= 1
| bsd-3-clause |
NetApp/manila | manila/tests/test_service.py | 3 | 7809 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 NetApp, Inc.
# Copyright 2014 Mirantis, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import ddt
import mock
from oslo_config import cfg
from manila import context
from manila import db
from manila import exception
from manila import manager
from manila import service
from manila import test
from manila import utils
from manila import wsgi
# Config options used only by this test module; fake_manager lets the
# generic Service machinery instantiate FakeManager (defined below) by
# dotted path, exactly as a real deployment would name its manager class.
test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="manila.tests.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"),
]

CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
    """Fake manager for tests."""

    RPC_API_VERSION = "1.0"

    # NOTE(review): service_name is accepted for signature compatibility
    # with Service.create() but is intentionally not forwarded -- confirm
    # against manager.Manager.__init__ before changing.
    def __init__(self, host=None, db_driver=None, service_name=None):
        super(FakeManager, self).__init__(host=host, db_driver=db_driver)

    def test_method(self):
        # Sentinel return value; ServiceManagerTestCase checks which class
        # (manager vs. service subclass) answered the call.
        return 'manager'
class ExtendedService(service.Service):
    # Overrides the manager-delegated test_method so tests can verify that
    # a Service subclass's own method wins over the manager's.
    def test_method(self):
        return 'service'
class ServiceManagerTestCase(test.TestCase):
    """Test cases for Services."""

    def test_message_gets_to_manager(self):
        # Unknown attributes on Service are proxied to its manager instance.
        serv = service.Service('test', 'test', 'test', CONF.fake_manager)
        serv.start()
        self.assertEqual('manager', serv.test_method())

    def test_override_manager_method(self):
        # A method defined on the Service subclass shadows the manager's.
        serv = ExtendedService('test', 'test', 'test', CONF.fake_manager)
        serv.start()
        self.assertEqual('service', serv.test_method())
class ServiceFlagsTestCase(test.TestCase):
    """Verify enable_new_services controls the created DB row's disabled flag."""

    def test_service_enabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=True)
        host = 'foo'
        binary = 'manila-fake'
        app = service.Service.create(host=host, binary=binary)
        app.start()
        app.stop()
        # Fetch the row before destroying it so the assertion sees the
        # state that Service.create() persisted.
        ref = db.service_get(context.get_admin_context(), app.service_id)
        db.service_destroy(context.get_admin_context(), app.service_id)
        self.assertFalse(ref['disabled'])

    def test_service_disabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=False)
        host = 'foo'
        binary = 'manila-fake'
        app = service.Service.create(host=host, binary=binary)
        app.start()
        app.stop()
        ref = db.service_get(context.get_admin_context(), app.service_id)
        db.service_destroy(context.get_admin_context(), app.service_id)
        self.assertTrue(ref['disabled'])
def fake_service_get_by_args(*args, **kwargs):
    """Stand-in for db.service_get_by_args that always misses.

    Forces Service.start() down the "no existing row" path so it has to
    call db.service_create().
    """
    raise exception.NotFound()
def fake_service_get(*args, **kwargs):
    """Stand-in for db.service_get that always blows up.

    Simulates a lost DB connection so report_state() flips the service
    into its model_disconnected state.
    """
    raise Exception()
# Shared fixture values used by the mock-heavy ServiceTestCase below.
host = 'foo'
binary = 'bar'
topic = 'test'

# Payload that Service.start() is expected to pass to db.service_create().
service_create = {
    'host': host,
    'binary': binary,
    'topic': topic,
    'report_count': 0,
    'availability_zone': 'nova',
}

# Row shape returned by the fake db layer. Note availability_zone is a dict
# here (as a DB object reference) while service_create uses the plain name.
# WARNING: test_report_state_newly_connected's decorator calls
# service_ref.update(...) at import time, mutating this dict in place.
service_ref = {
    'host': host,
    'binary': binary,
    'topic': topic,
    'report_count': 0,
    'availability_zone': {'name': 'nova'},
    'id': 1,
}
@ddt.ddt
class ServiceTestCase(test.TestCase):
    """Test cases for Services."""

    def test_create(self):
        app = service.Service.create(host='foo',
                                     binary='manila-fake',
                                     topic='fake')
        self.assertTrue(app)

    @ddt.data(True, False)
    def test_periodic_tasks(self, raise_on_error):
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        # side_effect keeps the real behavior while recording the call.
        self.mock_object(
            context,
            'get_admin_context',
            mock.Mock(side_effect=context.get_admin_context))
        self.mock_object(serv.manager, 'periodic_tasks')

        serv.periodic_tasks(raise_on_error=raise_on_error)

        context.get_admin_context.assert_called_once_with()
        serv.manager.periodic_tasks.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            raise_on_error=raise_on_error)

    # service_get_by_args misses and service_get explodes, simulating a
    # fresh service whose DB connection drops right after registration.
    @mock.patch.object(service.db, 'service_get_by_args',
                       mock.Mock(side_effect=fake_service_get_by_args))
    @mock.patch.object(service.db, 'service_create',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_get',
                       mock.Mock(side_effect=fake_service_get))
    def test_report_state_newly_disconnected(self):
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        serv.start()
        serv.report_state()
        self.assertTrue(serv.model_disconnected)
        service.db.service_get_by_args.assert_called_once_with(
            mock.ANY, host, binary)
        service.db.service_create.assert_called_once_with(
            mock.ANY, service_create)
        service.db.service_get.assert_called_once_with(mock.ANY, mock.ANY)

    # NOTE(review): dict.update() returns None, so the service_update mock
    # below actually returns None; the decorator's real effect is mutating
    # the shared service_ref at class-definition time. Confirm intent.
    @mock.patch.object(service.db, 'service_get_by_args',
                       mock.Mock(side_effect=fake_service_get_by_args))
    @mock.patch.object(service.db, 'service_create',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_get',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_update',
                       mock.Mock(return_value=service_ref.
                                 update({'report_count': 1})))
    def test_report_state_newly_connected(self):
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        serv.start()
        serv.model_disconnected = True
        serv.report_state()

        # A successful report_state() clears the disconnected flag.
        self.assertFalse(serv.model_disconnected)
        service.db.service_get_by_args.assert_called_once_with(
            mock.ANY, host, binary)
        service.db.service_create.assert_called_once_with(
            mock.ANY, service_create)
        service.db.service_get.assert_called_once_with(
            mock.ANY, service_ref['id'])
        service.db.service_update.assert_called_once_with(
            mock.ANY, service_ref['id'], mock.ANY)
class TestWSGIService(test.TestCase):
    """Tests for the WSGI service wrapper (port binding and pool sizing)."""

    def setUp(self):
        # BUG FIX: was super(self.__class__, self).setUp(). Because
        # self.__class__ is always the *leaf* class, any subclass inheriting
        # this setUp() would recurse infinitely. Name the class explicitly
        # (Python 2 compatible super() form).
        super(TestWSGIService, self).setUp()
        # Prevent the loader from actually resolving a paste config.
        self.mock_object(wsgi.Loader, 'load_app')
        self.test_service = service.WSGIService("test_service")

    def test_service_random_port(self):
        # Port 0 asks the OS for an ephemeral port; after start() the real
        # bound port must be filled in.
        self.assertEqual(0, self.test_service.port)
        self.test_service.start()
        self.assertNotEqual(0, self.test_service.port)
        self.test_service.stop()
        wsgi.Loader.load_app.assert_called_once_with("test_service")

    def test_reset_pool_size_to_default(self):
        self.test_service.start()

        # Stopping the service, which in turn sets pool size to 0
        self.test_service.stop()
        self.assertEqual(0, self.test_service.server._pool.size)

        # Resetting pool size to default
        self.test_service.reset()
        self.test_service.start()
        self.assertEqual(1000, self.test_service.server._pool.size)
        wsgi.Loader.load_app.assert_called_once_with("test_service")
| apache-2.0 |
epiqc/ScaffCC | llvm/utils/lit/lit/display.py | 2 | 4265 | import sys
def create_display(opts, tests, total_tests, workers):
    """Build the console display appropriate for the given options.

    Returns a NopDisplay when --quiet is set; otherwise a Display that may
    own a progress bar (a fancy one on capable terminals, a simple one when
    terminal capabilities cannot be detected).
    """
    if opts.quiet:
        return NopDisplay()

    suffix = '' if tests == total_tests else ' of %d' % total_tests
    banner = '-- Testing: %d%s tests, %d workers --' % (tests, suffix, workers)

    bar = None
    if opts.succinct and opts.useProgressBar:
        import lit.ProgressBar
        try:
            term = lit.ProgressBar.TerminalController()
            bar = lit.ProgressBar.ProgressBar(term, banner)
        except ValueError:
            # Dumb terminal: fall back to the simple bar and keep the banner.
            bar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            # The fancy bar renders the banner itself; don't print it twice.
            banner = None

    return Display(opts, tests, banner, bar)
class NopDisplay(object):
    """Display that silently discards every event; used with --quiet."""

    def print_header(self):
        pass

    def update(self, test):
        pass

    def clear(self, interrupted):
        pass
class Display(object):
    """Prints per-test progress, results, metrics and micro-test results.

    `header` and `progress_bar` come from create_display(); either may be
    None depending on verbosity options and terminal capabilities.
    """

    def __init__(self, opts, tests, header, progress_bar):
        self.opts = opts
        self.tests = tests              # total number of tests to run
        self.header = header            # banner line, or None
        self.progress_bar = progress_bar
        self.completed = 0              # tests finished so far

    def print_header(self):
        if self.header:
            print(self.header)
        if self.progress_bar:
            self.progress_bar.update(0.0, '')

    def update(self, test):
        """Record one finished test: print its result line if warranted,
        then advance the progress bar."""
        self.completed += 1

        show_result = test.isFailure() or \
            self.opts.showAllOutput or \
            (not self.opts.quiet and not self.opts.succinct)
        if show_result:
            if self.progress_bar:
                # Erase the bar so the result line doesn't overwrite it.
                self.progress_bar.clear(interrupted=False)
            self.print_result(test)

        if self.progress_bar:
            if test.isFailure():
                # Once any test fails, the bar stays red for the run.
                self.progress_bar.barColor = 'RED'
            percent = float(self.completed) / self.tests
            self.progress_bar.update(percent, test.getFullName())

    def clear(self, interrupted):
        if self.progress_bar:
            self.progress_bar.clear(interrupted)

    def print_result(self, test):
        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.tests))

        # Show the test failure output, if requested.
        if (test.isFailure() and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.isFailure():
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            out = test.result.output
            # Encode/decode so that, when using Python 3.6.5 in Windows 10,
            # print(out) doesn't raise UnicodeEncodeError if out contains
            # special characters. However, Python 2 might try to decode
            # as part of the encode call if out is already encoded, so skip
            # encoding if it raises UnicodeDecodeError.
            if sys.stdout.encoding:
                try:
                    out = out.encode(encoding=sys.stdout.encoding,
                                     errors="replace")
                except UnicodeDecodeError:
                    pass
                out = out.decode(encoding=sys.stdout.encoding)
            print(out)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Report micro-tests, if present
        if test.result.microResults:
            items = sorted(test.result.microResults.items())
            for micro_test_name, micro_test in items:
                print("%s MICRO-TEST: %s" %
                      ('*'*3, micro_test_name))
                if micro_test.metrics:
                    sorted_metrics = sorted(micro_test.metrics.items())
                    for metric_name, value in sorted_metrics:
                        print('    %s: %s ' % (metric_name, value.format()))

        # Ensure the output is flushed.
        sys.stdout.flush()
| bsd-2-clause |
MTgeophysics/mtpy | tests/modeling/__init__.py | 1 | 2712 | from __future__ import print_function
import shutil
from difflib import unified_diff
import matplotlib
import os
import sys
from matplotlib import pyplot as plt
if os.name == "posix" and 'DISPLAY' not in os.environ:
print("MATPLOTLIB: No Display found, using non-interactive svg backend", file=sys.stderr)
matplotlib.use('svg')
import matplotlib.pyplot as plt
else:
# matplotlib.use('svg')
import matplotlib.pyplot as plt
plt.ion()
def diff_files(after, before, ignores=None):
    """
    Compare two text files line by line (whitespace-stripped).

    :param after: path of the file produced by the current run
    :param before: path of the baseline file to compare against
    :param ignores: optional iterable of substrings; any line containing one
        of them is dropped from BOTH files before comparing
    :return: tuple (is_identical, message) where message holds a unified diff
        when differences were found
    """
    with open(before) as f2p:
        before_lines = f2p.readlines()
    with open(after) as f1p:
        after_lines = f1p.readlines()

    # Compare whitespace-insensitively, line by line.
    before_lines = [line.strip() for line in before_lines]
    after_lines = [line.strip() for line in after_lines]

    if ignores:
        for ignored_term in ignores:
            before_lines = [line for line in before_lines if ignored_term not in line]
            # BUG FIX: this previously filtered *before_lines* again, which
            # replaced the test file's content with the baseline's and made
            # every comparison that passed `ignores` vacuously identical.
            after_lines = [line for line in after_lines if ignored_term not in line]

    msg = "Comparing {} and {}:\n".format(before, after)

    lines = [line for line in unified_diff(
        before_lines,
        after_lines,
        fromfile="baseline ({})".format(before),
        tofile="test ({})".format(after),
        n=0)]

    if lines:
        msg += " Found differences:\n\t" + "\n\t".join(lines)
        is_identical = False
    else:
        msg += " NO differences found."
        is_identical = True

    return is_identical, msg
def _clean_recreate(adir):
if os.path.exists(adir):
# clear dir if it already exist
shutil.rmtree(adir)
os.mkdir(adir)
# def show_patcher(show_func):
# """
# patch the plt.show() if interactive is enabled to display and then close the plot after 1 second
# so plt.show() will not block the script and the figure is still visible to the user
# :param show_func:
# :return:
# """
#
# def new_show_func(*args, **kwargs):
# stuff = show_func(*args, **kwargs)
# # wait 1 second for the image to show on screen
# figManager = plt.gcf()
# if figManager is not None:
# canvas = figManager.canvas
# # if canvas.figure.stale:
# # canvas.draw()
# # show(block=False)
# try:
# canvas.start_event_loop(1) # wait time = 1
# except NotImplementedError:
# pass
# finally:
# pass
# plt.close()
# return stuff
#
# return new_show_func if plt.isinteractive() else show_func
| gpl-3.0 |
xujun10110/sleepy-puppy-1 | sleepypuppy/admin/capture/models.py | 13 | 2480 | # Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from sleepypuppy import db
from BeautifulSoup import BeautifulSoup as bs
class Capture(db.Model):
    """
    Capture model contains the following parameters:

    assessment = assessment name(s) associated with capture
    url = url where cross-site scripting was triggered
    referrer = referrer string of request
    cookies = any cookies not containing the HttpOnly flag from request
    user_agent = user-agent string
    payload = payload identifier (marked "to be removed" by the authors)
    screenshot = screenshot identifier
    pub_date = date on which the capture was received
    dom = prettified copy of the captured page DOM
    """
    __tablename__ = 'captures'

    id = db.Column(db.Integer, primary_key=True)
    assessment = db.Column(db.String(200))
    url = db.Column(db.Text(), unique=False)
    referrer = db.Column(db.Text(), unique=False)
    cookies = db.Column(db.Text(), unique=False)
    user_agent = db.Column(db.Text(), unique=False)
    payload = db.Column(db.Integer)
    screenshot = db.Column(db.String(20), unique=False)
    pub_date = db.Column(db.String(512), unique=False)
    dom = db.Column(db.Text(), unique=False)

    def as_dict(self):
        """Return Capture model as JSON object"""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

    def __init__(self, assessment, url, referrer, cookies, user_agent,
                 payload, screenshot, dom, pub_date=None):
        """Populate a capture; pub_date defaults to the current time."""
        self.assessment = assessment
        self.url = url
        self.referrer = referrer
        self.cookies = cookies
        self.user_agent = user_agent
        self.payload = payload
        self.screenshot = screenshot
        # Pretty-print the raw DOM via BeautifulSoup before storing it.
        self.dom = bs(dom).prettify()
        # Set datetime when a capture is recieved
        if pub_date is None:
            pub_date = str(datetime.datetime.now())
        self.pub_date = pub_date

    def __repr__(self):
        return '<Uri %r>' % self.url
| apache-2.0 |
tcstewar/parser | leftright.py | 1 | 3272 | D = 512
from vocab import Vocabulary
# Sentence to parse, plus a toy context-free grammar: each rule maps a
# left-hand-side category to its ordered right-hand-side constituents.
input_sentence = 'the dog ran'.split()

rules = [
    ('S', ['NP', 'VP']),
    ('VP', ['V']),
    ('NP', ['DET', 'N']),
]

# Lexicon: terminal words grouped by part-of-speech category.
words = {
    'N': 'man dog'.split(),
    'DET': 'a the'.split(),
    'V': 'ran saw'.split(),
}
def label_word(s):
    """Return the lexicon word whose vocabulary vector best matches *s*.

    Scans every word in the module-level ``words`` lexicon, scoring each by
    the dot product with *s*, and returns the best-scoring word when its
    similarity exceeds 0.7; otherwise returns None.
    """
    best = None
    # BUG FIX: best_word was previously unbound when the lexicon was empty;
    # initialize it so the function degrades to returning None.
    best_word = None
    for w in words.values():
        for ww in w:
            c = s.dot(vocab.parse(ww))
            if best is None or best < c:
                best = c
                best_word = ww
    # (Removed a leftover no-op debug expression: `'word', best, best_word`.)
    # Guard against best being None (empty lexicon) before the comparison.
    if best is not None and best > 0.7:
        return best_word
    return None
def label(s):
    """Return the rule LHS category whose vector best matches *s*.

    Returns None when no category scores above the 0.7 threshold.
    """
    scores = [s.dot(vocab.parse(rule[0])) for rule in rules]
    best = max(scores)
    if best > 0.7:
        return rules[scores.index(best)][0]
    return None
def print_tree(s, depth=0):
    """Recursively print the parse tree encoded in vector *s*, indented by depth.

    A node labelled as a lexical word is a leaf; otherwise the node's
    category is printed and its children are recovered by unbinding with the
    inverse L_/R_ role vectors.

    NOTE(review): if neither label() nor label_word() recognizes *s*, the
    fall-through `' '*depth+label(s)` concatenates with None and would raise
    a TypeError -- confirm callers only pass well-formed trees.
    """
    x = label(s)
    if x is None:
        x = label_word(s)
        if x is not None:
            print ' '*depth+x
            return
    print ' '*depth+label(s)
    print_tree(s*vocab.parse('~L_'+x), depth+1)
    print_tree(s*vocab.parse('~R_'+x), depth+1)
# Build the vocabulary and the NEXT role vector used to stack pending goals.
vocab = Vocabulary(D)
NEXT = vocab.parse('NEXT')

# Turn every lexicon entry into a unary rule (category -> [word]).
for category, items in words.items():
    for item in items:
        rules.append((category, [item]))
print rules

# Left-corner style parse loop: sp_goal is the category currently being
# sought, sp_tree holds a partially built constituent awaiting its right child.
sp_goal = vocab.parse('S')
sp_tree = None
for input in input_sentence:
    print 'parsing text:', input
    sp_lex = vocab.parse(input)
    while True:
        print 'looking for rules with LC=', vocab.text(sp_lex, include_pairs=False, maximum_count=1)
        if sp_lex.dot(sp_goal)>0.7:
            print 'found goal', label(sp_goal)
            if sp_tree is None:
                print 'done'
                break
                pass  # NOTE(review): unreachable after break -- dead code
            else:
                # Attach the completed constituent as the right child of the
                # pending tree, pop the goal stack, and keep reducing.
                #print 'merging'
                #print 'lex:'
                #print_tree(sp_lex, depth=2)
                #print 'tree:'
                #print_tree(sp_tree, depth=2)
                sp_lex = sp_tree+vocab.parse('R_'+label(sp_tree))*sp_lex
                #print 'result:'
                #print_tree(sp_lex, depth=2)
                sp_goal = sp_goal*~NEXT
                sp_tree = None
                print_tree(sp_lex)
        else:
            # Find the rule whose left corner (first RHS symbol) best
            # matches the current constituent.
            best_rule = None
            best_match = None
            for rule in rules:
                LHR, RHS = rule  # NOTE(review): LHR looks like a typo for LHS; it is unused
                c = sp_lex.dot(vocab.parse(RHS[0]))
                if best_rule is None or c>best_match:
                    best_rule = rule
                    best_match = c
            if best_match > 0.7:
                LHS, RHS = best_rule
                print 'using rule', LHS, RHS, best_match
                # Project the constituent up: it becomes the left child of LHS.
                sp_lex = vocab.parse(LHS)+vocab.parse('L_'+LHS)*sp_lex
                print_tree(sp_lex)
                if len(RHS)>1:
                    # Binary rule: remember the partial tree, push the goal
                    # for the second constituent, and consume the next word.
                    sp_tree = sp_lex
                    sp_goal = sp_goal*NEXT+vocab.parse(RHS[1])
                    break
            else:
                print 'no rule to match'
                break

print_tree(sp_lex)
| gpl-2.0 |
jpirko/lnst | lnst/Tests/TestPMD.py | 1 | 1828 | import logging
import subprocess
import signal
from lnst.Common.Parameters import Param, StrParam, IntParam, FloatParam
from lnst.Common.Parameters import IpParam, DeviceOrIpParam
from lnst.Tests.BaseTestModule import BaseTestModule, TestModuleError
class TestPMD(BaseTestModule):
    """Run a DPDK testpmd instance in MAC forwarding mode until interrupted.

    Parameters mirror the testpmd CLI: coremask/pmd_coremask select CPU
    cores, nics lists the device addresses to whitelist, and peer_macs
    gives the --eth-peer destination MAC for each port, in port order.
    """
    coremask = StrParam(mandatory=True)
    pmd_coremask = StrParam(mandatory=True)
    #TODO make ListParam
    nics = Param(mandatory=True)
    peer_macs = Param(mandatory=True)

    @staticmethod
    def _send_cmd(proc, line):
        # Feed one interactive command to testpmd's stdin and flush it.
        proc.stdin.write(str.encode(line))
        proc.stdin.flush()

    def format_command(self):
        """Assemble the full testpmd command line as a single string."""
        argv = ["testpmd",
                "-c", self.params.coremask,
                "-n", "4", "--socket-mem", "1024,0"]
        for dev_addr in self.params.nics:
            argv += ["-w", dev_addr]
        argv += ["--", "-i", "--forward-mode", "mac",
                 "--coremask", self.params.pmd_coremask]
        for port_id, mac in enumerate(self.params.peer_macs):
            argv += ["--eth-peer", "{},{}".format(port_id, mac)]
        return " ".join(argv)

    def run(self):
        """Start testpmd, forward until interrupted, then collect its output."""
        cmd = self.format_command()
        logging.debug("Running command \"{}\" as subprocess".format(cmd))

        proc = subprocess.Popen(cmd, shell=True,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                close_fds=True)
        self._send_cmd(proc, "start tx_first\n")

        self.wait_for_interrupt()

        self._send_cmd(proc, "stop\n")
        self._send_cmd(proc, "quit\n")

        out, err = proc.communicate()
        self._res_data = {"stdout": out, "stderr": err}
        return True
| gpl-2.0 |
google-code/ampatu | languages/pl.py | 2 | 6724 | # coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyrażeniem postaci "pole1=\'nowawartość\'". Nie możesz uaktualnić lub usunąć wyników z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': 'Wierszy usuniętych: %s',
'%s rows updated': 'Wierszy uaktualnionych: %s',
'911 Operator (calltaker)': '911 Operator (calltaker)',
'Action, scope, etc.': 'Action, scope, etc.',
'Actual Phone Transcript': 'Actual Phone Transcript',
'Arrested Adult': 'Arrested Adult',
'Arrested Minor': 'Arrested Minor',
'Arrived at': 'Arrived at',
'Authentication': 'Uwierzytelnienie',
'Available databases and tables': 'Dostępne bazy danych i tabele',
'Calling party name': 'Calling party name',
'Cannot be empty': 'Nie może być puste',
'Change Password': 'Zmień hasło',
'Check to delete': 'Zaznacz aby usunąć',
'Check to delete:': 'Zaznacz aby usunąć:',
'Client IP': 'IP klienta',
'Comments': 'Comments',
'Complete the form': 'Complete the form',
'Confirmed': 'Confirmed',
'Contact': 'Contact',
'Controller': 'Kontroler',
'Copyright': 'Copyright',
'Created by': 'Created by',
'Current request': 'Aktualne żądanie',
'Current response': 'Aktualna odpowiedź',
'Current session': 'Aktualna sesja',
'DB Model': 'Model bazy danych',
'Database': 'Baza danych',
'Delay': 'Delay',
'Delay in seconds (calculated)': 'Delay in seconds (calculated)',
'Delete:': 'Usuń:',
'Description': 'Opis',
'Dispatch date & time': 'Dispatch date & time',
'Dispatched at': 'Dispatched at',
'Dispatched by': 'Dispatched by',
'E-mail': 'Adres e-mail',
'Edit': 'Edycja',
'Edit Profile': 'Edytuj profil',
'Edit This App': 'Edytuj tę aplikację',
'Edit current record': 'Edytuj obecny rekord',
'Event': 'Event',
'First name': 'Imię',
'Function disabled': 'Funkcja wyłączona',
'Group ID': 'ID grupy',
'Hello World': 'Witaj Świecie',
'Id': 'Id',
'Import/Export': 'Importuj/eksportuj',
'Incident %s created!': 'Incident %s created!',
'Incident Type': 'Incident Type',
'Incident not saved! (correct errors!)': 'Incident not saved! (correct errors!)',
'Index': 'Indeks',
'Internal State': 'Stan wewnętrzny',
'Invalid Query': 'Błędne zapytanie',
'Invalid email': 'Błędny adres email',
'Invalid incident!': 'Invalid incident!',
'Last modification date & time': 'Last modification date & time',
'Last name': 'Nazwisko',
'Layout': 'Układ',
'Location': 'Location',
'Login': 'Zaloguj',
'Logout': 'Wyloguj',
'Lost Password': 'Przypomnij hasło',
'Main Menu': 'Menu główne',
'Main unit assigned': 'Main unit assigned',
'Medical': 'Medical',
'Menu Model': 'Model menu',
'Name': 'Nazwa',
'New Record': 'Nowy rekord',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'On scence date & time': 'On scence date & time',
'Origin': 'Źródło',
'Password': 'Hasło',
"Password fields don't match": 'Pola hasła nie są zgodne ze sobą',
'Phone': 'Phone',
'Phone Operator': 'Phone Operator',
'Powered by': 'Zasilane przez',
'Preventable': 'Preventable',
'Priority classification': 'Priority classification',
'Query:': 'Zapytanie:',
'Record ID': 'ID rekordu',
'Register': 'Zarejestruj',
'Registration key': 'Klucz rejestracji',
'Relevant': 'Relevant',
'Reported by': 'Reported by',
'Role': 'Rola',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wybrane wiersze',
'Seized Drugs': 'Seized Drugs',
'Seized Vehicles': 'Seized Vehicles',
'Seized Weapons': 'Seized Weapons',
'Severity': 'Severity',
'Source telephone number': 'Source telephone number',
'Start date & time': 'Start date & time',
'Started at': 'Started at',
'Status': 'Status',
'Street name, house number, direction (if any)': 'Street name, house number, direction (if any)',
'Stylesheet': 'Arkusz stylów',
'Submit': 'Wyślij',
'Sure you want to delete this object?': 'Czy na pewno chcesz usunąć ten obiekt?',
'Synopsis': 'Synopsis',
'Table name': 'Nazwa tabeli',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'wartość\'". Takie coś jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'The output of the file is a dictionary that was rendered by the view': 'The output of the file is a dictionary that was rendered by the view',
'This is a copy of the scaffolding application': 'This is a copy of the scaffolding application',
'Timestamp': 'Znacznik czasu',
'Unit': 'Unit',
'Update:': 'Uaktualnij:',
'Updated at': 'Updated at',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Użyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapytań.',
'User %(id)s Registered': 'Użytkownik %(id)s został zarejestrowany',
'User ID': 'ID użytkownika',
'Verify Password': 'Potwierdź hasło',
'View': 'Widok',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Witaj w web2py',
'Which called the function': 'Which called the function',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url': 'You visited the url',
'Zone': 'Zone',
'appadmin is disabled because insecure channel': 'administracja aplikacji wyłączona z powodu braku bezpiecznego połączenia',
'cache': 'cache',
'change password': 'change password',
'click here for online examples': 'Kliknij aby przejść do interaktywnych przykładów',
'click here for the administrative interface': 'Kliknij aby przejść do panelu administracyjnego',
'customize me!': 'dostosuj mnie!',
'data uploaded': 'dane wysłane',
'database': 'baza danych',
'database %s select': 'wybór z bazy danych %s',
'db': 'baza danych',
'design': 'projektuj',
'documentation': 'documentation',
'done!': 'zrobione!',
'edit profile': 'edit profile',
'export as csv file': 'eksportuj jako plik csv',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'invalid request': 'Błędne żądanie',
'located in the file': 'located in the file',
'login': 'login',
'logout': 'logout',
'new record inserted': 'nowy rekord został wstawiony',
'next 100 rows': 'następne 100 wierszy',
'or import from csv file': 'lub zaimportuj z pliku csv',
'previous 100 rows': 'poprzednie 100 wierszy',
'record': 'rekord',
'record does not exist': 'rekord nie istnieje',
'record id': 'id rekordu',
'register': 'register',
'selected': 'wybranych',
'state': 'stan',
'table': 'tabela',
'unable to parse csv file': 'nie można sparsować pliku csv',
}
| agpl-3.0 |
dominjune/LeetCode | 062 Minimum Path Sum.py | 3 | 1179 | """
Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum
of all numbers along its path.
Note: You can only move either down or right at any point in time.
"""
__author__ = 'Danyang'
class Solution:
    def minPathSum(self, grid):
        """
        Dynamic programming with a single rolling row.

        dp[j] holds the minimum path sum to reach cell (i, j) of the row
        currently being processed; each cell extends the cheaper of the cell
        above (dp[j] from the previous row) and the cell to the left
        (dp[j-1]).

        Improvements over the original: uses range() (the original relied on
        the Python-2-only xrange), needs O(cols) extra space instead of
        O(rows*cols), and returns 0 for [] and [[]] (the original crashed on
        an empty row).

        :param grid: a list of lists of non-negative integers
        :return: integer minimum path sum from top-left to bottom-right
        """
        if not grid or not grid[0]:
            return 0
        cols = len(grid[0])
        dp = [0] * cols
        for i, row in enumerate(grid):
            for j, val in enumerate(row):
                if i == 0 and j == 0:
                    dp[j] = val
                elif i == 0:
                    # First row: can only arrive from the left.
                    dp[j] = dp[j - 1] + val
                elif j == 0:
                    # First column: can only arrive from above.
                    dp[j] = dp[j] + val
                else:
                    # Principle of Optimality: cheaper of above vs. left.
                    dp[j] = min(dp[j], dp[j - 1]) + val
        return dp[-1]
lmazuel/ansible | lib/ansible/modules/network/openswitch/ops_config.py | 43 | 10660 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ops_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage OpenSwitch configuration using CLI
description:
- OpenSwitch configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ops configuration sections in
a deterministic way.
extends_documentation_fragment: openswitch
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ['yes', 'no']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: netop
password: netop
---
- name: configure hostname over cli
ops_config:
lines:
- "hostname {{ inventory_hostname }}"
provider: "{{ cli }}"
- name: configure vlan 10 over cli
ops_config:
lines:
- no shutdown
parents:
- vlan 10
provider: "{{ cli }}"
- name: load config from file
ops_config:
src: ops01.cfg
backup: yes
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/ops_config.2016-07-16@22:28:34
"""
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.openswitch import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
def check_args(module, warnings):
    """Append deprecation warnings for legacy arguments that are still accepted.

    :param module: module object whose ``params`` dict is inspected
    :param warnings: mutable list that warning strings are appended to
    """
    force_requested = module.params['force']
    if not force_requested:
        return
    warnings.append('The force argument is deprecated, please use '
                    'match=none instead. This argument will be '
                    'removed in the future')
def get_config(module, result):
    """Return the base configuration to diff against, as a NetworkConfig.

    Prefers the user-supplied ``config`` parameter; when empty or absent,
    fetches the running config from the device. (*result* is accepted for
    signature compatibility but not used here.)
    """
    contents = module.params['config'] or module.config.get_config()
    return NetworkConfig(indent=4, contents=contents)
def get_candidate(module):
    """Build the candidate configuration from the ``src`` file or ``lines``."""
    candidate = NetworkConfig(indent=4)
    src = module.params['src']
    lines = module.params['lines']
    if src:
        candidate.load(src)
    elif lines:
        parents = module.params['parents'] or list()
        candidate.add(lines, parents=parents)
    return candidate
def load_config(module, commands, result):
    """Push *commands* to the device (skipped in check mode) and mark changed.

    NOTE(review): within this module, run() calls module.config.load_config()
    directly, so this helper appears unused here -- confirm against the rest
    of the file before removing.
    """
    if not module.check_mode:
        module.config(commands)
        result['changed'] = True
def run(module, result):
    """Diff the candidate config against the device and apply the changes.

    Builds the candidate from src/lines, diffs it per the match/replace
    strategy (no comparison at all when match=none), pushes the resulting
    commands, and optionally saves the running config. Mutates *result*
    in place ('updates', 'changed').
    """
    match = module.params['match']
    replace = module.params['replace']
    path = module.params['parents']

    candidate = get_candidate(module)

    if match != 'none':
        config = get_config(module, result)
        configobjs = candidate.difference(config, path=path, match=match,
                                          replace=replace)
    else:
        # match=none: push the candidate as-is without comparing.
        configobjs = candidate.items

    if configobjs:
        commands = dumps(configobjs, 'commands').split('\n')

        # before/after wrappers only apply when commands came from 'lines'.
        if module.params['lines']:
            if module.params['before']:
                commands[:0] = module.params['before']

            if module.params['after']:
                commands.extend(module.params['after'])

        result['updates'] = commands

        # send the configuration commands to the device and merge
        # them with the current running config
        if not module.check_mode:
            module.config.load_config(commands)
        result['changed'] = True

    if module.params['save']:
        if not module.check_mode:
            module.config.save_config()
        result['changed'] = True
def main():
    """Ansible module entry point: parse arguments, run the config update,
    and exit with the collected result."""
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        # this argument is deprecated in favor of setting match: none
        # it will be removed in a future version
        force=dict(default=False, type='bool'),
        config=dict(),
        save=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        # ops_config is only supported over Cli transport so force
        # the value of transport to be cli
        transport=dict(default='cli', choices=['cli'])
    )

    mutually_exclusive = [('lines', 'src')]

    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines'])]

    module = NetworkModule(argument_spec=argument_spec,
                           connect_on_load=False,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)

    # Legacy force=true behaves exactly like match=none.
    if module.params['force'] is True:
        module.params['match'] = 'none'

    warnings = list()
    check_args(module, warnings)

    result = dict(changed=False, warnings=warnings)

    # Stash a backup of the current config for Ansible's backup machinery.
    if module.params['backup']:
        result['__backup__'] = module.config.get_config()

    try:
        run(module, result)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc))

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
henridwyer/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3  # use 3 bins for calibration_curve as we have 3 clusters here

# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
                  centers=centers, shuffle=False, random_state=42)

# Relabel: first half negative, second half positive, so the middle blob
# straddles the class boundary.
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])

# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
    train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)

# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train)  # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]

# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]

# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]

print("Brier scores: (the smaller the better)")

clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)

clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)

clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)

###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
    this_X = X_train[y_train == this_y]
    this_sw = sw_train[y_train == this_y]
    plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
                label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")

# Second figure: predicted probabilities sorted by the uncalibrated score.
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
         label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
         label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
         y_test[order].reshape(25, -1).mean(1),
         'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
           "(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
olorz/scrapy | tests/test_downloadermiddleware_redirect.py | 25 | 10022 | import unittest
from scrapy.downloadermiddlewares.redirect import RedirectMiddleware, MetaRefreshMiddleware
from scrapy.spiders import Spider
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response, HtmlResponse
from scrapy.utils.test import get_crawler
class RedirectMiddlewareTest(unittest.TestCase):
    def setUp(self):
        """Create a crawler/spider pair and the RedirectMiddleware under test."""
        self.crawler = get_crawler(Spider)
        self.spider = self.crawler._create_spider('foo')
        self.mw = RedirectMiddleware.from_crawler(self.crawler)
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = Response('http://a.com', headers={'Location': 'http://a.com/redirected'}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_redirect_301(self):
def _test(method):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, method=method)
rsp = Response(url, headers={'Location': url2}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, method)
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
_test('GET')
_test('POST')
_test('HEAD')
def test_dont_redirect(self):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, meta={'dont_redirect': True})
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
# Test that it redirects when dont_redirect is False
req = Request(url, meta={'dont_redirect': False})
rsp = Response(url2, status=200)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
def test_redirect_302(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_redirect_302_head(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='HEAD')
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'HEAD')
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/302')
rsp = Response('http://scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = Response('http://www.scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = Response('http://scrapytest.org/first', headers={'Location': '/redirected'}, status=302)
req2 = self.mw.process_response(req1, rsp1, self.spider)
rsp2 = Response('http://scrapytest.org/redirected', headers={'Location': '/redirected2'}, status=302)
req3 = self.mw.process_response(req2, rsp2, self.spider)
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
def test_spider_handling(self):
smartspider = self.crawler._create_spider('smarty')
smartspider.handle_httpstatus_list = [404, 301, 302]
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url)
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, smartspider)
self.assertIs(r, rsp)
class MetaRefreshMiddlewareTest(unittest.TestCase):
    """Tests for MetaRefreshMiddleware (HTML ``<meta http-equiv="refresh">``)."""
    def setUp(self):
        # Build a minimal crawler/spider pair and the middleware under test.
        crawler = get_crawler(Spider)
        self.spider = crawler._create_spider('foo')
        self.mw = MetaRefreshMiddleware.from_crawler(crawler)
    def _body(self, interval=5, url='http://example.org/newpage'):
        # Minimal HTML page containing a meta-refresh tag with the given
        # delay (seconds) and target URL.
        return """<html><head><meta http-equiv="refresh" content="{0};url={1}"/></head></html>"""\
            .format(interval, url)
    def test_priority_adjust(self):
        """A meta-refresh redirect is scheduled with a higher priority."""
        req = Request('http://a.com')
        rsp = HtmlResponse(req.url, body=self._body())
        req2 = self.mw.process_response(req, rsp, self.spider)
        assert req2.priority > req.priority
    def test_meta_refresh(self):
        """A short-interval meta refresh produces a new Request to the target."""
        req = Request(url='http://example.org')
        rsp = HtmlResponse(req.url, body=self._body())
        req2 = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req2, Request)
        self.assertEqual(req2.url, 'http://example.org/newpage')
    def test_meta_refresh_with_high_interval(self):
        # meta-refresh with high intervals don't trigger redirects
        req = Request(url='http://example.org')
        rsp = HtmlResponse(url='http://example.org', body=self._body(interval=1000))
        rsp2 = self.mw.process_response(req, rsp, self.spider)
        assert rsp is rsp2
    def test_meta_refresh_trough_posted_request(self):
        # NOTE(review): "trough" is a typo for "through"; left unchanged
        # because renaming a test method alters test discovery/reporting.
        """A meta refresh of a POST becomes a GET and drops the body."""
        req = Request(url='http://example.org', method='POST', body='test',
                      headers={'Content-Type': 'text/plain', 'Content-length': '4'})
        rsp = HtmlResponse(req.url, body=self._body())
        req2 = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req2, Request)
        self.assertEqual(req2.url, 'http://example.org/newpage')
        self.assertEqual(req2.method, 'GET')
        assert 'Content-Type' not in req2.headers, \
            "Content-Type header must not be present in redirected request"
        assert 'Content-Length' not in req2.headers, \
            "Content-Length header must not be present in redirected request"
        assert not req2.body, \
            "Redirected body must be empty, not '%s'" % req2.body
    def test_max_redirect_times(self):
        """Exceeding max_redirect_times raises IgnoreRequest."""
        self.mw.max_redirect_times = 1
        req = Request('http://scrapytest.org/max')
        rsp = HtmlResponse(req.url, body=self._body())
        req = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req, Request)
        assert 'redirect_times' in req.meta
        self.assertEqual(req.meta['redirect_times'], 1)
        self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
    def test_ttl(self):
        """The per-request ``redirect_ttl`` meta key overrides the global limit."""
        self.mw.max_redirect_times = 100
        req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
        rsp = HtmlResponse(req.url, body=self._body())
        req = self.mw.process_response(req, rsp, self.spider)
        assert isinstance(req, Request)
        self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
    def test_redirect_urls(self):
        """The ``redirect_urls`` meta key accumulates the visited URLs in order."""
        req1 = Request('http://scrapytest.org/first')
        rsp1 = HtmlResponse(req1.url, body=self._body(url='/redirected'))
        req2 = self.mw.process_response(req1, rsp1, self.spider)
        assert isinstance(req2, Request), req2
        rsp2 = HtmlResponse(req2.url, body=self._body(url='/redirected2'))
        req3 = self.mw.process_response(req2, rsp2, self.spider)
        assert isinstance(req3, Request), req3
        self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
        self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
        self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
        self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
NaturalGIS/naturalgis_qgis | python/plugins/processing/algs/grass7/ext/v_in_geonames.py | 45 | 1246 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_in_geonames.py
----------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processCommand(alg, parameters, context, feedback):
    """Run v.in.geonames after forcing the GRASS location to WGS84.

    GeoNames dumps are always delivered in lat/lon coordinates, so the
    location projection must be set to EPSG:4326 before the import runs.
    """
    # Queue the projection switch ahead of the actual import command.
    alg.commands.append('g.proj -c epsg=4326')
    # Hand off to the standard command builder/runner.
    alg.processCommand(parameters, context, feedback)
| gpl-2.0 |
ubgarbage/gae-blog | django/core/servers/fastcgi.py | 289 | 6402 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is a adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
from django.utils import importlib
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default fcgi)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads.
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads.
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing.
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
# Default values for every option accepted by runfastcgi(); see
# FASTCGI_HELP above for the meaning of each key.  ``None`` means
# "not set" and is resolved (or rejected) at runtime.
FASTCGI_OPTIONS = {
    'protocol': 'fcgi',
    'host': None,
    'port': None,
    'socket': None,
    'method': 'fork',
    'daemonize': None,
    'workdir': '/',
    'pidfile': None,
    'maxspare': 5,
    'minspare': 2,
    'maxchildren': 50,
    'maxrequests': 0,
    'debug': None,
    'outlog': None,
    'errlog': None,
    'umask': None,
}
def fastcgi_help(message=None):
    """Print the FastCGI usage text, optionally followed by *message*.

    Always returns False, so callers can write
    ``return fastcgi_help("ERROR: ...")`` to report a problem and signal
    failure in one statement.  (Python 2 ``print`` statement syntax.)
    """
    print FASTCGI_HELP
    if message:
        print message
    return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except:
print "Can't import flup." + flup_module
return False
# Prep up and go
from django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| bsd-3-clause |
achang97/YouTunes | lib/python2.7/site-packages/flask/blueprints.py | 169 | 16872 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
    """Temporary holder object for registering a blueprint with the
    application. An instance of this class is created by the
    :meth:`~flask.Blueprint.make_setup_state` method and later passed
    to all register callback functions.
    """
    def __init__(self, blueprint, app, options, first_registration):
        #: a reference to the current application
        self.app = app
        #: a reference to the blueprint that created this setup state.
        self.blueprint = blueprint
        #: a dictionary with all options that were passed to the
        #: :meth:`~flask.Flask.register_blueprint` method.
        self.options = options
        #: as blueprints can be registered multiple times with the
        #: application and not everything wants to be registered
        #: multiple times on it, this attribute can be used to figure
        #: out if the blueprint was registered in the past already.
        self.first_registration = first_registration
        # Per-registration options override the blueprint's own defaults.
        subdomain = self.options.get('subdomain')
        if subdomain is None:
            subdomain = self.blueprint.subdomain
        #: The subdomain that the blueprint should be active for, ``None``
        #: otherwise.
        self.subdomain = subdomain
        url_prefix = self.options.get('url_prefix')
        if url_prefix is None:
            url_prefix = self.blueprint.url_prefix
        #: The prefix that should be used for all URLs defined on the
        #: blueprint.
        self.url_prefix = url_prefix
        #: A dictionary with URL defaults that is added to each and every
        #: URL that was defined with the blueprint.
        self.url_defaults = dict(self.blueprint.url_values_defaults)
        self.url_defaults.update(self.options.get('url_defaults', ()))
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """A helper method to register a rule (and optionally a view function)
        to the application.  The endpoint is automatically prefixed with the
        blueprint's name.
        """
        if self.url_prefix:
            rule = self.url_prefix + rule
        options.setdefault('subdomain', self.subdomain)
        if endpoint is None:
            # Derive the endpoint from the view function's name.
            endpoint = _endpoint_from_view_func(view_func)
        defaults = self.url_defaults
        if 'defaults' in options:
            # Merge call-site defaults on top of the blueprint-wide ones.
            defaults = dict(defaults, **options.pop('defaults'))
        # Namespace the endpoint as "<blueprint name>.<endpoint>".
        self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
                              view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
    """Represents a blueprint.  A blueprint is an object that records
    functions that will be called with the
    :class:`~flask.blueprints.BlueprintSetupState` later to register functions
    or other things on the main application.  See :ref:`blueprints` for more
    information.
    .. versionadded:: 0.7
    """
    # When True, record() warns if the blueprint is modified after it has
    # already been registered once (late modifications are never applied).
    warn_on_modifications = False
    # Set the first time register() runs; drives the warning above and
    # the first_registration flag passed to the setup state.
    _got_registered_once = False
    def __init__(self, name, import_name, static_folder=None,
                 static_url_path=None, template_folder=None,
                 url_prefix=None, subdomain=None, url_defaults=None,
                 root_path=None):
        _PackageBoundObject.__init__(self, import_name, template_folder,
                                     root_path=root_path)
        self.name = name
        self.url_prefix = url_prefix
        self.subdomain = subdomain
        self.static_folder = static_folder
        self.static_url_path = static_url_path
        # Callbacks to run (with a BlueprintSetupState) at registration time.
        self.deferred_functions = []
        if url_defaults is None:
            url_defaults = {}
        self.url_values_defaults = url_defaults
    def record(self, func):
        """Registers a function that is called when the blueprint is
        registered on the application.  This function is called with the
        state as argument as returned by the :meth:`make_setup_state`
        method.
        """
        if self._got_registered_once and self.warn_on_modifications:
            from warnings import warn
            warn(Warning('The blueprint was already registered once '
                         'but is getting modified now.  These changes '
                         'will not show up.'))
        self.deferred_functions.append(func)
    def record_once(self, func):
        """Works like :meth:`record` but wraps the function in another
        function that will ensure the function is only called once.  If the
        blueprint is registered a second time on the application, the
        function passed is not called.
        """
        def wrapper(state):
            if state.first_registration:
                func(state)
        return self.record(update_wrapper(wrapper, func))
    def make_setup_state(self, app, options, first_registration=False):
        """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
        object that is later passed to the register callback functions.
        Subclasses can override this to return a subclass of the setup state.
        """
        return BlueprintSetupState(self, app, options, first_registration)
    def register(self, app, options, first_registration=False):
        """Called by :meth:`Flask.register_blueprint` to register a blueprint
        on the application.  This can be overridden to customize the register
        behavior.  Keyword arguments from
        :func:`~flask.Flask.register_blueprint` are directly forwarded to this
        method in the `options` dictionary.
        """
        self._got_registered_once = True
        state = self.make_setup_state(app, options, first_registration)
        if self.has_static_folder:
            # Expose the blueprint's static folder under its URL prefix.
            state.add_url_rule(self.static_url_path + '/<path:filename>',
                               view_func=self.send_static_file,
                               endpoint='static')
        # Replay every deferred registration against the real application.
        for deferred in self.deferred_functions:
            deferred(state)
    def route(self, rule, **options):
        """Like :meth:`Flask.route` but for a blueprint.  The endpoint for the
        :func:`url_for` function is prefixed with the name of the blueprint.
        """
        def decorator(f):
            endpoint = options.pop("endpoint", f.__name__)
            self.add_url_rule(rule, endpoint, f, **options)
            return f
        return decorator
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Like :meth:`Flask.add_url_rule` but for a blueprint.  The endpoint for
        the :func:`url_for` function is prefixed with the name of the blueprint.
        """
        if endpoint:
            # Dots would collide with the "<blueprint>.<endpoint>" namespacing.
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
        self.record(lambda s:
            s.add_url_rule(rule, endpoint, view_func, **options))
    def endpoint(self, endpoint):
        """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
        prefix the endpoint with the blueprint name, this has to be done
        explicitly by the user of this method.  If the endpoint is prefixed
        with a `.` it will be registered to the current blueprint, otherwise
        it's an application independent endpoint.
        """
        def decorator(f):
            def register_endpoint(state):
                state.app.view_functions[endpoint] = f
            self.record_once(register_endpoint)
            return f
        return decorator
    def app_template_filter(self, name=None):
        """Register a custom template filter, available application wide.  Like
        :meth:`Flask.template_filter` but for a blueprint.
        :param name: the optional name of the filter, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_filter(f, name=name)
            return f
        return decorator
    def add_app_template_filter(self, f, name=None):
        """Register a custom template filter, available application wide.  Like
        :meth:`Flask.add_template_filter` but for a blueprint.  Works exactly
        like the :meth:`app_template_filter` decorator.
        :param name: the optional name of the filter, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.filters[name or f.__name__] = f
        self.record_once(register_template)
    def app_template_test(self, name=None):
        """Register a custom template test, available application wide.  Like
        :meth:`Flask.template_test` but for a blueprint.
        .. versionadded:: 0.10
        :param name: the optional name of the test, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_test(f, name=name)
            return f
        return decorator
    def add_app_template_test(self, f, name=None):
        """Register a custom template test, available application wide.  Like
        :meth:`Flask.add_template_test` but for a blueprint.  Works exactly
        like the :meth:`app_template_test` decorator.
        .. versionadded:: 0.10
        :param name: the optional name of the test, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.tests[name or f.__name__] = f
        self.record_once(register_template)
    def app_template_global(self, name=None):
        """Register a custom template global, available application wide.  Like
        :meth:`Flask.template_global` but for a blueprint.
        .. versionadded:: 0.10
        :param name: the optional name of the global, otherwise the
                     function name will be used.
        """
        def decorator(f):
            self.add_app_template_global(f, name=name)
            return f
        return decorator
    def add_app_template_global(self, f, name=None):
        """Register a custom template global, available application wide.  Like
        :meth:`Flask.add_template_global` but for a blueprint.  Works exactly
        like the :meth:`app_template_global` decorator.
        .. versionadded:: 0.10
        :param name: the optional name of the global, otherwise the
                     function name will be used.
        """
        def register_template(state):
            state.app.jinja_env.globals[name or f.__name__] = f
        self.record_once(register_template)
    def before_request(self, f):
        """Like :meth:`Flask.before_request` but for a blueprint.  This function
        is only executed before each request that is handled by a function of
        that blueprint.
        """
        # Keyed by blueprint name so it only fires for this blueprint's views.
        self.record_once(lambda s: s.app.before_request_funcs
            .setdefault(self.name, []).append(f))
        return f
    def before_app_request(self, f):
        """Like :meth:`Flask.before_request`.  Such a function is executed
        before each request, even if outside of a blueprint.
        """
        # Keyed by None, i.e. application-wide.
        self.record_once(lambda s: s.app.before_request_funcs
            .setdefault(None, []).append(f))
        return f
    def before_app_first_request(self, f):
        """Like :meth:`Flask.before_first_request`.  Such a function is
        executed before the first request to the application.
        """
        self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
        return f
    def after_request(self, f):
        """Like :meth:`Flask.after_request` but for a blueprint.  This function
        is only executed after each request that is handled by a function of
        that blueprint.
        """
        self.record_once(lambda s: s.app.after_request_funcs
            .setdefault(self.name, []).append(f))
        return f
    def after_app_request(self, f):
        """Like :meth:`Flask.after_request` but for a blueprint.  Such a function
        is executed after each request, even if outside of the blueprint.
        """
        self.record_once(lambda s: s.app.after_request_funcs
            .setdefault(None, []).append(f))
        return f
    def teardown_request(self, f):
        """Like :meth:`Flask.teardown_request` but for a blueprint.  This
        function is only executed when tearing down requests handled by a
        function of that blueprint.  Teardown request functions are executed
        when the request context is popped, even when no actual request was
        performed.
        """
        self.record_once(lambda s: s.app.teardown_request_funcs
            .setdefault(self.name, []).append(f))
        return f
    def teardown_app_request(self, f):
        """Like :meth:`Flask.teardown_request` but for a blueprint.  Such a
        function is executed when tearing down each request, even if outside of
        the blueprint.
        """
        self.record_once(lambda s: s.app.teardown_request_funcs
            .setdefault(None, []).append(f))
        return f
    def context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a blueprint.  This
        function is only executed for requests handled by a blueprint.
        """
        self.record_once(lambda s: s.app.template_context_processors
            .setdefault(self.name, []).append(f))
        return f
    def app_context_processor(self, f):
        """Like :meth:`Flask.context_processor` but for a blueprint.  Such a
        function is executed each request, even if outside of the blueprint.
        """
        self.record_once(lambda s: s.app.template_context_processors
            .setdefault(None, []).append(f))
        return f
    def app_errorhandler(self, code):
        """Like :meth:`Flask.errorhandler` but for a blueprint.  This
        handler is used for all requests, even if outside of the blueprint.
        """
        def decorator(f):
            self.record_once(lambda s: s.app.errorhandler(code)(f))
            return f
        return decorator
    def url_value_preprocessor(self, f):
        """Registers a function as URL value preprocessor for this
        blueprint.  It's called before the view functions are called and
        can modify the url values provided.
        """
        self.record_once(lambda s: s.app.url_value_preprocessors
            .setdefault(self.name, []).append(f))
        return f
    def url_defaults(self, f):
        """Callback function for URL defaults for this blueprint.  It's called
        with the endpoint and values and should update the values passed
        in place.
        """
        self.record_once(lambda s: s.app.url_default_functions
            .setdefault(self.name, []).append(f))
        return f
    def app_url_value_preprocessor(self, f):
        """Same as :meth:`url_value_preprocessor` but application wide.
        """
        self.record_once(lambda s: s.app.url_value_preprocessors
            .setdefault(None, []).append(f))
        return f
    def app_url_defaults(self, f):
        """Same as :meth:`url_defaults` but application wide.
        """
        self.record_once(lambda s: s.app.url_default_functions
            .setdefault(None, []).append(f))
        return f
    def errorhandler(self, code_or_exception):
        """Registers an error handler that becomes active for this blueprint
        only.  Please be aware that routing does not happen local to a
        blueprint so an error handler for 404 usually is not handled by
        a blueprint unless it is caused inside a view function.  Another
        special case is the 500 internal server error which is always looked
        up from the application.
        Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
        of the :class:`~flask.Flask` object.
        """
        def decorator(f):
            self.record_once(lambda s: s.app._register_error_handler(
                self.name, code_or_exception, f))
            return f
        return decorator
    def register_error_handler(self, code_or_exception, f):
        """Non-decorator version of the :meth:`errorhandler` error attach
        function, akin to the :meth:`~flask.Flask.register_error_handler`
        application-wide function of the :class:`~flask.Flask` object but
        for error handlers limited to this blueprint.
        .. versionadded:: 0.11
        """
        self.record_once(lambda s: s.app._register_error_handler(
            self.name, code_or_exception, f))
| mit |
alberthdev/pyradmon | pyradmon/wrapper.py | 1 | 17646 | #!/usr/bin/env python
# PyRadmon - Python Radiance Monitoring Tool
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Main Wrapper Library -
# library for combining everything together!
#
import sys
from core import *
import args
import config
import config_printer
import enumerate as enum
from config import *
from enumerate import enumerate
from data import get_data, get_data_columns, post_data_columns, rel_channels, SPECIAL_FIELDS
from plot import plot, subst_data
import dummymp
try:
# Should be embedded
from prettytable import PrettyTable
except:
print "ERROR: PrettyTable is needed to run this script!"
# Last CPU-availability value logged by report_status(); lets it log only
# when the number of usable CPUs changes.  (A ``global`` statement at
# module scope is a no-op, so a plain assignment is all that is needed.)
old_avail = 0
def report_status(total_completed, total_running, total_procs):
    """DummyMP progress callback: log overall progress and CPU changes.

    Called periodically by dummymp with the number of completed, running,
    and total processes.  Also logs whenever the number of CPUs dummymp
    considers available differs from the last observed value.
    """
    global old_avail
    # Guard the degenerate empty-queue case so we never divide by zero;
    # report 100% when there is nothing to do.  (The ``+ 0.0`` keeps the
    # division a float division under Python 2.)
    if total_procs:
        pct = (total_completed / (total_procs + 0.0)) * 100
    else:
        pct = 100.0
    info("[%.2f%%] %i/%i completed (%i running)" % (pct, total_completed, total_procs, total_running))
    if old_avail != dummymp.config.CPU_AVAIL:
        info("CPU availability changed to %i/%i CPUs!" % (dummymp.config.CPU_AVAIL, dummymp.getTotalCPUs()))
        old_avail = dummymp.config.CPU_AVAIL
def main():
    """Command-line driver for PyRadmon.

    Parses arguments, merges them into a configuration, then dispatches on
    the requested verb:

      * ``config`` -- display and/or save the effective configuration;
      * ``list``   -- enumerate matching data files and print summary stats;
      * ``dump``   -- fetch the data and print it as text tables;
      * ``plot``   -- fetch the data and render plots, optionally in
        parallel via dummymp.

    Exits the process with a non-zero status on any fatal error.
    """
    parser = args.make_argparser()
    parse = parser.parse_args()
    (pyradmon_config, plot_dict, parse_data) = args.parse_to_config(parse)
    if not pyradmon_config:
        sys.exit(1)
    ###################################################################
    ## VERB ACTION CODE
    ###################################################################
    ## Config verb + Config args, part 2
    if parse.verb == "config":
        if isset_obj("config_display", parse) and parse.config_display:
            # Print configuration
            config_printer.display(pyradmon_config, plot_dict)
        if isset_obj("config_save", parse):
            # Save configuration
            config.save(parse.config_save, pyradmon_config, plot_dict)
        # That's it! Exit...
        sys.exit(0)
    # Everything else gets pretty involved!
    if parse.verb == "plot" or parse.verb == "dump" or parse.verb == "list":
        # "dump"/"list" only need the enumeration options post-processed;
        # "plot" also needs the plot configuration post-processed.
        if parse.verb == "dump" or parse.verb == "list":
            enum_opts_dict = config.postprocess_config(pyradmon_config)
            if "data_columns" in enum_opts_dict:
                data_var_list = enum_opts_dict["data_columns"]
            else:
                data_var_list = []
        else:
            (enum_opts_dict, data_var_list) = config.postprocess(pyradmon_config, plot_dict)
        #pprinter(enum_opts_dict)
        if "custom_vars" in enum_opts_dict:
            custom_vars = enum_opts_dict["custom_vars"]
        else:
            custom_vars = None
        # NOTE(review): only the *presence* of "make_dirs" is checked, not
        # its value -- "make_dirs: false" would still enable it; confirm.
        if "make_dirs" in enum_opts_dict:
            make_dirs = True
        else:
            make_dirs = False
        info(" ** Enumerating data files...")
        # NOTE(review): `enumerate` here is presumably PyRadmon's own file
        # enumerator (it shadows the builtin) -- verify against imports.
        (en, stats) = enumerate(**enum_opts_dict)
        if not "data_path_format" in enum_opts_dict:
            warn("No data_path_format specified in configuration. Will use preset default instead.")
            warn("(Preset default: %s)" % enum.DATA_PATH_FORMAT)
            enum_opts_dict["data_path_format"] = enum.DATA_PATH_FORMAT
        if stats["criteria_total_files"] == 0:
            critical("No data found for specified criteria!")
            sys.exit(1)
        #pprinter(en)
        # "plot" and "dump" both need the actual data loaded.
        if parse.verb == "plot" or parse.verb == "dump":
            if "data_all_channels" in pyradmon_config and pyradmon_config["data_all_channels"]:
                info(" ** Fetching data for ALL channels...")
                all_channels = True
                chans = ""
            else:
                if "data_channels" not in enum_opts_dict:
                    critical("ERROR: Data channels were not specified!")
                    critical("If you wish to use all channels, enable the all channels option.")
                    sys.exit(1)
                chans = enum_opts_dict["data_channels"]
                all_channels = False
                # Pretty-print the channel list: "1", "1 and 2", or
                # "1, 2, and 3".
                info(" ** Fetching data for channel %s..." % (chans[0] if len(chans) == 1 else \
                    " and ".join(chans) if len(chans) == 2 else \
                    (", ".join(chans[:-1]) + ", and " + chans[-1])))
            if "data_assim_only" in pyradmon_config and pyradmon_config["data_assim_only"]:
                data_assim_only = True
            else:
                data_assim_only = False
            if "data_suppress_warnings" in pyradmon_config and pyradmon_config["data_suppress_warnings"]:
                data_suppress_warnings = True
            else:
                data_suppress_warnings = False
            if parse.verb == "dump":
                # For a dump, take every column present in the data files
                # and prefix each with the requested data type(s).
                tmp_columns = get_data_columns(en)
                columns = post_data_columns(tmp_columns)
                new_columns = []
                if "data_type" in enum_opts_dict:
                    for prefix in enum_opts_dict['data_type'].split("|"):
                        for column in columns:
                            if column in SPECIAL_FIELDS:
                                if column not in new_columns:
                                    new_columns.append(column)
                            else:
                                new_columns.append(prefix + "|" + column)
                    columns = new_columns
                else:
                    warn("No data type specified - will use ges by default.")
                    for column in columns:
                        if column in SPECIAL_FIELDS:
                            new_columns.append(column)
                        else:
                            new_columns.append("ges|" + column)
                    # NOTE(review): this branch builds new_columns but never
                    # assigns it back to columns, so the "ges|" prefixes
                    # appear to be discarded -- confirm intended behavior.
                data_var_list = columns
                dat = get_data(en, data_var_list, gen_channel_list(chans), enum_opts_dict["data_path_format"], all_channels, data_assim_only, data_suppress_warnings)
            else:
                dat = get_data(en, data_var_list, gen_channel_list(chans), enum_opts_dict["data_path_format"], all_channels, data_assim_only, data_suppress_warnings)
            # If we're reading all channels, set the channel list.
            if all_channels:
                chans = [str(k) for k in dat.keys()]
        if parse.verb == "list":
            #pprinter(stats)
            # Summarize the enumeration statistics as an ASCII box.
            start_date_str = "%s%s%s_%sz" % (str(stats["start_year"]).zfill(4), \
                str(stats["start_month"]).zfill(2), \
                str(stats["start_day"]).zfill(2), \
                str(stats["start_hour"]).zfill(2))
            end_date_str = "%s%s%s_%sz" % (str(stats["end_year"]).zfill(4), \
                str(stats["end_month"]).zfill(2), \
                str(stats["end_day"]).zfill(2), \
                str(stats["end_hour"]).zfill(2))
            maxlen = len("Files matching instrument/sat and data_type:") + 1
            outstrs = []
            outstr = "| " + "Data range:".ljust(maxlen) + "%s - %s" % (start_date_str, end_date_str)
            outstrs.append(outstr)
            # TODO: correct this measurement so that it reflects actual
            # interval, since interval is not necessarily measured per
            # hour!
            outstr = "| " + "Average interval:".ljust(maxlen) + "%s hrs" % (str(stats["average_interval"]))
            outstrs.append(outstr)
            outstr = "| " + "Total files:".ljust(maxlen) + "%i files" % (stats["total_files"])
            outstrs.append(outstr)
            outstr = "| " + "Files matching instrument/sat and data_type:".ljust(maxlen) + "%i files" % (stats["criteria_total_files"])
            outstrs.append(outstr)
            outstr = "| " + "Available data types:".ljust(maxlen) + ", ".join(stats["available_data_type"])
            outstrs.append(outstr)
            outstr = "| " + "Available instrument/sats:".ljust(maxlen) + ", ".join(stats["available_instrument_sat"])
            outstrs.append(outstr)
            # Pad every row to the widest one and close the box.
            maxtotallen = max([len(x) for x in outstrs])
            outstrs_final = [ x.ljust(maxtotallen)+" |" for x in outstrs ]
            print "=" * (maxtotallen + 2)
            print "| Data Information".ljust(maxtotallen) + " |"
            print "=" * (maxtotallen + 2)
            for outstr in outstrs_final:
                print outstr
            print "=" * (maxtotallen + 2)
            sys.exit(0)
        if parse.verb == "dump":
            #pprinter(dat)
            # DIRTY HACK ALERT! DIRTY HACK ALERT!
            # We don't know whether the dat dictionary is a single
            # channel dictionary or a multi channel one.
            # The hack? Just check to see if the first key is an int or
            # not. If it is, it's probably multi-channel. If not, single.
            # (This technically should work everytime since keys for single
            # channel tend to only be strings, not ints. And vice versa.)
            #
            # TODO: fix this by adding a key/value pair inside the dict
            # to indicate this!
            if type(dat.keys()[0]) == int:
                # Multichanel mode
                dat_sorted = sortODShallow(dat)
                # Iterate channels!
                for chan in dat_sorted.keys():
                    # Print channel!
                    print "=" * 20
                    print "Channel %i:" % chan
                    print "=" * 20
                    # Set up headers!
                    table = PrettyTable(dat[chan].keys())
                    for key in dat[chan].keys():
                        table.align[key] = "l" # Left align column contents
                    table.padding_width = 1 # One space between column edges and contents (default)
                    # Quick validation: every list-valued key must have the
                    # same length (skipped for channels marked unused via a
                    # negative "iuse").
                    data_length = -1
                    for key in dat[chan].keys():
                        if data_length == -1:
                            data_length = len(dat[chan][key]) if dat[chan][key] else 0
                        else:
                            if not (("iuse" in dat[chan]) and (type(dat[chan]["iuse"]) == int) and (dat[chan]["iuse"] < 0)):
                                if (type(dat[chan][key]) == list) and (len(dat[chan][key]) != data_length):
                                    critical("ERROR: Data length is not consistant across all keys! (Multi-channel mode - current %i vs first %i)" % (len(dat[chan][key]), data_length))
                                    #print dat[chan][key]
                                    sys.exit(1)
                    # OK, we're good!
                    for i in xrange(0, data_length):
                        data_arr = []
                        for key in dat[chan].keys():
                            if type(dat[chan][key]) == list:
                                data_arr.append(dat[chan][key][i])
                            else:
                                # Scalar values are repeated on every row.
                                data_arr.append(dat[chan][key])
                        table.add_row(data_arr)
                    print table
            else:
                # Single channel mode
                # Set up headers!
                table = PrettyTable(dat.keys())
                for key in dat.keys():
                    table.align[key] = "l" # Left align column contents
                table.padding_width = 1 # One space between column edges and contents (default)
                # Quick validation!
                data_length = 0
                for key in dat.keys():
                    if data_length == 0:
                        data_length = len(dat[key])
                    else:
                        if (type(dat[key]) == list) and (len(dat[key]) != data_length):
                            critical("ERROR: Data length is not consistant across all keys! (Single channel mode - current %i vs first %i)" % (len(dat[key]), data_length))
                            sys.exit(1)
                # OK, we're good!
                for i in xrange(0, data_length):
                    data_arr = []
                    for key in dat.keys():
                        if type(dat[key]) == list:
                            data_arr.append(dat[key][i])
                        else:
                            data_arr.append(dat[key])
                    table.add_row(data_arr)
                print table
            sys.exit(0)
        if parse.verb == "plot":
            # Configure the dummymp multiprocessing layer (unless disabled).
            if ("mp_disable" in pyradmon_config) and (pyradmon_config["mp_disable"]):
                info("Multiprocessing (mp) is disabled, processing in order...")
            else:
                # Disable deepcopy - we'll handle it ourselves!
                dummymp.set_args_deepcopy(False)
                if "mp_priority_mode" in pyradmon_config:
                    if pyradmon_config["mp_priority_mode"] == "GENEROUS":
                        info("Multiprocessing (mp) priority mode set to GENEROUS.")
                        dummymp.set_priority_mode(dummymp.DUMMYMP_GENEROUS)
                    elif pyradmon_config["mp_priority_mode"] == "NORMAL":
                        info("Multiprocessing (mp) priority mode set to NORMAL.")
                        dummymp.set_priority_mode(dummymp.DUMMYMP_NORMAL)
                    elif pyradmon_config["mp_priority_mode"] == "AGGRESSIVE":
                        info("Multiprocessing (mp) priority mode set to AGGRESSIVE.")
                        dummymp.set_priority_mode(dummymp.DUMMYMP_AGGRESSIVE)
                    elif pyradmon_config["mp_priority_mode"] == "EXTREME":
                        info("Multiprocessing (mp) priority mode set to EXTREME.")
                        dummymp.set_priority_mode(dummymp.DUMMYMP_EXTREME)
                    elif pyradmon_config["mp_priority_mode"] == "NUCLEAR":
                        info("Multiprocessing (mp) priority mode set to NUCLEAR.")
                        dummymp.set_priority_mode(dummymp.DUMMYMP_NUCLEAR)
                    else:
                        die("ERROR: Invalid multiprocesing (mp) priority mode detected - this may be a bug!")
                if "mp_cpu_limit" in pyradmon_config:
                    info("Multiprocessing (mp) maximum CPU limit set to %i CPUs." % pyradmon_config["mp_cpu_limit"])
                    if pyradmon_config["mp_cpu_limit"] == 1:
                        info("(We noticed that you limited it to 1 CPU... we recommend")
                        info("using --mp-disable or 'mp_disable: true' instead.)")
                    dummymp.set_max_processes(pyradmon_config["mp_cpu_limit"])
            # Make relative channel mapping!
            rel_channels_dict = rel_channels(list(gen_channel_list(chans)))
            for channel in gen_channel_list(chans):
                info(" ** Plotting data for channel %i..." % channel)
                enum_opts_dict["channel"] = channel
                # HACK - see above for multichannel/single channel hack
                if type(dat.keys()[0]) == int:
                    # Multichanel mode
                    try:
                        plot_dict_subs = subst_data(plot_dict, dat[channel])
                        if ("mp_disable" in pyradmon_config) and (pyradmon_config["mp_disable"]):
                            plot(plot_dict_subs, dat[channel], enum_opts_dict, rel_channels_dict, custom_vars, make_dirs)
                        else:
                            # Deep-copy mutable args ourselves since dummymp
                            # deepcopy was disabled above.
                            dummymp.run(plot, copy.deepcopy(plot_dict_subs), dat[channel], copy.deepcopy(enum_opts_dict), rel_channels_dict, custom_vars, make_dirs)
                            dummymp.process_process()
                        del plot_dict_subs
                    except:
                        critical("An error occurred! Error follows:")
                        critical(traceback.format_exc())
                        #print "Dumping data_dict:"
                        #pprint.pprint(dat)
                        critical("Exiting.")
                        sys.exit(1)
                else:
                    try:
                        plot_dict_subs = subst_data(plot_dict, dat)
                        if ("mp_disable" in pyradmon_config) and (pyradmon_config["mp_disable"]):
                            plot(plot_dict_subs, dat, enum_opts_dict, rel_channels_dict, custom_vars, make_dirs)
                        else:
                            dummymp.run(plot, copy.deepcopy(plot_dict_subs), dat, copy.deepcopy(enum_opts_dict), rel_channels_dict, custom_vars, make_dirs)
                            dummymp.process_process()
                        del plot_dict_subs
                    except:
                        critical("An error occurred! Error follows:")
                        critical(traceback.format_exc())
                        #print "Dumping data_dict:"
                        #pprint.pprint(dat)
                        critical("Exiting.")
                        sys.exit(1)
            # Drain any queued dummymp jobs before finishing.
            if not (("mp_disable" in pyradmon_config) and (pyradmon_config["mp_disable"])):
                dummymp.set_end_callback(report_status)
                ncpus = dummymp.getCPUAvail()
                if ncpus == 0:
                    info(" ** Detected that the system is overloaded. Plot generation may be slow.")
                    info(" ** To run tasks without waiting for CPU availability, increase priority.")
                info(" ** Detected %i or more CPUs available..." % ncpus)
                dummymp.process_until_done()
    info("Done!")
# Script entry point: run the PyRadmon command-line driver.
if __name__ == "__main__":
    main()
| apache-2.0 |
Brandon32/jasper-client | client/vocabcompiler.py | 34 | 19268 | # -*- coding: utf-8-*-
"""
Iterates over all the WORDS variables in the modules and creates a
vocabulary for the respective stt_engine if needed.
"""
import os
import tempfile
import logging
import hashlib
import subprocess
import tarfile
import re
import contextlib
import shutil
from abc import ABCMeta, abstractmethod, abstractproperty
import yaml
import brain
import jasperpath
from g2p import PhonetisaurusG2P
try:
import cmuclmtk
except ImportError:
logging.getLogger(__name__).error("Error importing CMUCLMTK module. " +
"PocketsphinxVocabulary will not work " +
"correctly.", exc_info=True)
class AbstractVocabulary(object):
    """
    Abstract base class for Vocabulary classes.

    Please note that subclasses have to implement the _compile_vocabulary()
    method and set a string as the PATH_PREFIX class attribute.
    """
    # Python 2 style metaclass declaration (this project targets Python 2;
    # under Python 3 this attribute is simply ignored).
    __metaclass__ = ABCMeta

    @classmethod
    def phrases_to_revision(cls, phrases):
        """
        Calculates a revision from phrases by using the SHA1 hash function.

        Arguments:
            phrases -- a list of phrases

        Returns:
            A revision string for given phrases.
        """
        sorted_phrases = sorted(phrases)
        joined_phrases = '\n'.join(sorted_phrases)
        sha1 = hashlib.sha1()
        # Hash bytes, not text: hashlib requires a bytes-like object on
        # Python 3; encoding is harmless for the ASCII phrases used here.
        sha1.update(joined_phrases.encode('utf-8'))
        return sha1.hexdigest()

    def __init__(self, name='default', path='.'):
        """
        Initializes a new Vocabulary instance.

        Optional Arguments:
            name -- (optional) the name of the vocabulary (Default: 'default')
            path -- (optional) the path in which the vocabulary exists or will
                    be created (Default: '.')
        """
        self.name = name
        self.path = os.path.abspath(os.path.join(path, self.PATH_PREFIX, name))
        self._logger = logging.getLogger(__name__)

    @property
    def revision_file(self):
        """
        Returns:
            The path of the revision file as string
        """
        return os.path.join(self.path, 'revision')

    @abstractproperty
    def is_compiled(self):
        """
        Checks if the vocabulary is compiled by checking if the revision file
        is readable. This method should be overridden by subclasses to check
        for class-specific additional files, too.

        Returns:
            True if the dictionary is compiled, else False
        """
        return os.access(self.revision_file, os.R_OK)

    @property
    def compiled_revision(self):
        """
        Reads the compiled revision from the revision file.

        Returns:
            the revision of this vocabulary (i.e. the string
            inside the revision file), or None if is_compiled
            is False
        """
        if not self.is_compiled:
            return None
        with open(self.revision_file, 'r') as f:
            revision = f.read().strip()
        self._logger.debug("compiled_revision is '%s'", revision)
        return revision

    def matches_phrases(self, phrases):
        """
        Convenience method to check if this vocabulary exactly contains the
        phrases passed to this method.

        Arguments:
            phrases -- a list of phrases

        Returns:
            True if phrases exactly matches the phrases inside this
            vocabulary.
        """
        return (self.compiled_revision == self.phrases_to_revision(phrases))

    def compile(self, phrases, force=False):
        """
        Compiles this vocabulary. If the force argument is True, compilation
        will be forced regardless of necessity (which means that the
        preliminary check if the current revision already equals the
        revision after compilation will be skipped).

        This method is not meant to be overridden by subclasses - use the
        _compile_vocabulary()-method instead.

        Arguments:
            phrases -- a list of phrases that this vocabulary will contain
            force -- (optional) forces compilation (Default: False)

        Returns:
            The revision of the compiled vocabulary
        """
        revision = self.phrases_to_revision(phrases)
        if not force and self.compiled_revision == revision:
            self._logger.debug('Compilation not neccessary, compiled ' +
                               'version matches phrases.')
            return revision
        if not os.path.exists(self.path):
            self._logger.debug("Vocabulary dir '%s' does not exist, " +
                               "creating...", self.path)
            try:
                os.makedirs(self.path)
            except OSError:
                self._logger.error("Couldn't create vocabulary dir '%s'",
                                   self.path, exc_info=True)
                raise
        try:
            # Write the revision first; it is removed again below if the
            # actual compilation fails, so a readable revision file always
            # implies a complete compilation.
            with open(self.revision_file, 'w') as f:
                f.write(revision)
        except (OSError, IOError):
            self._logger.error("Couldn't write revision file in '%s'",
                               self.revision_file, exc_info=True)
            raise
        else:
            self._logger.info('Starting compilation...')
            try:
                self._compile_vocabulary(phrases)
            except Exception as e:
                self._logger.error("Fatal compilation Error occured, " +
                                   "cleaning up...", exc_info=True)
                try:
                    os.remove(self.revision_file)
                except OSError:
                    pass
                raise e
            else:
                self._logger.info('Compilation done.')
        return revision

    @abstractmethod
    def _compile_vocabulary(self, phrases):
        """
        Abstract method that should be overridden in subclasses with custom
        compilation code.

        Arguments:
            phrases -- a list of phrases that this vocabulary will contain
        """
class DummyVocabulary(AbstractVocabulary):
    # No-op vocabulary implementation used for testing purposes.

    PATH_PREFIX = 'dummy-vocabulary'

    @property
    def is_compiled(self):
        """
        Checks if the vocabulary is compiled by checking if the revision
        file is readable.

        Returns:
            True if this vocabulary has been compiled, else False
        """
        # Name the class explicitly rather than super(self.__class__, self):
        # the latter recurses infinitely if this class is ever subclassed.
        return super(DummyVocabulary, self).is_compiled

    def _compile_vocabulary(self, phrases):
        """
        Does nothing (because this is a dummy class for testing purposes).
        """
        pass
class PocketsphinxVocabulary(AbstractVocabulary):
    # Vocabulary for the Pocketsphinx STT engine: compiles a statistical
    # language model (via CMUCLMTK) plus a pronunciation dictionary (via
    # Phonetisaurus grapheme-to-phoneme conversion).

    PATH_PREFIX = 'pocketsphinx-vocabulary'

    @property
    def languagemodel_file(self):
        """
        Returns:
            The path of the the pocketsphinx languagemodel file as string
        """
        return os.path.join(self.path, 'languagemodel')

    @property
    def dictionary_file(self):
        """
        Returns:
            The path of the pocketsphinx dictionary file as string
        """
        return os.path.join(self.path, 'dictionary')

    @property
    def is_compiled(self):
        """
        Checks if the vocabulary is compiled by checking if the revision,
        languagemodel and dictionary files are readable.

        Returns:
            True if this vocabulary has been compiled, else False
        """
        return (super(self.__class__, self).is_compiled and
                os.access(self.languagemodel_file, os.R_OK) and
                os.access(self.dictionary_file, os.R_OK))

    @property
    def decoder_kwargs(self):
        """
        Convenience property to use this Vocabulary with the __init__() method
        of the pocketsphinx.Decoder class.

        Returns:
            A dict containing kwargs for the pocketsphinx.Decoder.__init__()
            method.

        Example:
            decoder = pocketsphinx.Decoder(**vocab_instance.decoder_kwargs,
                                           hmm='/path/to/hmm')
        """
        return {'lm': self.languagemodel_file, 'dict': self.dictionary_file}

    def _compile_vocabulary(self, phrases):
        """
        Compiles the vocabulary to the Pocketsphinx format by creating a
        languagemodel and a dictionary.

        Arguments:
            phrases -- a list of phrases that this vocabulary will contain
        """
        # Wrap every phrase in sentence-boundary markers for the LM toolkit.
        text = " ".join([("<s> %s </s>" % phrase) for phrase in phrases])
        self._logger.debug('Compiling languagemodel...')
        vocabulary = self._compile_languagemodel(text, self.languagemodel_file)
        self._logger.debug('Starting dictionary...')
        self._compile_dictionary(vocabulary, self.dictionary_file)

    def _compile_languagemodel(self, text, output_file):
        """
        Compiles the languagemodel from a text.

        Arguments:
            text -- the text the languagemodel will be generated from
            output_file -- the path of the file this languagemodel will
                           be written to

        Returns:
            A list of all unique words this vocabulary contains.
        """
        # Only the temp file *name* is needed; the toolkit writes to it.
        with tempfile.NamedTemporaryFile(suffix='.vocab', delete=False) as f:
            vocab_file = f.name
        # Create vocab file from text
        self._logger.debug("Creating vocab file: '%s'", vocab_file)
        cmuclmtk.text2vocab(text, vocab_file)
        # Create language model from text
        self._logger.debug("Creating languagemodel file: '%s'", output_file)
        cmuclmtk.text2lm(text, output_file, vocab_file=vocab_file)
        # Get words from vocab file
        self._logger.debug("Getting words from vocab file and removing it " +
                           "afterwards...")
        words = []
        with open(vocab_file, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip comment lines and the sentence-boundary tokens.
                if not line.startswith('#') and line not in ('<s>', '</s>'):
                    words.append(line)
        os.remove(vocab_file)
        return words

    def _compile_dictionary(self, words, output_file):
        """
        Compiles the dictionary from a list of words.

        Arguments:
            words -- a list of all unique words this vocabulary contains
            output_file -- the path of the file this dictionary will
                           be written to
        """
        # create the dictionary
        self._logger.debug("Getting phonemes for %d words...", len(words))
        g2pconverter = PhonetisaurusG2P(**PhonetisaurusG2P.get_config())
        phonemes = g2pconverter.translate(words)
        self._logger.debug("Creating dict file: '%s'", output_file)
        with open(output_file, "w") as f:
            for word, pronounciations in phonemes.items():
                for i, pronounciation in enumerate(pronounciations, start=1):
                    # Alternate pronunciations use the WORD(n) convention of
                    # pocketsphinx dictionaries.
                    if i == 1:
                        line = "%s\t%s\n" % (word, pronounciation)
                    else:
                        line = "%s(%d)\t%s\n" % (word, i, pronounciation)
                    f.write(line)
class JuliusVocabulary(AbstractVocabulary):
    # Vocabulary for the Julius STT engine: builds a grammar (.dfa) and
    # word dictionary (.dict) with the external mkdfa.pl tool, using the
    # VoxForge lexicon for pronunciations.

    class VoxForgeLexicon(object):
        # Word -> [pronunciation, ...] lookup parsed from a VoxForge
        # dictionary file (optionally inside a tar archive).

        def __init__(self, fname, membername=None):
            self._dict = {}
            self.parse(fname, membername)

        @contextlib.contextmanager
        def open_dict(self, fname, membername=None):
            """Yield a readable file object for the dictionary, extracting
            membername from the archive when fname is a tarfile."""
            if tarfile.is_tarfile(fname):
                if not membername:
                    raise ValueError('archive membername not set!')
                tf = tarfile.open(fname)
                f = tf.extractfile(membername)
                yield f
                f.close()
                tf.close()
            else:
                with open(fname) as f:
                    yield f

        def parse(self, fname, membername=None):
            """Populate self._dict from lines of the form '[WORD] PHONEMES'."""
            pattern = re.compile(r'\[(.+)\]\W(.+)')
            with self.open_dict(fname, membername=membername) as f:
                for line in f:
                    matchobj = pattern.search(line)
                    if matchobj:
                        word, phoneme = [x.strip() for x in matchobj.groups()]
                        if word in self._dict:
                            self._dict[word].append(phoneme)
                        else:
                            self._dict[word] = [phoneme]

        def translate_word(self, word):
            """Return the list of pronunciations for word ([] if unknown)."""
            if word in self._dict:
                return self._dict[word]
            else:
                return []

    PATH_PREFIX = 'julius-vocabulary'

    @property
    def dfa_file(self):
        """
        Returns:
            The path of the the julius dfa file as string
        """
        return os.path.join(self.path, 'dfa')

    @property
    def dict_file(self):
        """
        Returns:
            The path of the the julius dict file as string
        """
        return os.path.join(self.path, 'dict')

    @property
    def is_compiled(self):
        # Compiled means: revision file plus both generated files readable.
        return (super(self.__class__, self).is_compiled and
                os.access(self.dfa_file, os.R_OK) and
                os.access(self.dict_file, os.R_OK))

    def _get_grammar(self, phrases):
        # Fixed grammar: a sentence is one or more WORDs between silence
        # markers.  (phrases is currently unused here.)
        return {'S': [['NS_B', 'WORD_LOOP', 'NS_E']],
                'WORD_LOOP': [['WORD_LOOP', 'WORD'], ['WORD']]}

    def _get_word_defs(self, lexicon, phrases):
        """Build the category -> [(word, phoneme), ...] mapping for the
        .voca file, splitting multi-word phrases into single words."""
        word_defs = {'NS_B': [('<s>', 'sil')],
                     'NS_E': [('</s>', 'sil')],
                     'WORD': []}
        words = []
        for phrase in phrases:
            if ' ' in phrase:
                for word in phrase.split(' '):
                    words.append(word)
            else:
                words.append(phrase)
        for word in words:
            # One entry per known pronunciation; unknown words are dropped.
            for phoneme in lexicon.translate_word(word):
                word_defs['WORD'].append((word, phoneme))
        return word_defs

    def _compile_vocabulary(self, phrases):
        """Write grammar and voca files to a temp dir, run mkdfa.pl on
        them, and move the resulting dfa/dict files into place."""
        prefix = 'jasper'
        tmpdir = tempfile.mkdtemp()
        lexicon_file = jasperpath.data('julius-stt', 'VoxForge.tgz')
        lexicon_archive_member = 'VoxForge/VoxForgeDict'
        profile_path = jasperpath.config('profile.yml')
        # The profile may override the lexicon location.
        if os.path.exists(profile_path):
            with open(profile_path, 'r') as f:
                profile = yaml.safe_load(f)
                if 'julius' in profile:
                    if 'lexicon' in profile['julius']:
                        lexicon_file = profile['julius']['lexicon']
                    if 'lexicon_archive_member' in profile['julius']:
                        lexicon_archive_member = \
                            profile['julius']['lexicon_archive_member']
        lexicon = JuliusVocabulary.VoxForgeLexicon(lexicon_file,
                                                   lexicon_archive_member)
        # Create grammar file
        tmp_grammar_file = os.path.join(tmpdir,
                                        os.extsep.join([prefix, 'grammar']))
        with open(tmp_grammar_file, 'w') as f:
            grammar = self._get_grammar(phrases)
            # 'S' (the start symbol) must be written first.
            for definition in grammar.pop('S'):
                f.write("%s: %s\n" % ('S', ' '.join(definition)))
            for name, definitions in grammar.items():
                for definition in definitions:
                    f.write("%s: %s\n" % (name, ' '.join(definition)))
        # Create voca file
        tmp_voca_file = os.path.join(tmpdir, os.extsep.join([prefix, 'voca']))
        with open(tmp_voca_file, 'w') as f:
            for category, words in self._get_word_defs(lexicon,
                                                       phrases).items():
                f.write("%% %s\n" % category)
                for word, phoneme in words:
                    f.write("%s\t\t\t%s\n" % (word, phoneme))
        # mkdfa.pl works on files relative to the current directory, so
        # chdir into the temp dir for the duration of the call.
        olddir = os.getcwd()
        os.chdir(tmpdir)
        cmd = ['mkdfa.pl', str(prefix)]
        with tempfile.SpooledTemporaryFile() as out_f:
            subprocess.call(cmd, stdout=out_f, stderr=out_f)
            out_f.seek(0)
            for line in out_f.read().splitlines():
                line = line.strip()
                if line:
                    self._logger.debug(line)
        os.chdir(olddir)
        tmp_dfa_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dfa']))
        tmp_dict_file = os.path.join(tmpdir, os.extsep.join([prefix, 'dict']))
        shutil.move(tmp_dfa_file, self.dfa_file)
        shutil.move(tmp_dict_file, self.dict_file)
        shutil.rmtree(tmpdir)
def get_phrases_from_module(module):
    """
    Gets phrases from a module.

    Arguments:
        module -- a module reference

    Returns:
        The module's WORDS list, or an empty list if the module does not
        define WORDS.
    """
    return getattr(module, 'WORDS', [])
def get_keyword_phrases():
    """
    Gets the keyword phrases from the keywords file in the jasper data dir.

    Returns:
        A list of keyword phrases, one per non-blank line of the file.
    """
    with open(jasperpath.data('keyword_phrases'), mode="r") as f:
        stripped_lines = (line.strip() for line in f)
        return [phrase for phrase in stripped_lines if phrase]
def get_all_phrases():
    """
    Gets phrases from all modules.

    Returns:
        A sorted list of the unique phrases found across all brain modules.
    """
    unique_phrases = set()
    for module in brain.Brain.get_modules():
        unique_phrases.update(get_phrases_from_module(module))
    return sorted(unique_phrases)
if __name__ == '__main__':
    # Demo/utility mode: compile every known vocabulary type from the
    # phrases found in the installed modules.
    import argparse
    parser = argparse.ArgumentParser(description='Vocabcompiler Demo')
    parser.add_argument('--base-dir', action='store',
                        help='the directory in which the vocabulary will be ' +
                             'compiled.')
    parser.add_argument('--debug', action='store_true',
                        help='show debug messages')
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
    # Without --base-dir, compile into a throwaway temp dir (removed below).
    base_dir = args.base_dir if args.base_dir else tempfile.mkdtemp()
    phrases = get_all_phrases()
    print("Module phrases: %r" % phrases)
    # Every concrete AbstractVocabulary subclass with a PATH_PREFIX gets
    # compiled (and re-compiled if its phrases are out of date).
    for subclass in AbstractVocabulary.__subclasses__():
        if hasattr(subclass, 'PATH_PREFIX'):
            vocab = subclass(path=base_dir)
            print("Vocabulary in:     %s" % vocab.path)
            print("Revision file:     %s" % vocab.revision_file)
            print("Compiled revision: %s" % vocab.compiled_revision)
            print("Is compiled:       %r" % vocab.is_compiled)
            print("Matches phrases:   %r" % vocab.matches_phrases(phrases))
            if not vocab.is_compiled or not vocab.matches_phrases(phrases):
                print("Compiling...")
                vocab.compile(phrases)
                print("")
                # Show the post-compilation state for comparison.
                print("Vocabulary in:     %s" % vocab.path)
                print("Revision file:     %s" % vocab.revision_file)
                print("Compiled revision: %s" % vocab.compiled_revision)
                print("Is compiled:       %r" % vocab.is_compiled)
                print("Matches phrases:   %r" % vocab.matches_phrases(phrases))
            print("")
    if not args.base_dir:
        print("Removing temporary directory '%s'..." % base_dir)
        shutil.rmtree(base_dir)
| mit |
piMoll/SEILAPLAN | lib/reportlab/graphics/barcode/usps4s.py | 2 | 15593 | #copyright ReportLab Inc. 2000-2016
#see license.txt for license details
from __future__ import print_function
__version__='3.3.0'
__all__ = ('USPS_4State',)
from reportlab.lib.colors import black
from reportlab.graphics.barcode.common import Barcode
from reportlab.lib.utils import asNative
def nhex(i):
    """Return hex(i) normalized: lowercase hex digits and no trailing
    Python 2 long-literal 'L' suffix."""
    text = hex(i)
    normalized = text[:2] + text[2:].lower()
    if normalized.endswith('l'):
        normalized = normalized[:-1]
    return normalized
class USPS_4State(Barcode):
    ''' USPS 4-State OneView (TM) barcode. All info from USPS-B-3200A
    '''
    # Default sizing/appearance parameters (overridable via keywords).
    _widthSize = 1
    _heightSize = 1
    _fontSize = 11
    _humanReadable = 0
    # Bar geometry tables, in inches.  Each entry maps a bar type
    # (F=full, A=ascender, D=descender, T=tracker) to a (min, max) pair
    # later interpolated by the current size scale.  The else-branch keeps
    # an alternative (disabled) set of values for reference.
    if True:
        tops = dict(
            F = (0.0625,0.0825),
            T = (0.0195,0.0285),
            A = (0.0625,0.0825),
            D = (0.0195,0.0285),
            )
        bottoms = dict(
            F = (-0.0625,-0.0825),
            T = (-0.0195,-0.0285),
            D = (-0.0625,-0.0825),
            A = (-0.0195,-0.0285),
            )
        dimensions = dict(
            width = (0.015, 0.025),
            pitch = (0.0416, 0.050),
            hcz = (0.125,0.125),
            vcz = (0.028,0.028),
            )
    else:
        tops = dict(
            F = (0.067,0.115),
            T = (0.021,0.040),
            A = (0.067,0.115),
            D = (0.021,0.040),
            )
        bottoms = dict(
            F = (-0.067,-0.115),
            D = (-0.067,-0.115),
            T = (-0.021,-0.040),
            A = (-0.021,-0.040),
            )
        dimensions = dict(
            width = (0.015, 0.025),
            pitch = (0.0416,0.050),
            hcz = (0.125,0.125),
            vcz = (0.040,0.040),
            )
    def __init__(self,value='01234567094987654321',routing='',**kwd):
        """Initialize the barcode.

        Arguments:
            value -- the 20-digit tracking code, or the combined
                tracking+routing digit string (20, 25, 29 or 31 digits).
            routing -- the 5, 9 or 11 digit routing code, if not already
                appended to value.
            **kwd -- appearance keywords forwarded to _setKeywords().

        Raises:
            ValueError -- if the digit counts are not one of the legal
                combinations above.
        """
        self._init()
        value = str(value) if isinstance(value,int) else asNative(value)
        if not routing:
            #legal values for combined tracking + routing
            if len(value) in (20,25,29,31):
                value, routing = value[:20], value[20:]
            else:
                raise ValueError('value+routing length must be 20, 25, 29 or 31 digits not %d' % len(value))
        elif len(routing) not in (5,9,11):
            raise ValueError('routing length must be 5, 9 or 11 digits not %d' % len(routing))
        self._tracking = value
        self._routing = routing
        self._setKeywords(**kwd)
    def _init(self):
        # Reset all cached derived values; they are recomputed lazily by
        # the binary/codewords/characters/barcodes properties.
        self._bvalue = None
        self._codewords = None
        self._characters = None
        self._barcodes = None

    def scale(kind,D,s):
        # Linearly interpolate D[kind] == (min, max) by fraction s and
        # convert inches to points (x72).
        V = D[kind]
        return 72*(V[0]*(1-s)+s*V[1])
    scale = staticmethod(scale)
    def tracking(self,tracking):
        # Setting the tracking number invalidates all cached values.
        self._init()
        self._tracking = tracking
    tracking = property(lambda self: self._tracking,tracking)

    def routing(self,routing):
        # Setting the routing number invalidates all cached values.
        self._init()
        self._routing = routing
    routing = property(lambda self: self._routing,routing)

    def widthSize(self,value):
        # Fractional width scale; clamped into [0, 1] on assignment.
        self._sized = None
        self._widthSize = min(max(0,value),1)
    widthSize = property(lambda self: self._widthSize,widthSize)

    def heightSize(self,value):
        # Fractional height scale (clamped later via heightScale).
        self._sized = None
        self._heightSize = value
    heightSize = property(lambda self: self._heightSize,heightSize)

    def fontSize(self,value):
        # Font size for the human readable text (points).
        self._sized = None
        self._fontSize = value
    fontSize = property(lambda self: self._fontSize,fontSize)

    def humanReadable(self,value):
        # Whether to draw the human readable text alongside the bars.
        self._sized = None
        self._humanReadable = value
    humanReadable = property(lambda self: self._humanReadable,humanReadable)
    def binary(self):
        '''convert the 4 state string values to binary
        >>> print(nhex(USPS_4State('01234567094987654321','').binary))
        0x1122103b5c2004b1
        >>> print(nhex(USPS_4State('01234567094987654321','01234').binary))
        0xd138a87bab5cf3804b1
        >>> print(nhex(USPS_4State('01234567094987654321','012345678').binary))
        0x202bdc097711204d21804b1
        >>> print(nhex(USPS_4State('01234567094987654321','01234567891').binary))
        0x16907b2a24abc16a2e5c004b1
        '''
        value = self._bvalue
        if not value:
            # The routing code is packed first; the added constant encodes
            # its length so it can be recovered on decode (per USPS-B-3200A).
            routing = self.routing
            n = len(routing)
            try:
                if n==0:
                    value = 0
                elif n==5:
                    value = int(routing)+1
                elif n==9:
                    value = int(routing)+100001
                elif n==11:
                    value = int(routing)+1000100001
                else:
                    raise ValueError
            except:
                raise ValueError('Problem converting %s, routing code must be 0, 5, 9 or 11 digits' % routing)
            tracking = self.tracking
            # Barcode identifier: first digit base 10, second digit base 5.
            svalue = tracking[0:2]
            try:
                value *= 10
                value += int(svalue[0])
                value *= 5
                value += int(svalue[1])
            except:
                raise ValueError('Problem converting %s, barcode identifier must be 2 digits' % svalue)
            # Remaining tracking fields are appended digit by digit.
            i = 2
            for name,nd in (('special services',3), ('customer identifier',6), ('sequence number',9)):
                j = i
                i += nd
                svalue = tracking[j:i]
                try:
                    if len(svalue)!=nd: raise ValueError
                    for j in range(nd):
                        value *= 10
                        value += int(svalue[j])
                except:
                    raise ValueError('Problem converting %s, %s must be %d digits' % (svalue,name,nd))
            self._bvalue = value
        return value
    binary = property(binary)
    def codewords(self):
        '''convert binary value into codewords
        >>> print(USPS_4State('01234567094987654321','01234567891').codewords)
        (673, 787, 607, 1022, 861, 19, 816, 1294, 35, 602)
        '''
        if not self._codewords:
            value = self.binary
            # Split the packed value into 10 codewords: J is the value
            # modulo 636, B..I modulo 1365 each, A is what remains.
            A, J = divmod(value,636)
            A, I = divmod(A,1365)
            A, H = divmod(A,1365)
            A, G = divmod(A,1365)
            A, F = divmod(A,1365)
            A, E = divmod(A,1365)
            A, D = divmod(A,1365)
            A, C = divmod(A,1365)
            A, B = divmod(A,1365)
            assert 0<=A<=658, 'improper value %s passed to _2codewords A-->%s' % (hex(int(value)),A)
            # 11-bit frame check sequence: its top bit is folded into A
            # (offset 659) and the orientation bit doubles J.
            self._fcs = _crc11(value)
            if self._fcs&1024: A += 659
            J *= 2
            self._codewords = tuple(map(int,(A,B,C,D,E,F,G,H,I,J)))
        return self._codewords
    codewords = property(codewords)
    def table1(self):
        # Lazily build the 5-of-13 character table (1287 entries) and cache
        # it on the class, shared by all instances.
        self.__class__.table1 = _initNof13Table(5,1287)
        return self.__class__.table1
    table1 = property(table1)

    def table2(self):
        # Lazily build the 2-of-13 character table (78 entries) and cache
        # it on the class.
        self.__class__.table2 = _initNof13Table(2,78)
        return self.__class__.table2
    table2 = property(table2)
    def characters(self):
        ''' convert own codewords to characters
        >>> print(' '.join(hex(c)[2:] for c in USPS_4State('01234567094987654321','01234567891').characters))
        dcb 85c 8e4 b06 6dd 1740 17c6 1200 123f 1b2b
        '''
        if not self._characters:
            codewords = self.codewords
            fcs = self._fcs
            C = []
            aC = C.append
            table1 = self.table1
            table2 = self.table2
            for i in range(10):
                cw = codewords[i]
                # Codewords 0..1286 map through the 5-of-13 table, the
                # remainder through the 2-of-13 table.
                if cw<=1286:
                    c = table1[cw]
                else:
                    c = table2[cw-1287]
                # If FCS bit i is set, use the one's complement of the
                # 13-bit character.
                if (fcs>>i)&1:
                    c = ~c & 0x1fff
                aC(c)
            self._characters = tuple(C)
        return self._characters
    characters = property(characters)
    def barcodes(self):
        '''Get 4 state bar codes for current routing and tracking
        >>> print(USPS_4State('01234567094987654321','01234567891').barcodes)
        AADTFFDFTDADTAADAATFDTDDAAADDTDTTDAFADADDDTFFFDDTTTADFAAADFTDAADA
        '''
        if not self._barcodes:
            C = self.characters
            B = []
            aB = B.append
            bits2bars = self._bits2bars
            # Each of the 65 bars combines one descender bit and one
            # ascender bit, taken from the character/bit positions listed
            # in table4.
            for dc,db,ac,ab in self.table4:
                aB(bits2bars[((C[dc]>>db)&1)+2*((C[ac]>>ab)&1)])
            self._barcodes = ''.join(B)
        return self._barcodes
    barcodes = property(barcodes)
table4 = ((7, 2, 4, 3), (1, 10, 0, 0), (9, 12, 2, 8), (5, 5, 6, 11),
(8, 9, 3, 1), (0, 1, 5, 12), (2, 5, 1, 8), (4, 4, 9, 11),
(6, 3, 8, 10), (3, 9, 7, 6), (5, 11, 1, 4), (8, 5, 2, 12),
(9, 10, 0, 2), (7, 1, 6, 7), (3, 6, 4, 9), (0, 3, 8, 6),
(6, 4, 2, 7), (1, 1, 9, 9), (7, 10, 5, 2), (4, 0, 3, 8),
(6, 2, 0, 4), (8, 11, 1, 0), (9, 8, 3, 12), (2, 6, 7, 7),
(5, 1, 4, 10), (1, 12, 6, 9), (7, 3, 8, 0), (5, 8, 9, 7),
(4, 6, 2, 10), (3, 4, 0, 5), (8, 4, 5, 7), (7, 11, 1, 9),
(6, 0, 9, 6), (0, 6, 4, 8), (2, 1, 3, 2), (5, 9, 8, 12),
(4, 11, 6, 1), (9, 5, 7, 4), (3, 3, 1, 2), (0, 7, 2, 0),
(1, 3, 4, 1), (6, 10, 3, 5), (8, 7, 9, 4), (2, 11, 5, 6),
(0, 8, 7, 12), (4, 2, 8, 1), (5, 10, 3, 0), (9, 3, 0, 9),
(6, 5, 2, 4), (7, 8, 1, 7), (5, 0, 4, 5), (2, 3, 0, 10),
(6, 12, 9, 2), (3, 11, 1, 6), (8, 8, 7, 9), (5, 4, 0, 11),
(1, 5, 2, 2), (9, 1, 4, 12), (8, 3, 6, 6), (7, 0, 3, 7),
(4, 7, 7, 5), (0, 12, 1, 11), (2, 9, 9, 0), (6, 8, 5, 3),
(3, 10, 8, 2))
_bits2bars = 'T','D','A','F'
horizontalClearZone = property(lambda self: self.scale('hcz',self.dimensions,self.widthScale))
verticalClearZone = property(lambda self: self.scale('vcz',self.dimensions,self.heightScale))
    @property
    def barWidth(self):
        # An explicitly-set width wins; otherwise interpolate from the
        # specification range using the current width scale.
        if '_barWidth' in self.__dict__:
            return self.__dict__['_barWidth']
        return self.scale('width',self.dimensions,self.widthScale)

    @barWidth.setter
    def barWidth(self,value):
        # Clamp the assigned value (points) to the legal range (inches)
        # and store it back in points.
        n, x = self.dimensions['width']
        self.__dict__['_barWidth'] = 72*min(max(value/72.0,n),x)

    @property
    def pitch(self):
        # Horizontal distance between bar starts (points).
        if '_pitch' in self.__dict__:
            return self.__dict__['_pitch']
        return self.scale('pitch',self.dimensions,self.widthScale)

    @pitch.setter
    def pitch(self,value):
        # Clamp to the legal pitch range, as for barWidth.
        n, x = self.dimensions['pitch']
        self.__dict__['_pitch'] = 72*min(max(value/72.0,n),x)
    @property
    def barHeight(self):
        # Height of a full (F) bar in points: top minus bottom extent.
        if '_barHeight' in self.__dict__:
            return self.__dict__['_barHeight']
        return self.scale('F',self.tops,self.heightScale) - self.scale('F',self.bottoms,self.heightScale)

    @barHeight.setter
    def barHeight(self,value):
        # Clamp to the legal full-bar height range and keep heightSize in
        # step so the other bar types scale consistently.
        n = self.tops['F'][0] - self.bottoms['F'][0]
        x = self.tops['F'][1] - self.bottoms['F'][1]
        # NOTE(review): value here is in points while n and x are in
        # inches; the heightSize fraction below mixes the two units --
        # confirm whether (value/72.0 - n)/(x-n) was intended.
        value = self.__dict__['_barHeight'] = 72*min(max(value/72.0,n),x)
        self.heightSize = (value - n)/(x-n)

    # Size fractions clamped into [0, 1].
    widthScale = property(lambda self: min(1,max(0,self.widthSize)))
    heightScale = property(lambda self: min(1,max(0,self.heightSize)))
def width(self):
self.computeSize()
return self._width
width = property(width)
def height(self):
self.computeSize()
return self._height
height = property(height)
def computeSize(self):
if not getattr(self,'_sized',None):
ws = self.widthScale
hs = self.heightScale
barHeight = self.barHeight
barWidth = self.barWidth
pitch = self.pitch
hcz = self.horizontalClearZone
vcz = self.verticalClearZone
self._width = 2*hcz + barWidth + 64*pitch
self._height = 2*vcz+barHeight
if self.humanReadable:
self._height += self.fontSize*1.2+vcz
self._sized = True
    def wrap(self,aW,aH):
        # Flowable protocol: the symbol has a fixed size, so the available
        # width/height are ignored.
        self.computeSize()
        return self.width, self.height
def _getBarVInfo(self,y0=0):
vInfo = {}
hs = self.heightScale
for b in ('T','D','A','F'):
y = self.scale(b,self.bottoms,hs)+y0
vInfo[b] = y,self.scale(b,self.tops,hs)+y0 - y
return vInfo
    def draw(self):
        """Render the bars left to right, then the optional text line."""
        self.computeSize()
        hcz = self.horizontalClearZone
        vcz = self.verticalClearZone
        bw = self.barWidth
        x = hcz                       # first bar starts after the clear zone
        y0 = vcz+self.barHeight*0.5   # vertical reference for bar placement
        dw = self.pitch               # distance between successive bar starts
        vInfo = self._getBarVInfo(y0) # letter -> (bottom y, bar height)
        for b in self.barcodes:
            yb, hb = vInfo[b]
            self.rect(x,yb,bw,hb)
            x += dw
        self.drawHumanReadable()
def value(self):
tracking = self.tracking
routing = self.routing
routing = routing and (routing,) or ()
return ' '.join((tracking[0:2],tracking[2:5],tracking[5:11],tracking[11:])+routing)
value = property(value,lambda self,value: self.__dict__.__setitem__('tracking',value))
    def drawHumanReadable(self):
        # Draw the spaced tracking/routing digits above the bars, if enabled.
        if self.humanReadable:
            hcz = self.horizontalClearZone
            vcz = self.verticalClearZone
            fontName = self.fontName
            fontSize = self.fontSize
            # baseline: above the bars plus both clear zones and a small lead
            y = self.barHeight+2*vcz+0.2*fontSize
            self.annotate(hcz,y,self.value,fontName,fontSize)
    def annotate(self,x,y,text,fontName,fontSize,anchor='middle'):
        # NOTE(review): the anchor argument is ignored and 'start' is forced,
        # so the text is always left-aligned.  Looks deliberate for this
        # symbology, but confirm before relying on the parameter.
        Barcode.annotate(self,x,y,text,fontName,fontSize,anchor='start')
def _crc11(value):
    '''
    >>> usps = [USPS_4State('01234567094987654321',x).binary for x in ('','01234','012345678','01234567891')]
    >>> print(' '.join(nhex(x) for x in usps))
    0x1122103b5c2004b1 0xd138a87bab5cf3804b1 0x202bdc097711204d21804b1 0x16907b2a24abc16a2e5c004b1
    >>> print(' '.join(nhex(_crc11(x)) for x in usps))
    0x51 0x65 0x606 0x751
    '''
    # 11-bit frame check sequence over a 13-byte payload, of which only the
    # low 6 bits of the most significant byte participate (6 + 12*8 = 102
    # bits total).  The previous revision duplicated the shift-register loop
    # for the first byte and the remaining twelve; it is factored out here.
    def _feed(fcs, data, nbits):
        # Shift nbits of data (MSB first) through the FCS register using
        # generator polynomial 0x0F35.
        for _ in range(nbits):
            if (fcs ^ data)&0x400:
                fcs = (fcs<<1)^0x0F35
            else:
                fcs = fcs<<1
            fcs &= 0x7ff
            data <<= 1
        return fcs
    hexbytes = nhex(int(value))[2:]
    hexbytes = '0'*(26-len(hexbytes))+hexbytes  # left-pad to 13 bytes
    fcs = _feed(0x07FF, int(hexbytes[:2],16)<<5, 6)  # 6 bits of first byte
    for x in range(2,26,2):                          # the other 12 bytes
        fcs = _feed(fcs, int(hexbytes[x:x+2],16)<<3, 8)
    return fcs
def _ru13(i):
'''reverse unsigned 13 bit number
>>> print(_ru13(7936), _ru13(31), _ru13(47), _ru13(7808))
31 7936 7808 47
'''
r = 0
for x in range(13):
r <<= 1
r |= i & 1
i >>= 1
return r
def _initNof13Table(N,lenT):
    '''create and return table of 13 bit values with N bits on
    >>> T = _initNof13Table(5,1287)
    >>> print(' '.join('T[%d]=%d' % (i, T[i]) for i in (0,1,2,3,4,1271,1272,1284,1285,1286)))
    T[0]=31 T[1]=7936 T[2]=47 T[3]=7808 T[4]=55 T[1271]=6275 T[1272]=6211 T[1284]=856 T[1285]=744 T[1286]=496
    '''
    table = lenT*[None]
    lo = 0          # next free slot at the bottom of the table
    hi = lenT - 1   # next free slot at the top of the table
    for c in range(8192):
        if bin(c).count('1') != N:
            continue  # wrong popcount: not an N-of-13 pattern
        r = _ru13(c)
        if r < c:
            continue  # the (r, c) pair was already handled as (c, r)
        if r == c:
            # bit-reversal symmetric pattern: fill from the top end
            table[hi] = c
            hi -= 1
        else:
            # asymmetric pair: store both orientations from the bottom end
            table[lo] = c
            lo += 1
            table[lo] = r
            lo += 1
    assert lo==(hi+1), 'u+1(%d)!=l(%d) for %d of 13 table' % (hi+1,lo,N)
    return table
def _test():
    """Run this module's doctests and return doctest's result tuple."""
    import doctest
    return doctest.testmod()
if __name__ == "__main__":
    # Execute the embedded doctests when run as a script.
    _test()
| gpl-2.0 |
rabipanda/tensorflow | tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py | 13 | 6633 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for moving_average_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tempfile
import six
from tensorflow.contrib.opt.python.training import moving_average_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver
class MovingAverageOptimizerTest(test.TestCase):
  """Tests for MovingAverageOptimizer.

  Exercises end-to-end training with both sequential and parallel
  moving-average updates, the swapping saver round-trip, restoring averaged
  values through a normal saver, and delegation to the wrapped optimizer.
  """

  def testRun(self):
    """Trains two variables for two steps and checks saver behavior.

    The swapping saver's save/restore must be an identity, and the normal
    saver must restore the moving averages (which lie strictly between the
    original values and the latest trained values).
    """
    for sequential_update in [True, False]:
      for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
        with self.test_session() as sess:
          orig_val0 = [1.0, 2.0]
          orig_val1 = [3.0, 4.0]
          var0 = variables.Variable(orig_val0, name='var0', dtype=dtype)
          var1 = variables.Variable(orig_val1, name='var1', dtype=dtype)
          grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
          grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
          opt = moving_average_optimizer.MovingAverageOptimizer(
              gradient_descent.GradientDescentOptimizer(learning_rate=2.0),
              average_decay=0.5,
              sequential_update=sequential_update)
          save_dir = tempfile.mkdtemp(
              prefix=os.path.join(self.get_temp_dir(), 'run_1'))
          save_path = os.path.join(save_dir, 'model')
          update = opt.apply_gradients(
              list(six.moves.zip([grads0, grads1], [var0, var1])))
          train_saver = opt.swapping_saver()
          inference_saver = saver.Saver()
          variables.global_variables_initializer().run()
          # Step 1.  (Dead val0/val1 assignments that used to precede these
          # assertions were removed; the asserts evaluate the vars directly.)
          update.run()
          self.assertAllCloseAccordingToType([0.8, 1.8], var0.eval())
          self.assertAllCloseAccordingToType([2.98, 3.98], var1.eval())
          # Test that the swapping saver save/restore operation is identity.
          train_saver.save(sess, save_path)
          train_saver.restore(sess, save_path)
          self.assertAllCloseAccordingToType([0.8, 1.8], var0.eval())
          self.assertAllCloseAccordingToType([2.98, 3.98], var1.eval())
          # If updates are parallel, this is not always true after the 1st step.
          if sequential_update:
            # Test that the normal saver will have the averaged variables.
            # We test that the average values are between the original value
            # and the most recent variable values (since they are an average
            # of the two).
            val0 = var0.eval()
            val1 = var1.eval()
            train_saver.save(sess, save_path)
            inference_saver.restore(sess, save_path)
            avg_val0 = var0.eval()
            avg_val1 = var1.eval()
            for i in six.moves.range(len(val0)):
              self.assertLess(val0[i], avg_val0[i])
              self.assertLess(avg_val0[i], orig_val0[i])
              self.assertLess(val1[i], avg_val1[i])
              self.assertLess(avg_val1[i], orig_val1[i])
            # Put the trained (non-averaged) values back before step 2.
            train_saver.restore(sess, save_path)
          # Step 2.
          update.run()
          # Test that the normal saver will have the averaged variables.
          # We test that the average values are between the original value and
          # the most recent variable values (since they are an average of the
          # two).
          val0 = var0.eval()
          val1 = var1.eval()
          self.assertAllCloseAccordingToType([0.6, 1.6], val0)
          self.assertAllCloseAccordingToType([2.96, 3.96], val1)
          train_saver.save(sess, save_path)
          inference_saver.restore(sess, save_path)
          avg_val0 = var0.eval()
          avg_val1 = var1.eval()
          for i in six.moves.range(len(val0)):
            self.assertLess(val0[i], avg_val0[i])
            self.assertLess(avg_val0[i], orig_val0[i])
            self.assertLess(val1[i], avg_val1[i])
            self.assertLess(avg_val1[i], orig_val1[i])

  def testFailWhenSaverCreatedBeforeInitialized(self):
    """swapping_saver() before apply_gradients() must raise RuntimeError."""
    with self.test_session():
      var = variables.Variable([1.0], name='var', dtype=dtypes.float32)
      opt = moving_average_optimizer.MovingAverageOptimizer(
          gradient_descent.GradientDescentOptimizer(learning_rate=2.0))
      # We didn't call apply_gradients yet.
      # This will raise an exception.
      with self.assertRaises(RuntimeError):
        _ = opt.swapping_saver([var])

  def testCorrectOverride(self):
    """minimize() must delegate compute/apply_gradients to the wrapped
    optimizer, and the variable must still be trained."""

    class WrapperOptimizer(gradient_descent.GradientDescentOptimizer):
      # Records which of its methods MovingAverageOptimizer invoked.

      def compute_gradients(self, *args, **kwargs):
        self.compute_gradients_called = True
        return super(WrapperOptimizer, self).compute_gradients(
            *args, **kwargs)

      def apply_gradients(self, *args, **kwargs):
        self.apply_gradients_called = True
        return super(WrapperOptimizer, self).apply_gradients(*args, **kwargs)

    with self.test_session() as sess:
      var = variables.Variable([1.2], name='var', dtype=dtypes.float32)
      loss = var ** 2
      wrapper_opt = WrapperOptimizer(learning_rate=2.0)
      opt = moving_average_optimizer.MovingAverageOptimizer(wrapper_opt)
      train_op = opt.minimize(loss)
      # Check that both methods are called on the underlying optimizer.
      self.assertTrue(wrapper_opt.compute_gradients_called)
      self.assertTrue(wrapper_opt.apply_gradients_called)
      # Run train_op once, and verify that we've updated the variable.
      variables.global_variables_initializer().run()
      sess.run(train_op)
      var_value = sess.run(var)
      # Started at 1.2, gradient is 2*1.2=2.4, lr=2, so should now be -3.6.
      self.assertNear(-3.6, var_value, 1e-6)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
vrtadmin/clamav-devel | win32/3rdparty/libxml2/python/libxml2class.py | 1 | 321365 | #
# Functions from module HTMLparser
#
def htmlCreateMemoryParserCtxt(buffer, size):
    """Create a parser context for an HTML document held in memory."""
    ctxt = libxml2mod.htmlCreateMemoryParserCtxt(buffer, size)
    if ctxt is None:
        raise parserError('htmlCreateMemoryParserCtxt() failed')
    return parserCtxt(_obj=ctxt)
def htmlHandleOmittedElem(val):
    """Set handling of HTML omitted tags and return the previous value."""
    return libxml2mod.htmlHandleOmittedElem(val)
def htmlIsScriptAttribute(name):
"""Check if an attribute is of content type Script """
ret = libxml2mod.htmlIsScriptAttribute(name)
return ret
def htmlNewParserCtxt():
"""Allocate and initialize a new parser context. """
ret = libxml2mod.htmlNewParserCtxt()
if ret is None:raise parserError('htmlNewParserCtxt() failed')
return parserCtxt(_obj=ret)
def htmlParseDoc(cur, encoding):
"""parse an HTML in-memory document and build a tree. """
ret = libxml2mod.htmlParseDoc(cur, encoding)
if ret is None:raise parserError('htmlParseDoc() failed')
return xmlDoc(_obj=ret)
def htmlParseFile(filename, encoding):
"""parse an HTML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. """
ret = libxml2mod.htmlParseFile(filename, encoding)
if ret is None:raise parserError('htmlParseFile() failed')
return xmlDoc(_obj=ret)
def htmlReadDoc(cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.htmlReadDoc(cur, URL, encoding, options)
if ret is None:raise treeError('htmlReadDoc() failed')
return xmlDoc(_obj=ret)
def htmlReadFd(fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. """
ret = libxml2mod.htmlReadFd(fd, URL, encoding, options)
if ret is None:raise treeError('htmlReadFd() failed')
return xmlDoc(_obj=ret)
def htmlReadFile(filename, encoding, options):
"""parse an XML file from the filesystem or the network. """
ret = libxml2mod.htmlReadFile(filename, encoding, options)
if ret is None:raise treeError('htmlReadFile() failed')
return xmlDoc(_obj=ret)
def htmlReadMemory(buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.htmlReadMemory(buffer, size, URL, encoding, options)
if ret is None:raise treeError('htmlReadMemory() failed')
return xmlDoc(_obj=ret)
#
# Functions from module HTMLtree
#
def htmlIsBooleanAttr(name):
"""Determine if a given attribute is a boolean attribute. """
ret = libxml2mod.htmlIsBooleanAttr(name)
return ret
def htmlNewDoc(URI, ExternalID):
"""Creates a new HTML document """
ret = libxml2mod.htmlNewDoc(URI, ExternalID)
if ret is None:raise treeError('htmlNewDoc() failed')
return xmlDoc(_obj=ret)
def htmlNewDocNoDtD(URI, ExternalID):
"""Creates a new HTML document without a DTD node if @URI and
@ExternalID are None """
ret = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
if ret is None:raise treeError('htmlNewDocNoDtD() failed')
return xmlDoc(_obj=ret)
#
# Functions from module SAX2
#
def SAXDefaultVersion(version):
"""Set the default version of SAX used globally by the
library. By default, during initialization the default is
set to 2. Note that it is generally a better coding style
to use xmlSAXVersion() to set up the version explicitly for
a given parsing context. """
ret = libxml2mod.xmlSAXDefaultVersion(version)
return ret
def defaultSAXHandlerInit():
"""Initialize the default SAX2 handler """
libxml2mod.xmlDefaultSAXHandlerInit()
def docbDefaultSAXHandlerInit():
"""Initialize the default SAX handler """
libxml2mod.docbDefaultSAXHandlerInit()
def htmlDefaultSAXHandlerInit():
"""Initialize the default SAX handler """
libxml2mod.htmlDefaultSAXHandlerInit()
#
# Functions from module catalog
#
def catalogAdd(type, orig, replace):
"""Add an entry in the catalog, it may overwrite existing but
different entries. If called before any other catalog
routine, allows to override the default shared catalog put
in place by xmlInitializeCatalog(); """
ret = libxml2mod.xmlCatalogAdd(type, orig, replace)
return ret
def catalogCleanup():
"""Free up all the memory associated with catalogs """
libxml2mod.xmlCatalogCleanup()
def catalogConvert():
"""Convert all the SGML catalog entries as XML ones """
ret = libxml2mod.xmlCatalogConvert()
return ret
def catalogDump(out):
    """Dump all the global catalog content to the given file. """
    # Flush Python-level buffers first -- presumably because the C library
    # writes directly to the underlying descriptor, so unflushed Python
    # output would otherwise be interleaved out of order; confirm.
    if out is not None: out.flush()
    libxml2mod.xmlCatalogDump(out)
def catalogGetPublic(pubID):
"""Try to lookup the catalog reference associated to a public
ID DEPRECATED, use xmlCatalogResolvePublic() """
ret = libxml2mod.xmlCatalogGetPublic(pubID)
return ret
def catalogGetSystem(sysID):
"""Try to lookup the catalog reference associated to a system
ID DEPRECATED, use xmlCatalogResolveSystem() """
ret = libxml2mod.xmlCatalogGetSystem(sysID)
return ret
def catalogRemove(value):
"""Remove an entry from the catalog """
ret = libxml2mod.xmlCatalogRemove(value)
return ret
def catalogResolve(pubID, sysID):
"""Do a complete resolution lookup of an External Identifier """
ret = libxml2mod.xmlCatalogResolve(pubID, sysID)
return ret
def catalogResolvePublic(pubID):
"""Try to lookup the catalog reference associated to a public
ID """
ret = libxml2mod.xmlCatalogResolvePublic(pubID)
return ret
def catalogResolveSystem(sysID):
"""Try to lookup the catalog resource for a system ID """
ret = libxml2mod.xmlCatalogResolveSystem(sysID)
return ret
def catalogResolveURI(URI):
"""Do a complete resolution lookup of an URI """
ret = libxml2mod.xmlCatalogResolveURI(URI)
return ret
def catalogSetDebug(level):
"""Used to set the debug level for catalog operation, 0
disable debugging, 1 enable it """
ret = libxml2mod.xmlCatalogSetDebug(level)
return ret
def initializeCatalog():
"""Do the catalog initialization. this function is not thread
safe, catalog initialization should preferably be done once
at startup """
libxml2mod.xmlInitializeCatalog()
def loadACatalog(filename):
"""Load the catalog and build the associated data structures.
This can be either an XML Catalog or an SGML Catalog It
will recurse in SGML CATALOG entries. On the other hand XML
Catalogs are not handled recursively. """
ret = libxml2mod.xmlLoadACatalog(filename)
if ret is None:raise treeError('xmlLoadACatalog() failed')
return catalog(_obj=ret)
def loadCatalog(filename):
"""Load the catalog and makes its definitions effective for
the default external entity loader. It will recurse in SGML
CATALOG entries. this function is not thread safe, catalog
initialization should preferably be done once at startup """
ret = libxml2mod.xmlLoadCatalog(filename)
return ret
def loadCatalogs(pathss):
"""Load the catalogs and makes their definitions effective for
the default external entity loader. this function is not
thread safe, catalog initialization should preferably be
done once at startup """
libxml2mod.xmlLoadCatalogs(pathss)
def loadSGMLSuperCatalog(filename):
"""Load an SGML super catalog. It won't expand CATALOG or
DELEGATE references. This is only needed for manipulating
SGML Super Catalogs like adding and removing CATALOG or
DELEGATE entries. """
ret = libxml2mod.xmlLoadSGMLSuperCatalog(filename)
if ret is None:raise treeError('xmlLoadSGMLSuperCatalog() failed')
return catalog(_obj=ret)
def newCatalog(sgml):
"""create a new Catalog. """
ret = libxml2mod.xmlNewCatalog(sgml)
if ret is None:raise treeError('xmlNewCatalog() failed')
return catalog(_obj=ret)
def parseCatalogFile(filename):
"""parse an XML file and build a tree. It's like
xmlParseFile() except it bypass all catalog lookups. """
ret = libxml2mod.xmlParseCatalogFile(filename)
if ret is None:raise parserError('xmlParseCatalogFile() failed')
return xmlDoc(_obj=ret)
#
# Functions from module chvalid
#
def isBaseChar(ch):
"""This function is DEPRECATED. Use xmlIsBaseChar_ch or
xmlIsBaseCharQ instead """
ret = libxml2mod.xmlIsBaseChar(ch)
return ret
def isBlank(ch):
"""This function is DEPRECATED. Use xmlIsBlank_ch or
xmlIsBlankQ instead """
ret = libxml2mod.xmlIsBlank(ch)
return ret
def isChar(ch):
"""This function is DEPRECATED. Use xmlIsChar_ch or xmlIsCharQ
instead """
ret = libxml2mod.xmlIsChar(ch)
return ret
def isCombining(ch):
"""This function is DEPRECATED. Use xmlIsCombiningQ instead """
ret = libxml2mod.xmlIsCombining(ch)
return ret
def isDigit(ch):
"""This function is DEPRECATED. Use xmlIsDigit_ch or
xmlIsDigitQ instead """
ret = libxml2mod.xmlIsDigit(ch)
return ret
def isExtender(ch):
"""This function is DEPRECATED. Use xmlIsExtender_ch or
xmlIsExtenderQ instead """
ret = libxml2mod.xmlIsExtender(ch)
return ret
def isIdeographic(ch):
"""This function is DEPRECATED. Use xmlIsIdeographicQ instead """
ret = libxml2mod.xmlIsIdeographic(ch)
return ret
def isPubidChar(ch):
"""This function is DEPRECATED. Use xmlIsPubidChar_ch or
xmlIsPubidCharQ instead """
ret = libxml2mod.xmlIsPubidChar(ch)
return ret
#
# Functions from module debugXML
#
def boolToText(boolval):
    """Convenient way to turn bool into text """
    return libxml2mod.xmlBoolToText(boolval)
def debugDumpString(output, str):
    """Dump information about the string, shortening it if necessary. """
    # Flush first -- same pattern as catalogDump(): the C side presumably
    # writes to the raw descriptor; confirm.
    if output is not None: output.flush()
    libxml2mod.xmlDebugDumpString(output, str)
def shellPrintXPathError(errorType, arg):
"""Print the xpath error to libxml default error channel """
libxml2mod.xmlShellPrintXPathError(errorType, arg)
#
# Functions from module dict
#
def dictCleanup():
"""Free the dictionary mutex. Do not call unless sure the
library is not in use anymore ! """
libxml2mod.xmlDictCleanup()
def initializeDict():
"""Do the dictionary mutex initialization. this function is
deprecated """
ret = libxml2mod.xmlInitializeDict()
return ret
#
# Functions from module encoding
#
def addEncodingAlias(name, alias):
"""Registers an alias @alias for an encoding named @name.
Existing alias will be overwritten. """
ret = libxml2mod.xmlAddEncodingAlias(name, alias)
return ret
def cleanupCharEncodingHandlers():
"""Cleanup the memory allocated for the char encoding support,
it unregisters all the encoding handlers and the aliases. """
libxml2mod.xmlCleanupCharEncodingHandlers()
def cleanupEncodingAliases():
"""Unregisters all aliases """
libxml2mod.xmlCleanupEncodingAliases()
def delEncodingAlias(alias):
"""Unregisters an encoding alias @alias """
ret = libxml2mod.xmlDelEncodingAlias(alias)
return ret
def encodingAlias(alias):
"""Lookup an encoding name for the given alias. """
ret = libxml2mod.xmlGetEncodingAlias(alias)
return ret
def initCharEncodingHandlers():
"""Initialize the char encoding support, it registers the
default encoding supported. NOTE: while public, this
function usually doesn't need to be called in normal
processing. """
libxml2mod.xmlInitCharEncodingHandlers()
#
# Functions from module entities
#
def cleanupPredefinedEntities():
"""Cleanup up the predefined entities table. Deprecated call """
libxml2mod.xmlCleanupPredefinedEntities()
def initializePredefinedEntities():
"""Set up the predefined entities. Deprecated call """
libxml2mod.xmlInitializePredefinedEntities()
def predefinedEntity(name):
"""Check whether this name is an predefined entity. """
ret = libxml2mod.xmlGetPredefinedEntity(name)
if ret is None:raise treeError('xmlGetPredefinedEntity() failed')
return xmlEntity(_obj=ret)
#
# Functions from module globals
#
def cleanupGlobals():
"""Additional cleanup for multi-threading """
libxml2mod.xmlCleanupGlobals()
def initGlobals():
"""Additional initialisation for multi-threading """
libxml2mod.xmlInitGlobals()
def thrDefDefaultBufferSize(v):
ret = libxml2mod.xmlThrDefDefaultBufferSize(v)
return ret
def thrDefDoValidityCheckingDefaultValue(v):
ret = libxml2mod.xmlThrDefDoValidityCheckingDefaultValue(v)
return ret
def thrDefGetWarningsDefaultValue(v):
ret = libxml2mod.xmlThrDefGetWarningsDefaultValue(v)
return ret
def thrDefIndentTreeOutput(v):
ret = libxml2mod.xmlThrDefIndentTreeOutput(v)
return ret
def thrDefKeepBlanksDefaultValue(v):
ret = libxml2mod.xmlThrDefKeepBlanksDefaultValue(v)
return ret
def thrDefLineNumbersDefaultValue(v):
ret = libxml2mod.xmlThrDefLineNumbersDefaultValue(v)
return ret
def thrDefLoadExtDtdDefaultValue(v):
ret = libxml2mod.xmlThrDefLoadExtDtdDefaultValue(v)
return ret
def thrDefParserDebugEntities(v):
ret = libxml2mod.xmlThrDefParserDebugEntities(v)
return ret
def thrDefPedanticParserDefaultValue(v):
ret = libxml2mod.xmlThrDefPedanticParserDefaultValue(v)
return ret
def thrDefSaveNoEmptyTags(v):
ret = libxml2mod.xmlThrDefSaveNoEmptyTags(v)
return ret
def thrDefSubstituteEntitiesDefaultValue(v):
ret = libxml2mod.xmlThrDefSubstituteEntitiesDefaultValue(v)
return ret
def thrDefTreeIndentString(v):
ret = libxml2mod.xmlThrDefTreeIndentString(v)
return ret
#
# Functions from module nanoftp
#
def nanoFTPCleanup():
"""Cleanup the FTP protocol layer. This cleanup proxy
informations. """
libxml2mod.xmlNanoFTPCleanup()
def nanoFTPInit():
"""Initialize the FTP protocol layer. Currently it just checks
for proxy informations, and get the hostname """
libxml2mod.xmlNanoFTPInit()
def nanoFTPProxy(host, port, user, passwd, type):
"""Setup the FTP proxy informations. This can also be done by
using ftp_proxy ftp_proxy_user and ftp_proxy_password
environment variables. """
libxml2mod.xmlNanoFTPProxy(host, port, user, passwd, type)
def nanoFTPScanProxy(URL):
"""(Re)Initialize the FTP Proxy context by parsing the URL and
finding the protocol host port it indicates. Should be like
ftp://myproxy/ or ftp://myproxy:3128/ A None URL cleans up
proxy informations. """
libxml2mod.xmlNanoFTPScanProxy(URL)
#
# Functions from module nanohttp
#
def nanoHTTPCleanup():
"""Cleanup the HTTP protocol layer. """
libxml2mod.xmlNanoHTTPCleanup()
def nanoHTTPInit():
"""Initialize the HTTP protocol layer. Currently it just
checks for proxy informations """
libxml2mod.xmlNanoHTTPInit()
def nanoHTTPScanProxy(URL):
"""(Re)Initialize the HTTP Proxy context by parsing the URL
and finding the protocol host port it indicates. Should be
like http://myproxy/ or http://myproxy:3128/ A None URL
cleans up proxy informations. """
libxml2mod.xmlNanoHTTPScanProxy(URL)
#
# Functions from module parser
#
def createDocParserCtxt(cur):
"""Creates a parser context for an XML in-memory document. """
ret = libxml2mod.xmlCreateDocParserCtxt(cur)
if ret is None:raise parserError('xmlCreateDocParserCtxt() failed')
return parserCtxt(_obj=ret)
def initParser():
"""Initialization function for the XML parser. This is not
reentrant. Call once before processing in case of use in
multithreaded programs. """
libxml2mod.xmlInitParser()
def keepBlanksDefault(val):
"""Set and return the previous value for default blanks text
nodes support. The 1.x version of the parser used an
heuristic to try to detect ignorable white spaces. As a
result the SAX callback was generating
xmlSAX2IgnorableWhitespace() callbacks instead of
characters() one, and when using the DOM output text nodes
containing those blanks were not generated. The 2.x and
later version will switch to the XML standard way and
ignorableWhitespace() are only generated when running the
parser in validating mode and when the current element
doesn't allow CDATA or mixed content. This function is
provided as a way to force the standard behavior on 1.X
libs and to switch back to the old mode for compatibility
when running 1.X client code on 2.X . Upgrade of 1.X code
should be done by using xmlIsBlankNode() commodity function
to detect the "empty" nodes generated. This value also
affect autogeneration of indentation when saving code if
blanks sections are kept, indentation is not generated. """
ret = libxml2mod.xmlKeepBlanksDefault(val)
return ret
def lineNumbersDefault(val):
"""Set and return the previous value for enabling line numbers
in elements contents. This may break on old application and
is turned off by default. """
ret = libxml2mod.xmlLineNumbersDefault(val)
return ret
def newParserCtxt():
"""Allocate and initialize a new parser context. """
ret = libxml2mod.xmlNewParserCtxt()
if ret is None:raise parserError('xmlNewParserCtxt() failed')
return parserCtxt(_obj=ret)
def parseDTD(ExternalID, SystemID):
"""Load and parse an external subset. """
ret = libxml2mod.xmlParseDTD(ExternalID, SystemID)
if ret is None:raise parserError('xmlParseDTD() failed')
return xmlDtd(_obj=ret)
def parseDoc(cur):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlParseDoc(cur)
if ret is None:raise parserError('xmlParseDoc() failed')
return xmlDoc(_obj=ret)
def parseEntity(filename):
"""parse an XML external entity out of context and build a
tree. [78] extParsedEnt ::= TextDecl? content This
correspond to a "Well Balanced" chunk """
ret = libxml2mod.xmlParseEntity(filename)
if ret is None:raise parserError('xmlParseEntity() failed')
return xmlDoc(_obj=ret)
def parseFile(filename):
"""parse an XML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. """
ret = libxml2mod.xmlParseFile(filename)
if ret is None:raise parserError('xmlParseFile() failed')
return xmlDoc(_obj=ret)
def parseMemory(buffer, size):
"""parse an XML in-memory block and build a tree. """
ret = libxml2mod.xmlParseMemory(buffer, size)
if ret is None:raise parserError('xmlParseMemory() failed')
return xmlDoc(_obj=ret)
def pedanticParserDefault(val):
"""Set and return the previous value for enabling pedantic
warnings. """
ret = libxml2mod.xmlPedanticParserDefault(val)
return ret
def readDoc(cur, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlReadDoc(cur, URL, encoding, options)
if ret is None:raise treeError('xmlReadDoc() failed')
return xmlDoc(_obj=ret)
def readFd(fd, URL, encoding, options):
"""parse an XML from a file descriptor and build a tree. NOTE
that the file descriptor will not be closed when the reader
is closed or reset. """
ret = libxml2mod.xmlReadFd(fd, URL, encoding, options)
if ret is None:raise treeError('xmlReadFd() failed')
return xmlDoc(_obj=ret)
def readFile(filename, encoding, options):
"""parse an XML file from the filesystem or the network. """
ret = libxml2mod.xmlReadFile(filename, encoding, options)
if ret is None:raise treeError('xmlReadFile() failed')
return xmlDoc(_obj=ret)
def readMemory(buffer, size, URL, encoding, options):
"""parse an XML in-memory document and build a tree. """
ret = libxml2mod.xmlReadMemory(buffer, size, URL, encoding, options)
if ret is None:raise treeError('xmlReadMemory() failed')
return xmlDoc(_obj=ret)
def recoverDoc(cur):
"""parse an XML in-memory document and build a tree. In the
case the document is not Well Formed, a attempt to build a
tree is tried anyway """
ret = libxml2mod.xmlRecoverDoc(cur)
if ret is None:raise treeError('xmlRecoverDoc() failed')
return xmlDoc(_obj=ret)
def recoverFile(filename):
"""parse an XML file and build a tree. Automatic support for
ZLIB/Compress compressed document is provided by default if
found at compile-time. In the case the document is not Well
Formed, it attempts to build a tree anyway """
ret = libxml2mod.xmlRecoverFile(filename)
if ret is None:raise treeError('xmlRecoverFile() failed')
return xmlDoc(_obj=ret)
def recoverMemory(buffer, size):
"""parse an XML in-memory block and build a tree. In the case
the document is not Well Formed, an attempt to build a tree
is tried anyway """
ret = libxml2mod.xmlRecoverMemory(buffer, size)
if ret is None:raise treeError('xmlRecoverMemory() failed')
return xmlDoc(_obj=ret)
def substituteEntitiesDefault(val):
"""Set and return the previous value for default entity
support. Initially the parser always keep entity references
instead of substituting entity values in the output. This
function has to be used to change the default parser
behavior SAX::substituteEntities() has to be used for
changing that on a file by file basis. """
ret = libxml2mod.xmlSubstituteEntitiesDefault(val)
return ret
#
# Functions from module parserInternals
#
def checkLanguageID(lang):
"""Checks that the value conforms to the LanguageID
production: NOTE: this is somewhat deprecated, those
productions were removed from the XML Second edition. [33]
LanguageID ::= Langcode ('-' Subcode)* [34] Langcode ::=
ISO639Code | IanaCode | UserCode [35] ISO639Code ::=
([a-z] | [A-Z]) ([a-z] | [A-Z]) [36] IanaCode ::= ('i' |
'I') '-' ([a-z] | [A-Z])+ [37] UserCode ::= ('x' | 'X') '-'
([a-z] | [A-Z])+ [38] Subcode ::= ([a-z] | [A-Z])+ The
current REC reference the sucessors of RFC 1766, currently
5646 http://www.rfc-editor.org/rfc/rfc5646.txt langtag
= language ["-" script] ["-" region] *("-" variant) *("-"
extension) ["-" privateuse] language = 2*3ALPHA
; shortest ISO 639 code ["-" extlang] ; sometimes
followed by ; extended language subtags / 4ALPHA
; or reserved for future use / 5*8ALPHA ; or
registered language subtag extlang = 3ALPHA
; selected ISO 639 codes *2("-" 3ALPHA) ; permanently
reserved script = 4ALPHA ; ISO 15924
code region = 2ALPHA ; ISO 3166-1 code
/ 3DIGIT ; UN M.49 code variant =
5*8alphanum ; registered variants / (DIGIT
3alphanum) extension = singleton 1*("-" (2*8alphanum))
; Single alphanumerics ; "x" reserved for private use
singleton = DIGIT ; 0 - 9 / %x41-57
; A - W / %x59-5A ; Y - Z / %x61-77
; a - w / %x79-7A ; y - z it sounds right to
still allow Irregular i-xxx IANA and user codes too The
parser below doesn't try to cope with extension or
privateuse that could be added but that's not interoperable
anyway """
ret = libxml2mod.xmlCheckLanguageID(lang)
return ret
def copyChar(len, out, val):
"""append the char value in the array """
ret = libxml2mod.xmlCopyChar(len, out, val)
return ret
def copyCharMultiByte(out, val):
"""append the char value in the array """
ret = libxml2mod.xmlCopyCharMultiByte(out, val)
return ret
def createEntityParserCtxt(URL, ID, base):
"""Create a parser context for an external entity Automatic
support for ZLIB/Compress compressed document is provided
by default if found at compile-time. """
ret = libxml2mod.xmlCreateEntityParserCtxt(URL, ID, base)
if ret is None:raise parserError('xmlCreateEntityParserCtxt() failed')
return parserCtxt(_obj=ret)
def createFileParserCtxt(filename):
    """Create a parser context for a file's content.

    ZLIB/Compress compressed documents are handled transparently when
    support was found at compile time.
    """
    ctxt = libxml2mod.xmlCreateFileParserCtxt(filename)
    if ctxt is None:
        raise parserError('xmlCreateFileParserCtxt() failed')
    return parserCtxt(_obj=ctxt)
def createMemoryParserCtxt(buffer, size):
    """Create a parser context for an XML document held in memory."""
    ctxt = libxml2mod.xmlCreateMemoryParserCtxt(buffer, size)
    if ctxt is None:
        raise parserError('xmlCreateMemoryParserCtxt() failed')
    return parserCtxt(_obj=ctxt)
def createURLParserCtxt(filename, options):
    """Create a parser context for a file or URL content.

    ZLIB/Compress compressed documents are handled transparently when
    support was found at compile time, as are plain file accesses.
    """
    ctxt = libxml2mod.xmlCreateURLParserCtxt(filename, options)
    if ctxt is None:
        raise parserError('xmlCreateURLParserCtxt() failed')
    return parserCtxt(_obj=ctxt)
def htmlCreateFileParserCtxt(filename, encoding):
    """Create an HTML parser context for a file's content.

    ZLIB/Compress compressed documents are handled transparently when
    support was found at compile time.
    """
    ctxt = libxml2mod.htmlCreateFileParserCtxt(filename, encoding)
    if ctxt is None:
        raise parserError('htmlCreateFileParserCtxt() failed')
    return parserCtxt(_obj=ctxt)
def htmlInitAutoClose():
    """Initialize the htmlStartCloseIndex for fast lookup of closing
    tags names.

    This is not reentrant: call xmlInitParser() once before processing
    in case of use in multithreaded programs.
    """
    libxml2mod.htmlInitAutoClose()
def isLetter(c):
    """Check whether *c* is allowed by the production
    [84] Letter ::= BaseChar | Ideographic
    """
    return libxml2mod.xmlIsLetter(c)
def namePop(ctxt):
    """Pop the top element name from the name stack."""
    handle = None if ctxt is None else ctxt._o
    return libxml2mod.namePop(handle)
def namePush(ctxt, value):
    """Push a new element name on top of the name stack."""
    handle = None if ctxt is None else ctxt._o
    return libxml2mod.namePush(handle, value)
def nodePop(ctxt):
    """Pop the top element node from the node stack."""
    handle = None if ctxt is None else ctxt._o
    node = libxml2mod.nodePop(handle)
    if node is None:
        raise treeError('nodePop() failed')
    return xmlNode(_obj=node)
def nodePush(ctxt, value):
    """Push a new element node on top of the node stack."""
    ctxt_handle = None if ctxt is None else ctxt._o
    node_handle = None if value is None else value._o
    return libxml2mod.nodePush(ctxt_handle, node_handle)
#
# Functions from module python
#
def SAXParseFile(SAX, URI, recover):
    """Interface to parse an XML file or resource pointed by an URI to
    build an event flow to the SAX object.

    No value is returned; results are delivered through SAX callbacks.
    """
    libxml2mod.xmlSAXParseFile(SAX, URI, recover)
def createInputBuffer(file, encoding):
    """Create a libxml2 input buffer from a Python file object."""
    buf = libxml2mod.xmlCreateInputBuffer(file, encoding)
    if buf is None:
        raise treeError('xmlCreateInputBuffer() failed')
    return inputBuffer(_obj=buf)
def createOutputBuffer(file, encoding):
    """Create a libxml2 output buffer from a Python file object."""
    buf = libxml2mod.xmlCreateOutputBuffer(file, encoding)
    if buf is None:
        raise treeError('xmlCreateOutputBuffer() failed')
    return outputBuffer(_obj=buf)
def createPushParser(SAX, chunk, size, URI):
    """Create a progressive XML parser context.

    Builds either an event flow if the SAX object is not None, or a DOM
    tree otherwise.
    """
    ctxt = libxml2mod.xmlCreatePushParser(SAX, chunk, size, URI)
    if ctxt is None:
        raise parserError('xmlCreatePushParser() failed')
    return parserCtxt(_obj=ctxt)
def debugMemory(activate):
    """Switch on the generation of line numbers for element nodes.

    Also returns the number of bytes allocated and not freed by libxml2
    since memory debugging was switched on.
    """
    return libxml2mod.xmlDebugMemory(activate)
def dumpMemory():
    """Dump the memory allocated by libxml2 into the file ``.memdump``."""
    libxml2mod.xmlDumpMemory()
def htmlCreatePushParser(SAX, chunk, size, URI):
    """Create a progressive HTML parser context.

    Builds either an event flow if the SAX object is not None, or a DOM
    tree otherwise.
    """
    ctxt = libxml2mod.htmlCreatePushParser(SAX, chunk, size, URI)
    if ctxt is None:
        raise parserError('htmlCreatePushParser() failed')
    return parserCtxt(_obj=ctxt)
def htmlSAXParseFile(SAX, URI, encoding):
    """Interface to parse an HTML file or resource pointed by an URI to
    build an event flow to the SAX object.

    No value is returned; results are delivered through SAX callbacks.
    """
    libxml2mod.htmlSAXParseFile(SAX, URI, encoding)
def memoryUsed():
    """Return the total amount of memory allocated by libxml2."""
    return libxml2mod.xmlMemoryUsed()
def newNode(name):
    """Create a new node with the given name."""
    node = libxml2mod.xmlNewNode(name)
    if node is None:
        raise treeError('xmlNewNode() failed')
    return xmlNode(_obj=node)
def pythonCleanupParser():
    """Cleanup function for the XML library.

    It tries to reclaim all parsing-related global memory allocated for
    the library processing.  It does not deallocate any document-related
    memory.  Calling this function should not prevent reusing the
    library, but one should call xmlCleanupParser() only when the
    process has finished using the library or any XML document built
    with it.
    """
    libxml2mod.xmlPythonCleanupParser()
def setEntityLoader(resolver):
    """Install a Python callable as the entity resolver."""
    return libxml2mod.xmlSetEntityLoader(resolver)
#
# Functions from module relaxng
#
def relaxNGCleanupTypes():
    """Cleanup the default Schemas type library associated to RelaxNG."""
    libxml2mod.xmlRelaxNGCleanupTypes()
def relaxNGInitTypes():
    """Initialize the default type libraries for RelaxNG."""
    return libxml2mod.xmlRelaxNGInitTypes()
def relaxNGNewMemParserCtxt(buffer, size):
    """Create an XML RelaxNGs parse context for a memory buffer that is
    expected to contain an XML RelaxNGs file."""
    ctxt = libxml2mod.xmlRelaxNGNewMemParserCtxt(buffer, size)
    if ctxt is None:
        raise parserError('xmlRelaxNGNewMemParserCtxt() failed')
    return relaxNgParserCtxt(_obj=ctxt)
def relaxNGNewParserCtxt(URL):
    """Create an XML RelaxNGs parse context for a file/resource that is
    expected to contain an XML RelaxNGs file."""
    ctxt = libxml2mod.xmlRelaxNGNewParserCtxt(URL)
    if ctxt is None:
        raise parserError('xmlRelaxNGNewParserCtxt() failed')
    return relaxNgParserCtxt(_obj=ctxt)
#
# Functions from module tree
#
def buildQName(ncname, prefix, memory, len):
    """Build the QName @prefix:@ncname.

    The result is written into @memory when there is enough space and
    prefix is neither None nor empty; otherwise a new string is
    allocated.  If prefix is None or empty, ncname itself is returned.
    """
    return libxml2mod.xmlBuildQName(ncname, prefix, memory, len)
def compressMode():
"""get the default compression mode used, ZLIB based. """
ret = libxml2mod.xmlGetCompressMode()
return ret
def isXHTML(systemID, publicID):
"""Try to find if the document correspond to an XHTML DTD """
ret = libxml2mod.xmlIsXHTML(systemID, publicID)
return ret
def newComment(content):
"""Creation of a new node containing a comment. """
ret = libxml2mod.xmlNewComment(content)
if ret is None:raise treeError('xmlNewComment() failed')
return xmlNode(_obj=ret)
def newDoc(version):
"""Creates a new XML document """
ret = libxml2mod.xmlNewDoc(version)
if ret is None:raise treeError('xmlNewDoc() failed')
return xmlDoc(_obj=ret)
def newPI(name, content):
"""Creation of a processing instruction element. Use
xmlDocNewPI preferably to get string interning """
ret = libxml2mod.xmlNewPI(name, content)
if ret is None:raise treeError('xmlNewPI() failed')
return xmlNode(_obj=ret)
def newText(content):
"""Creation of a new text node. """
ret = libxml2mod.xmlNewText(content)
if ret is None:raise treeError('xmlNewText() failed')
return xmlNode(_obj=ret)
def newTextLen(content, len):
"""Creation of a new text node with an extra parameter for the
content's length """
ret = libxml2mod.xmlNewTextLen(content, len)
if ret is None:raise treeError('xmlNewTextLen() failed')
return xmlNode(_obj=ret)
def setCompressMode(mode):
"""set the default compression mode used, ZLIB based Correct
values: 0 (uncompressed) to 9 (max compression) """
libxml2mod.xmlSetCompressMode(mode)
def validateNCName(value, space):
"""Check that a value conforms to the lexical space of NCName """
ret = libxml2mod.xmlValidateNCName(value, space)
return ret
def validateNMToken(value, space):
"""Check that a value conforms to the lexical space of NMToken """
ret = libxml2mod.xmlValidateNMToken(value, space)
return ret
def validateName(value, space):
"""Check that a value conforms to the lexical space of Name """
ret = libxml2mod.xmlValidateName(value, space)
return ret
def validateQName(value, space):
"""Check that a value conforms to the lexical space of QName """
ret = libxml2mod.xmlValidateQName(value, space)
return ret
#
# Functions from module uri
#
def URIEscape(str):
"""Escaping routine, does not do validity checks ! It will try
to escape the chars needing this, but this is heuristic
based it's impossible to be sure. """
ret = libxml2mod.xmlURIEscape(str)
return ret
def URIEscapeStr(str, list):
"""This routine escapes a string to hex, ignoring reserved
characters (a-z) and the characters in the exception list. """
ret = libxml2mod.xmlURIEscapeStr(str, list)
return ret
def URIUnescapeString(str, len, target):
"""Unescaping routine, but does not check that the string is
an URI. The output is a direct unsigned char translation of
%XX values (no encoding) Note that the length of the result
can only be smaller or same size as the input string. """
ret = libxml2mod.xmlURIUnescapeString(str, len, target)
return ret
def buildRelativeURI(URI, base):
    """Express the URI of the reference in terms relative to the base.

    Examples with base = "http://site1.com/docs/book1.html":
        docs/pic1.gif                  -> pic1.gif
        docs/img/pic1.gif              -> img/pic1.gif
        img/pic1.gif                   -> ../img/pic1.gif
        http://site1.com/docs/pic1.gif -> pic1.gif
        http://site2.com/docs/pic1.gif -> http://site2.com/docs/pic1.gif

    Examples with base = "docs/book1.html":
        docs/pic1.gif                  -> pic1.gif
        docs/img/pic1.gif              -> img/pic1.gif
        img/pic1.gif                   -> ../img/pic1.gif
        http://site1.com/docs/pic1.gif -> http://site1.com/docs/pic1.gif

    Note: if the URI reference is really weird or complicated, it may be
    worthwhile to first convert it into a "nice" one by calling
    xmlBuildURI (using 'base') before calling this routine, since this
    routine (for reasonable efficiency) assumes the URI has already been
    through some validation.
    """
    return libxml2mod.xmlBuildRelativeURI(URI, base)
def buildURI(URI, base):
    """Compute the final URI of the reference.

    Checks that the given URI is valid and builds the final URI using
    the base URI, following section 5.2 of RFC 2396 ("Resolving
    Relative References to Absolute Form").
    """
    return libxml2mod.xmlBuildURI(URI, base)
def canonicPath(path):
"""Constructs a canonic path from the specified path. """
ret = libxml2mod.xmlCanonicPath(path)
return ret
def createURI():
"""Simply creates an empty xmlURI """
ret = libxml2mod.xmlCreateURI()
if ret is None:raise uriError('xmlCreateURI() failed')
return URI(_obj=ret)
def normalizeURIPath(path):
"""Applies the 5 normalization steps to a path string--that
is, RFC 2396 Section 5.2, steps 6.c through 6.g.
Normalization occurs directly on the string, no new
allocation is done """
ret = libxml2mod.xmlNormalizeURIPath(path)
return ret
def parseURI(str):
"""Parse an URI based on RFC 3986 URI-reference = [
absoluteURI | relativeURI ] [ "#" fragment ] """
ret = libxml2mod.xmlParseURI(str)
if ret is None:raise uriError('xmlParseURI() failed')
return URI(_obj=ret)
def parseURIRaw(str, raw):
"""Parse an URI but allows to keep intact the original
fragments. URI-reference = URI / relative-ref """
ret = libxml2mod.xmlParseURIRaw(str, raw)
if ret is None:raise uriError('xmlParseURIRaw() failed')
return URI(_obj=ret)
def pathToURI(path):
"""Constructs an URI expressing the existing path """
ret = libxml2mod.xmlPathToURI(path)
return ret
#
# Functions from module valid
#
def newValidCtxt():
"""Allocate a validation context structure. """
ret = libxml2mod.xmlNewValidCtxt()
if ret is None:raise treeError('xmlNewValidCtxt() failed')
return ValidCtxt(_obj=ret)
def validateNameValue(value):
"""Validate that the given value match Name production """
ret = libxml2mod.xmlValidateNameValue(value)
return ret
def validateNamesValue(value):
"""Validate that the given value match Names production """
ret = libxml2mod.xmlValidateNamesValue(value)
return ret
def validateNmtokenValue(value):
"""Validate that the given value match Nmtoken production [
VC: Name Token ] """
ret = libxml2mod.xmlValidateNmtokenValue(value)
return ret
def validateNmtokensValue(value):
"""Validate that the given value match Nmtokens production [
VC: Name Token ] """
ret = libxml2mod.xmlValidateNmtokensValue(value)
return ret
#
# Functions from module xmlIO
#
def checkFilename(path):
"""function checks to see if @path is a valid source (file,
socket...) for XML. if stat is not available on the target
machine, """
ret = libxml2mod.xmlCheckFilename(path)
return ret
def cleanupInputCallbacks():
"""clears the entire input callback table. this includes the
compiled-in I/O. """
libxml2mod.xmlCleanupInputCallbacks()
def cleanupOutputCallbacks():
"""clears the entire output callback table. this includes the
compiled-in I/O callbacks. """
libxml2mod.xmlCleanupOutputCallbacks()
def fileMatch(filename):
"""input from FILE * """
ret = libxml2mod.xmlFileMatch(filename)
return ret
def iOFTPMatch(filename):
"""check if the URI matches an FTP one """
ret = libxml2mod.xmlIOFTPMatch(filename)
return ret
def iOHTTPMatch(filename):
"""check if the URI matches an HTTP one """
ret = libxml2mod.xmlIOHTTPMatch(filename)
return ret
def normalizeWindowsPath(path):
"""This function is obsolete. Please see xmlURIFromPath in
uri.c for a better solution. """
ret = libxml2mod.xmlNormalizeWindowsPath(path)
return ret
def parserGetDirectory(filename):
"""lookup the directory for that file """
ret = libxml2mod.xmlParserGetDirectory(filename)
return ret
def registerDefaultInputCallbacks():
"""Registers the default compiled-in I/O handlers. """
libxml2mod.xmlRegisterDefaultInputCallbacks()
def registerDefaultOutputCallbacks():
"""Registers the default compiled-in I/O handlers. """
libxml2mod.xmlRegisterDefaultOutputCallbacks()
def registerHTTPPostCallbacks():
"""By default, libxml submits HTTP output requests using the
"PUT" method. Calling this method changes the HTTP output
method to use the "POST" method instead. """
libxml2mod.xmlRegisterHTTPPostCallbacks()
#
# Functions from module xmlerror
#
def lastError():
    """Get the last global error registered.

    This is per thread if compiled with thread support.
    """
    err = libxml2mod.xmlGetLastError()
    if err is None:
        raise treeError('xmlGetLastError() failed')
    return Error(_obj=err)
def resetLastError():
"""Cleanup the last global error registered. For parsing error
this does not change the well-formedness result. """
libxml2mod.xmlResetLastError()
#
# Functions from module xmlreader
#
def newTextReaderFilename(URI):
    """Create an xmlTextReader structure fed with the resource at @URI."""
    reader = libxml2mod.xmlNewTextReaderFilename(URI)
    if reader is None:
        raise treeError('xmlNewTextReaderFilename() failed')
    return xmlTextReader(_obj=reader)
def readerForDoc(cur, URL, encoding, options):
"""Create an xmltextReader for an XML in-memory document. The
parsing flags @options are a combination of xmlParserOption. """
ret = libxml2mod.xmlReaderForDoc(cur, URL, encoding, options)
if ret is None:raise treeError('xmlReaderForDoc() failed')
return xmlTextReader(_obj=ret)
def readerForFd(fd, URL, encoding, options):
"""Create an xmltextReader for an XML from a file descriptor.
The parsing flags @options are a combination of
xmlParserOption. NOTE that the file descriptor will not be
closed when the reader is closed or reset. """
ret = libxml2mod.xmlReaderForFd(fd, URL, encoding, options)
if ret is None:raise treeError('xmlReaderForFd() failed')
return xmlTextReader(_obj=ret)
def readerForFile(filename, encoding, options):
"""parse an XML file from the filesystem or the network. The
parsing flags @options are a combination of xmlParserOption. """
ret = libxml2mod.xmlReaderForFile(filename, encoding, options)
if ret is None:raise treeError('xmlReaderForFile() failed')
return xmlTextReader(_obj=ret)
def readerForMemory(buffer, size, URL, encoding, options):
"""Create an xmltextReader for an XML in-memory document. The
parsing flags @options are a combination of xmlParserOption. """
ret = libxml2mod.xmlReaderForMemory(buffer, size, URL, encoding, options)
if ret is None:raise treeError('xmlReaderForMemory() failed')
return xmlTextReader(_obj=ret)
#
# Functions from module xmlregexp
#
def regexpCompile(regexp):
"""Parses a regular expression conforming to XML Schemas Part
2 Datatype Appendix F and builds an automata suitable for
testing strings against that regular expression """
ret = libxml2mod.xmlRegexpCompile(regexp)
if ret is None:raise treeError('xmlRegexpCompile() failed')
return xmlReg(_obj=ret)
#
# Functions from module xmlschemas
#
def schemaNewMemParserCtxt(buffer, size):
"""Create an XML Schemas parse context for that memory buffer
expected to contain an XML Schemas file. """
ret = libxml2mod.xmlSchemaNewMemParserCtxt(buffer, size)
if ret is None:raise parserError('xmlSchemaNewMemParserCtxt() failed')
return SchemaParserCtxt(_obj=ret)
def schemaNewParserCtxt(URL):
"""Create an XML Schemas parse context for that file/resource
expected to contain an XML Schemas file. """
ret = libxml2mod.xmlSchemaNewParserCtxt(URL)
if ret is None:raise parserError('xmlSchemaNewParserCtxt() failed')
return SchemaParserCtxt(_obj=ret)
#
# Functions from module xmlschemastypes
#
def schemaCleanupTypes():
"""Cleanup the default XML Schemas type library """
libxml2mod.xmlSchemaCleanupTypes()
def schemaCollapseString(value):
"""Removes and normalize white spaces in the string """
ret = libxml2mod.xmlSchemaCollapseString(value)
return ret
def schemaInitTypes():
"""Initialize the default XML Schemas type library """
libxml2mod.xmlSchemaInitTypes()
def schemaWhiteSpaceReplace(value):
"""Replaces 0xd, 0x9 and 0xa with a space. """
ret = libxml2mod.xmlSchemaWhiteSpaceReplace(value)
return ret
#
# Functions from module xmlstring
#
def UTF8Charcmp(utf1, utf2):
"""compares the two UCS4 values """
ret = libxml2mod.xmlUTF8Charcmp(utf1, utf2)
return ret
def UTF8Size(utf):
"""calculates the internal size of a UTF8 character """
ret = libxml2mod.xmlUTF8Size(utf)
return ret
def UTF8Strlen(utf):
"""compute the length of an UTF8 string, it doesn't do a full
UTF8 checking of the content of the string. """
ret = libxml2mod.xmlUTF8Strlen(utf)
return ret
def UTF8Strloc(utf, utfchar):
"""a function to provide the relative location of a UTF8 char """
ret = libxml2mod.xmlUTF8Strloc(utf, utfchar)
return ret
def UTF8Strndup(utf, len):
"""a strndup for array of UTF8's """
ret = libxml2mod.xmlUTF8Strndup(utf, len)
return ret
def UTF8Strpos(utf, pos):
"""a function to provide the equivalent of fetching a
character from a string array """
ret = libxml2mod.xmlUTF8Strpos(utf, pos)
return ret
def UTF8Strsize(utf, len):
"""storage size of an UTF8 string the behaviour is not
guaranteed if the input string is not UTF-8 """
ret = libxml2mod.xmlUTF8Strsize(utf, len)
return ret
def UTF8Strsub(utf, start, len):
"""Create a substring from a given UTF-8 string Note:
positions are given in units of UTF-8 chars """
ret = libxml2mod.xmlUTF8Strsub(utf, start, len)
return ret
def checkUTF8(utf):
    """Check @utf for being valid UTF-8.

    @utf is assumed to be null-terminated.  This function is not
    super-strict, as it will allow longer UTF-8 sequences than
    necessary.  Note that Java is capable of producing these sequences
    if provoked.  Also note, this routine checks for the 4-byte maximum
    size, but does not check for the 0x10ffff maximum value.
    """
    return libxml2mod.xmlCheckUTF8(utf)
#
# Functions from module xmlunicode
#
def uCSIsAegeanNumbers(code):
"""Check whether the character is part of AegeanNumbers UCS
Block """
ret = libxml2mod.xmlUCSIsAegeanNumbers(code)
return ret
def uCSIsAlphabeticPresentationForms(code):
"""Check whether the character is part of
AlphabeticPresentationForms UCS Block """
ret = libxml2mod.xmlUCSIsAlphabeticPresentationForms(code)
return ret
def uCSIsArabic(code):
"""Check whether the character is part of Arabic UCS Block """
ret = libxml2mod.xmlUCSIsArabic(code)
return ret
def uCSIsArabicPresentationFormsA(code):
"""Check whether the character is part of
ArabicPresentationForms-A UCS Block """
ret = libxml2mod.xmlUCSIsArabicPresentationFormsA(code)
return ret
def uCSIsArabicPresentationFormsB(code):
"""Check whether the character is part of
ArabicPresentationForms-B UCS Block """
ret = libxml2mod.xmlUCSIsArabicPresentationFormsB(code)
return ret
def uCSIsArmenian(code):
"""Check whether the character is part of Armenian UCS Block """
ret = libxml2mod.xmlUCSIsArmenian(code)
return ret
def uCSIsArrows(code):
"""Check whether the character is part of Arrows UCS Block """
ret = libxml2mod.xmlUCSIsArrows(code)
return ret
def uCSIsBasicLatin(code):
"""Check whether the character is part of BasicLatin UCS Block """
ret = libxml2mod.xmlUCSIsBasicLatin(code)
return ret
def uCSIsBengali(code):
"""Check whether the character is part of Bengali UCS Block """
ret = libxml2mod.xmlUCSIsBengali(code)
return ret
def uCSIsBlock(code, block):
"""Check whether the character is part of the UCS Block """
ret = libxml2mod.xmlUCSIsBlock(code, block)
return ret
def uCSIsBlockElements(code):
"""Check whether the character is part of BlockElements UCS
Block """
ret = libxml2mod.xmlUCSIsBlockElements(code)
return ret
def uCSIsBopomofo(code):
"""Check whether the character is part of Bopomofo UCS Block """
ret = libxml2mod.xmlUCSIsBopomofo(code)
return ret
def uCSIsBopomofoExtended(code):
"""Check whether the character is part of BopomofoExtended UCS
Block """
ret = libxml2mod.xmlUCSIsBopomofoExtended(code)
return ret
def uCSIsBoxDrawing(code):
"""Check whether the character is part of BoxDrawing UCS Block """
ret = libxml2mod.xmlUCSIsBoxDrawing(code)
return ret
def uCSIsBraillePatterns(code):
"""Check whether the character is part of BraillePatterns UCS
Block """
ret = libxml2mod.xmlUCSIsBraillePatterns(code)
return ret
def uCSIsBuhid(code):
"""Check whether the character is part of Buhid UCS Block """
ret = libxml2mod.xmlUCSIsBuhid(code)
return ret
def uCSIsByzantineMusicalSymbols(code):
"""Check whether the character is part of
ByzantineMusicalSymbols UCS Block """
ret = libxml2mod.xmlUCSIsByzantineMusicalSymbols(code)
return ret
def uCSIsCJKCompatibility(code):
"""Check whether the character is part of CJKCompatibility UCS
Block """
ret = libxml2mod.xmlUCSIsCJKCompatibility(code)
return ret
def uCSIsCJKCompatibilityForms(code):
"""Check whether the character is part of
CJKCompatibilityForms UCS Block """
ret = libxml2mod.xmlUCSIsCJKCompatibilityForms(code)
return ret
def uCSIsCJKCompatibilityIdeographs(code):
"""Check whether the character is part of
CJKCompatibilityIdeographs UCS Block """
ret = libxml2mod.xmlUCSIsCJKCompatibilityIdeographs(code)
return ret
def uCSIsCJKCompatibilityIdeographsSupplement(code):
"""Check whether the character is part of
CJKCompatibilityIdeographsSupplement UCS Block """
ret = libxml2mod.xmlUCSIsCJKCompatibilityIdeographsSupplement(code)
return ret
def uCSIsCJKRadicalsSupplement(code):
"""Check whether the character is part of
CJKRadicalsSupplement UCS Block """
ret = libxml2mod.xmlUCSIsCJKRadicalsSupplement(code)
return ret
def uCSIsCJKSymbolsandPunctuation(code):
"""Check whether the character is part of
CJKSymbolsandPunctuation UCS Block """
ret = libxml2mod.xmlUCSIsCJKSymbolsandPunctuation(code)
return ret
def uCSIsCJKUnifiedIdeographs(code):
"""Check whether the character is part of CJKUnifiedIdeographs
UCS Block """
ret = libxml2mod.xmlUCSIsCJKUnifiedIdeographs(code)
return ret
def uCSIsCJKUnifiedIdeographsExtensionA(code):
"""Check whether the character is part of
CJKUnifiedIdeographsExtensionA UCS Block """
ret = libxml2mod.xmlUCSIsCJKUnifiedIdeographsExtensionA(code)
return ret
def uCSIsCJKUnifiedIdeographsExtensionB(code):
"""Check whether the character is part of
CJKUnifiedIdeographsExtensionB UCS Block """
ret = libxml2mod.xmlUCSIsCJKUnifiedIdeographsExtensionB(code)
return ret
def uCSIsCat(code, cat):
"""Check whether the character is part of the UCS Category """
ret = libxml2mod.xmlUCSIsCat(code, cat)
return ret
def uCSIsCatC(code):
"""Check whether the character is part of C UCS Category """
ret = libxml2mod.xmlUCSIsCatC(code)
return ret
def uCSIsCatCc(code):
"""Check whether the character is part of Cc UCS Category """
ret = libxml2mod.xmlUCSIsCatCc(code)
return ret
def uCSIsCatCf(code):
"""Check whether the character is part of Cf UCS Category """
ret = libxml2mod.xmlUCSIsCatCf(code)
return ret
def uCSIsCatCo(code):
"""Check whether the character is part of Co UCS Category """
ret = libxml2mod.xmlUCSIsCatCo(code)
return ret
def uCSIsCatCs(code):
"""Check whether the character is part of Cs UCS Category """
ret = libxml2mod.xmlUCSIsCatCs(code)
return ret
def uCSIsCatL(code):
"""Check whether the character is part of L UCS Category """
ret = libxml2mod.xmlUCSIsCatL(code)
return ret
def uCSIsCatLl(code):
"""Check whether the character is part of Ll UCS Category """
ret = libxml2mod.xmlUCSIsCatLl(code)
return ret
def uCSIsCatLm(code):
"""Check whether the character is part of Lm UCS Category """
ret = libxml2mod.xmlUCSIsCatLm(code)
return ret
def uCSIsCatLo(code):
"""Check whether the character is part of Lo UCS Category """
ret = libxml2mod.xmlUCSIsCatLo(code)
return ret
def uCSIsCatLt(code):
"""Check whether the character is part of Lt UCS Category """
ret = libxml2mod.xmlUCSIsCatLt(code)
return ret
def uCSIsCatLu(code):
"""Check whether the character is part of Lu UCS Category """
ret = libxml2mod.xmlUCSIsCatLu(code)
return ret
def uCSIsCatM(code):
"""Check whether the character is part of M UCS Category """
ret = libxml2mod.xmlUCSIsCatM(code)
return ret
def uCSIsCatMc(code):
"""Check whether the character is part of Mc UCS Category """
ret = libxml2mod.xmlUCSIsCatMc(code)
return ret
def uCSIsCatMe(code):
"""Check whether the character is part of Me UCS Category """
ret = libxml2mod.xmlUCSIsCatMe(code)
return ret
def uCSIsCatMn(code):
"""Check whether the character is part of Mn UCS Category """
ret = libxml2mod.xmlUCSIsCatMn(code)
return ret
def uCSIsCatN(code):
"""Check whether the character is part of N UCS Category """
ret = libxml2mod.xmlUCSIsCatN(code)
return ret
def uCSIsCatNd(code):
"""Check whether the character is part of Nd UCS Category """
ret = libxml2mod.xmlUCSIsCatNd(code)
return ret
def uCSIsCatNl(code):
"""Check whether the character is part of Nl UCS Category """
ret = libxml2mod.xmlUCSIsCatNl(code)
return ret
def uCSIsCatNo(code):
"""Check whether the character is part of No UCS Category """
ret = libxml2mod.xmlUCSIsCatNo(code)
return ret
def uCSIsCatP(code):
"""Check whether the character is part of P UCS Category """
ret = libxml2mod.xmlUCSIsCatP(code)
return ret
def uCSIsCatPc(code):
"""Check whether the character is part of Pc UCS Category """
ret = libxml2mod.xmlUCSIsCatPc(code)
return ret
def uCSIsCatPd(code):
"""Check whether the character is part of Pd UCS Category """
ret = libxml2mod.xmlUCSIsCatPd(code)
return ret
def uCSIsCatPe(code):
"""Check whether the character is part of Pe UCS Category """
ret = libxml2mod.xmlUCSIsCatPe(code)
return ret
def uCSIsCatPf(code):
"""Check whether the character is part of Pf UCS Category """
ret = libxml2mod.xmlUCSIsCatPf(code)
return ret
def uCSIsCatPi(code):
"""Check whether the character is part of Pi UCS Category """
ret = libxml2mod.xmlUCSIsCatPi(code)
return ret
def uCSIsCatPo(code):
"""Check whether the character is part of Po UCS Category """
ret = libxml2mod.xmlUCSIsCatPo(code)
return ret
def uCSIsCatPs(code):
"""Check whether the character is part of Ps UCS Category """
ret = libxml2mod.xmlUCSIsCatPs(code)
return ret
def uCSIsCatS(code):
"""Check whether the character is part of S UCS Category """
ret = libxml2mod.xmlUCSIsCatS(code)
return ret
def uCSIsCatSc(code):
"""Check whether the character is part of Sc UCS Category """
ret = libxml2mod.xmlUCSIsCatSc(code)
return ret
def uCSIsCatSk(code):
"""Check whether the character is part of Sk UCS Category """
ret = libxml2mod.xmlUCSIsCatSk(code)
return ret
def uCSIsCatSm(code):
"""Check whether the character is part of Sm UCS Category """
ret = libxml2mod.xmlUCSIsCatSm(code)
return ret
def uCSIsCatSo(code):
"""Check whether the character is part of So UCS Category """
ret = libxml2mod.xmlUCSIsCatSo(code)
return ret
def uCSIsCatZ(code):
"""Check whether the character is part of Z UCS Category """
ret = libxml2mod.xmlUCSIsCatZ(code)
return ret
def uCSIsCatZl(code):
"""Check whether the character is part of Zl UCS Category """
ret = libxml2mod.xmlUCSIsCatZl(code)
return ret
def uCSIsCatZp(code):
"""Check whether the character is part of Zp UCS Category """
ret = libxml2mod.xmlUCSIsCatZp(code)
return ret
def uCSIsCatZs(code):
"""Check whether the character is part of Zs UCS Category """
ret = libxml2mod.xmlUCSIsCatZs(code)
return ret
def uCSIsCherokee(code):
"""Check whether the character is part of Cherokee UCS Block """
ret = libxml2mod.xmlUCSIsCherokee(code)
return ret
def uCSIsCombiningDiacriticalMarks(code):
"""Check whether the character is part of
CombiningDiacriticalMarks UCS Block """
ret = libxml2mod.xmlUCSIsCombiningDiacriticalMarks(code)
return ret
def uCSIsCombiningDiacriticalMarksforSymbols(code):
"""Check whether the character is part of
CombiningDiacriticalMarksforSymbols UCS Block """
ret = libxml2mod.xmlUCSIsCombiningDiacriticalMarksforSymbols(code)
return ret
def uCSIsCombiningHalfMarks(code):
"""Check whether the character is part of CombiningHalfMarks
UCS Block """
ret = libxml2mod.xmlUCSIsCombiningHalfMarks(code)
return ret
def uCSIsCombiningMarksforSymbols(code):
    """Check whether @code falls in the CombiningMarksforSymbols UCS block."""
    return libxml2mod.xmlUCSIsCombiningMarksforSymbols(code)
def uCSIsControlPictures(code):
    """Check whether @code falls in the ControlPictures UCS block."""
    return libxml2mod.xmlUCSIsControlPictures(code)
def uCSIsCurrencySymbols(code):
    """Check whether @code falls in the CurrencySymbols UCS block."""
    return libxml2mod.xmlUCSIsCurrencySymbols(code)
def uCSIsCypriotSyllabary(code):
    """Check whether @code falls in the CypriotSyllabary UCS block."""
    return libxml2mod.xmlUCSIsCypriotSyllabary(code)
def uCSIsCyrillic(code):
    """Check whether @code falls in the Cyrillic UCS block."""
    return libxml2mod.xmlUCSIsCyrillic(code)
def uCSIsCyrillicSupplement(code):
    """Check whether @code falls in the CyrillicSupplement UCS block."""
    return libxml2mod.xmlUCSIsCyrillicSupplement(code)
def uCSIsDeseret(code):
    """Check whether @code falls in the Deseret UCS block."""
    return libxml2mod.xmlUCSIsDeseret(code)
def uCSIsDevanagari(code):
    """Check whether @code falls in the Devanagari UCS block."""
    return libxml2mod.xmlUCSIsDevanagari(code)
def uCSIsDingbats(code):
    """Check whether @code falls in the Dingbats UCS block."""
    return libxml2mod.xmlUCSIsDingbats(code)
def uCSIsEnclosedAlphanumerics(code):
    """Check whether @code falls in the EnclosedAlphanumerics UCS block."""
    return libxml2mod.xmlUCSIsEnclosedAlphanumerics(code)
def uCSIsEnclosedCJKLettersandMonths(code):
    """Check whether @code falls in the EnclosedCJKLettersandMonths UCS block."""
    return libxml2mod.xmlUCSIsEnclosedCJKLettersandMonths(code)
def uCSIsEthiopic(code):
    """Check whether @code falls in the Ethiopic UCS block."""
    return libxml2mod.xmlUCSIsEthiopic(code)
def uCSIsGeneralPunctuation(code):
    """Check whether @code falls in the GeneralPunctuation UCS block."""
    return libxml2mod.xmlUCSIsGeneralPunctuation(code)
def uCSIsGeometricShapes(code):
    """Check whether @code falls in the GeometricShapes UCS block."""
    return libxml2mod.xmlUCSIsGeometricShapes(code)
def uCSIsGeorgian(code):
    """Check whether @code falls in the Georgian UCS block."""
    return libxml2mod.xmlUCSIsGeorgian(code)
def uCSIsGothic(code):
    """Check whether @code falls in the Gothic UCS block."""
    return libxml2mod.xmlUCSIsGothic(code)
def uCSIsGreek(code):
    """Check whether @code falls in the Greek UCS block."""
    return libxml2mod.xmlUCSIsGreek(code)
def uCSIsGreekExtended(code):
    """Check whether @code falls in the GreekExtended UCS block."""
    return libxml2mod.xmlUCSIsGreekExtended(code)
def uCSIsGreekandCoptic(code):
    """Check whether @code falls in the GreekandCoptic UCS block."""
    return libxml2mod.xmlUCSIsGreekandCoptic(code)
def uCSIsGujarati(code):
    """Check whether @code falls in the Gujarati UCS block."""
    return libxml2mod.xmlUCSIsGujarati(code)
def uCSIsGurmukhi(code):
    """Check whether @code falls in the Gurmukhi UCS block."""
    return libxml2mod.xmlUCSIsGurmukhi(code)
def uCSIsHalfwidthandFullwidthForms(code):
    """Check whether @code falls in the HalfwidthandFullwidthForms UCS block."""
    return libxml2mod.xmlUCSIsHalfwidthandFullwidthForms(code)
def uCSIsHangulCompatibilityJamo(code):
    """Check whether @code falls in the HangulCompatibilityJamo UCS block."""
    return libxml2mod.xmlUCSIsHangulCompatibilityJamo(code)
def uCSIsHangulJamo(code):
    """Check whether @code falls in the HangulJamo UCS block."""
    return libxml2mod.xmlUCSIsHangulJamo(code)
def uCSIsHangulSyllables(code):
    """Check whether @code falls in the HangulSyllables UCS block."""
    return libxml2mod.xmlUCSIsHangulSyllables(code)
def uCSIsHanunoo(code):
    """Check whether @code falls in the Hanunoo UCS block."""
    return libxml2mod.xmlUCSIsHanunoo(code)
def uCSIsHebrew(code):
    """Check whether @code falls in the Hebrew UCS block."""
    return libxml2mod.xmlUCSIsHebrew(code)
def uCSIsHighPrivateUseSurrogates(code):
    """Check whether @code falls in the HighPrivateUseSurrogates UCS block."""
    return libxml2mod.xmlUCSIsHighPrivateUseSurrogates(code)
def uCSIsHighSurrogates(code):
    """Check whether @code falls in the HighSurrogates UCS block."""
    return libxml2mod.xmlUCSIsHighSurrogates(code)
def uCSIsHiragana(code):
    """Check whether @code falls in the Hiragana UCS block."""
    return libxml2mod.xmlUCSIsHiragana(code)
def uCSIsIPAExtensions(code):
    """Check whether @code falls in the IPAExtensions UCS block."""
    return libxml2mod.xmlUCSIsIPAExtensions(code)
def uCSIsIdeographicDescriptionCharacters(code):
    """Check whether @code falls in the IdeographicDescriptionCharacters UCS block."""
    return libxml2mod.xmlUCSIsIdeographicDescriptionCharacters(code)
def uCSIsKanbun(code):
    """Check whether @code falls in the Kanbun UCS block."""
    return libxml2mod.xmlUCSIsKanbun(code)
def uCSIsKangxiRadicals(code):
    """Check whether @code falls in the KangxiRadicals UCS block."""
    return libxml2mod.xmlUCSIsKangxiRadicals(code)
def uCSIsKannada(code):
    """Check whether @code falls in the Kannada UCS block."""
    return libxml2mod.xmlUCSIsKannada(code)
def uCSIsKatakana(code):
    """Check whether @code falls in the Katakana UCS block."""
    return libxml2mod.xmlUCSIsKatakana(code)
def uCSIsKatakanaPhoneticExtensions(code):
    """Check whether @code falls in the KatakanaPhoneticExtensions UCS block."""
    return libxml2mod.xmlUCSIsKatakanaPhoneticExtensions(code)
def uCSIsKhmer(code):
    """Check whether @code falls in the Khmer UCS block."""
    return libxml2mod.xmlUCSIsKhmer(code)
def uCSIsKhmerSymbols(code):
    """Check whether @code falls in the KhmerSymbols UCS block."""
    return libxml2mod.xmlUCSIsKhmerSymbols(code)
def uCSIsLao(code):
    """Check whether @code falls in the Lao UCS block."""
    return libxml2mod.xmlUCSIsLao(code)
def uCSIsLatin1Supplement(code):
    """Check whether @code falls in the Latin-1Supplement UCS block."""
    return libxml2mod.xmlUCSIsLatin1Supplement(code)
def uCSIsLatinExtendedA(code):
    """Check whether @code falls in the LatinExtended-A UCS block."""
    return libxml2mod.xmlUCSIsLatinExtendedA(code)
def uCSIsLatinExtendedAdditional(code):
    """Check whether @code falls in the LatinExtendedAdditional UCS block."""
    return libxml2mod.xmlUCSIsLatinExtendedAdditional(code)
def uCSIsLatinExtendedB(code):
    """Check whether @code falls in the LatinExtended-B UCS block."""
    return libxml2mod.xmlUCSIsLatinExtendedB(code)
def uCSIsLetterlikeSymbols(code):
    """Check whether @code falls in the LetterlikeSymbols UCS block."""
    return libxml2mod.xmlUCSIsLetterlikeSymbols(code)
def uCSIsLimbu(code):
    """Check whether @code falls in the Limbu UCS block."""
    return libxml2mod.xmlUCSIsLimbu(code)
def uCSIsLinearBIdeograms(code):
    """Check whether @code falls in the LinearBIdeograms UCS block."""
    return libxml2mod.xmlUCSIsLinearBIdeograms(code)
def uCSIsLinearBSyllabary(code):
    """Check whether @code falls in the LinearBSyllabary UCS block."""
    return libxml2mod.xmlUCSIsLinearBSyllabary(code)
def uCSIsLowSurrogates(code):
    """Check whether @code falls in the LowSurrogates UCS block."""
    return libxml2mod.xmlUCSIsLowSurrogates(code)
def uCSIsMalayalam(code):
    """Check whether @code falls in the Malayalam UCS block."""
    return libxml2mod.xmlUCSIsMalayalam(code)
def uCSIsMathematicalAlphanumericSymbols(code):
    """Check whether @code falls in the MathematicalAlphanumericSymbols UCS block."""
    return libxml2mod.xmlUCSIsMathematicalAlphanumericSymbols(code)
def uCSIsMathematicalOperators(code):
    """Check whether @code falls in the MathematicalOperators UCS block."""
    return libxml2mod.xmlUCSIsMathematicalOperators(code)
def uCSIsMiscellaneousMathematicalSymbolsA(code):
    """Check whether @code falls in the MiscellaneousMathematicalSymbols-A UCS block."""
    return libxml2mod.xmlUCSIsMiscellaneousMathematicalSymbolsA(code)
def uCSIsMiscellaneousMathematicalSymbolsB(code):
    """Check whether @code falls in the MiscellaneousMathematicalSymbols-B UCS block."""
    return libxml2mod.xmlUCSIsMiscellaneousMathematicalSymbolsB(code)
def uCSIsMiscellaneousSymbols(code):
    """Check whether @code falls in the MiscellaneousSymbols UCS block."""
    return libxml2mod.xmlUCSIsMiscellaneousSymbols(code)
def uCSIsMiscellaneousSymbolsandArrows(code):
    """Check whether @code falls in the MiscellaneousSymbolsandArrows UCS block."""
    return libxml2mod.xmlUCSIsMiscellaneousSymbolsandArrows(code)
def uCSIsMiscellaneousTechnical(code):
    """Check whether @code falls in the MiscellaneousTechnical UCS block."""
    return libxml2mod.xmlUCSIsMiscellaneousTechnical(code)
def uCSIsMongolian(code):
    """Check whether @code falls in the Mongolian UCS block."""
    return libxml2mod.xmlUCSIsMongolian(code)
def uCSIsMusicalSymbols(code):
    """Check whether @code falls in the MusicalSymbols UCS block."""
    return libxml2mod.xmlUCSIsMusicalSymbols(code)
def uCSIsMyanmar(code):
    """Check whether @code falls in the Myanmar UCS block."""
    return libxml2mod.xmlUCSIsMyanmar(code)
def uCSIsNumberForms(code):
    """Check whether @code falls in the NumberForms UCS block."""
    return libxml2mod.xmlUCSIsNumberForms(code)
def uCSIsOgham(code):
    """Check whether @code falls in the Ogham UCS block."""
    return libxml2mod.xmlUCSIsOgham(code)
def uCSIsOldItalic(code):
    """Check whether @code falls in the OldItalic UCS block."""
    return libxml2mod.xmlUCSIsOldItalic(code)
def uCSIsOpticalCharacterRecognition(code):
    """Check whether @code falls in the OpticalCharacterRecognition UCS block."""
    return libxml2mod.xmlUCSIsOpticalCharacterRecognition(code)
def uCSIsOriya(code):
    """Check whether @code falls in the Oriya UCS block."""
    return libxml2mod.xmlUCSIsOriya(code)
def uCSIsOsmanya(code):
    """Check whether @code falls in the Osmanya UCS block."""
    return libxml2mod.xmlUCSIsOsmanya(code)
def uCSIsPhoneticExtensions(code):
    """Check whether @code falls in the PhoneticExtensions UCS block."""
    return libxml2mod.xmlUCSIsPhoneticExtensions(code)
def uCSIsPrivateUse(code):
    """Check whether @code falls in the PrivateUse UCS block."""
    return libxml2mod.xmlUCSIsPrivateUse(code)
def uCSIsPrivateUseArea(code):
    """Check whether @code falls in the PrivateUseArea UCS block."""
    return libxml2mod.xmlUCSIsPrivateUseArea(code)
def uCSIsRunic(code):
    """Check whether @code falls in the Runic UCS block."""
    return libxml2mod.xmlUCSIsRunic(code)
def uCSIsShavian(code):
    """Check whether @code falls in the Shavian UCS block."""
    return libxml2mod.xmlUCSIsShavian(code)
def uCSIsSinhala(code):
    """Check whether @code falls in the Sinhala UCS block."""
    return libxml2mod.xmlUCSIsSinhala(code)
def uCSIsSmallFormVariants(code):
    """Check whether @code falls in the SmallFormVariants UCS block."""
    return libxml2mod.xmlUCSIsSmallFormVariants(code)
def uCSIsSpacingModifierLetters(code):
    """Check whether @code falls in the SpacingModifierLetters UCS block."""
    return libxml2mod.xmlUCSIsSpacingModifierLetters(code)
def uCSIsSpecials(code):
    """Check whether @code falls in the Specials UCS block."""
    return libxml2mod.xmlUCSIsSpecials(code)
def uCSIsSuperscriptsandSubscripts(code):
    """Check whether @code falls in the SuperscriptsandSubscripts UCS block."""
    return libxml2mod.xmlUCSIsSuperscriptsandSubscripts(code)
def uCSIsSupplementalArrowsA(code):
    """Check whether @code falls in the SupplementalArrows-A UCS block."""
    return libxml2mod.xmlUCSIsSupplementalArrowsA(code)
def uCSIsSupplementalArrowsB(code):
    """Check whether @code falls in the SupplementalArrows-B UCS block."""
    return libxml2mod.xmlUCSIsSupplementalArrowsB(code)
def uCSIsSupplementalMathematicalOperators(code):
    """Check whether @code falls in the SupplementalMathematicalOperators UCS block."""
    return libxml2mod.xmlUCSIsSupplementalMathematicalOperators(code)
def uCSIsSupplementaryPrivateUseAreaA(code):
    """Check whether @code falls in the SupplementaryPrivateUseArea-A UCS block."""
    return libxml2mod.xmlUCSIsSupplementaryPrivateUseAreaA(code)
def uCSIsSupplementaryPrivateUseAreaB(code):
    """Check whether @code falls in the SupplementaryPrivateUseArea-B UCS block."""
    return libxml2mod.xmlUCSIsSupplementaryPrivateUseAreaB(code)
def uCSIsSyriac(code):
    """Check whether @code falls in the Syriac UCS block."""
    return libxml2mod.xmlUCSIsSyriac(code)
def uCSIsTagalog(code):
    """Check whether @code falls in the Tagalog UCS block."""
    return libxml2mod.xmlUCSIsTagalog(code)
def uCSIsTagbanwa(code):
    """Check whether @code falls in the Tagbanwa UCS block."""
    return libxml2mod.xmlUCSIsTagbanwa(code)
def uCSIsTags(code):
    """Check whether @code falls in the Tags UCS block."""
    return libxml2mod.xmlUCSIsTags(code)
def uCSIsTaiLe(code):
    """Check whether @code falls in the TaiLe UCS block."""
    return libxml2mod.xmlUCSIsTaiLe(code)
def uCSIsTaiXuanJingSymbols(code):
    """Check whether @code falls in the TaiXuanJingSymbols UCS block."""
    return libxml2mod.xmlUCSIsTaiXuanJingSymbols(code)
def uCSIsTamil(code):
    """Check whether @code falls in the Tamil UCS block."""
    return libxml2mod.xmlUCSIsTamil(code)
def uCSIsTelugu(code):
    """Check whether @code falls in the Telugu UCS block."""
    return libxml2mod.xmlUCSIsTelugu(code)
def uCSIsThaana(code):
    """Check whether @code falls in the Thaana UCS block."""
    return libxml2mod.xmlUCSIsThaana(code)
def uCSIsThai(code):
    """Check whether @code falls in the Thai UCS block."""
    return libxml2mod.xmlUCSIsThai(code)
def uCSIsTibetan(code):
    """Check whether @code falls in the Tibetan UCS block."""
    return libxml2mod.xmlUCSIsTibetan(code)
def uCSIsUgaritic(code):
    """Check whether @code falls in the Ugaritic UCS block."""
    return libxml2mod.xmlUCSIsUgaritic(code)
def uCSIsUnifiedCanadianAboriginalSyllabics(code):
    """Check whether @code falls in the UnifiedCanadianAboriginalSyllabics UCS block."""
    return libxml2mod.xmlUCSIsUnifiedCanadianAboriginalSyllabics(code)
def uCSIsVariationSelectors(code):
    """Check whether @code falls in the VariationSelectors UCS block."""
    return libxml2mod.xmlUCSIsVariationSelectors(code)
def uCSIsVariationSelectorsSupplement(code):
    """Check whether @code falls in the VariationSelectorsSupplement UCS block."""
    return libxml2mod.xmlUCSIsVariationSelectorsSupplement(code)
def uCSIsYiRadicals(code):
    """Check whether @code falls in the YiRadicals UCS block."""
    return libxml2mod.xmlUCSIsYiRadicals(code)
def uCSIsYiSyllables(code):
    """Check whether @code falls in the YiSyllables UCS block."""
    return libxml2mod.xmlUCSIsYiSyllables(code)
def uCSIsYijingHexagramSymbols(code):
    """Check whether @code falls in the YijingHexagramSymbols UCS block."""
    return libxml2mod.xmlUCSIsYijingHexagramSymbols(code)
#
# Functions from module xmlversion
#
def checkVersion(version):
    """Compare the compiled library version against the include one.

    This can warn or immediately kill the application on mismatch.
    """
    libxml2mod.xmlCheckVersion(version)
#
# Functions from module xpathInternals
#
def valuePop(ctxt):
    """Pop and return the top XPath object from @ctxt's value stack."""
    ctxt__o = ctxt._o if ctxt is not None else None
    return libxml2mod.valuePop(ctxt__o)
class xmlNode(xmlCore):
def __init__(self, _obj=None):
if checkWrapper(_obj) != 0: raise TypeError('xmlNode got a wrong wrapper object type')
self._o = _obj
xmlCore.__init__(self, _obj=_obj)
def __repr__(self):
return "<xmlNode (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
# accessors for xmlNode
def ns(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp
def nsDefs(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNsDefs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp
#
# xmlNode functions from module debugXML
#
def debugDumpNode(self, output, depth):
"""Dumps debug information for the element node, it is
recursive """
libxml2mod.xmlDebugDumpNode(output, self._o, depth)
def debugDumpNodeList(self, output, depth):
"""Dumps debug information for the list of element node, it is
recursive """
libxml2mod.xmlDebugDumpNodeList(output, self._o, depth)
def debugDumpOneNode(self, output, depth):
"""Dumps debug information for the element node, it is not
recursive """
libxml2mod.xmlDebugDumpOneNode(output, self._o, depth)
def lsCountNode(self):
"""Count the children of @node. """
ret = libxml2mod.xmlLsCountNode(self._o)
return ret
def lsOneNode(self, output):
"""Dump to @output the type and name of @node. """
libxml2mod.xmlLsOneNode(output, self._o)
def shellPrintNode(self):
"""Print node to the output FILE """
libxml2mod.xmlShellPrintNode(self._o)
#
# xmlNode functions from module tree
#
def addChild(self, cur):
"""Add a new node to @parent, at the end of the child (or
property) list merging adjacent TEXT nodes (in which case
@cur is freed) If the new node is ATTRIBUTE, it is added
into properties instead of children. If there is an
attribute with equal name, it is first destroyed. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlAddChild(self._o, cur__o)
if ret is None:raise treeError('xmlAddChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addChildList(self, cur):
"""Add a list of node at the end of the child list of the
parent merging adjacent TEXT nodes (@cur may be freed) """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlAddChildList(self._o, cur__o)
if ret is None:raise treeError('xmlAddChildList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addContent(self, content):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContent(), @content is supposed to be
raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContent(self._o, content)
def addContentLen(self, content, len):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContentLen(self._o, content, len)
def addNextSibling(self, elem):
"""Add a new node @elem as the next sibling of @cur If the new
node was already inserted in a document it is first
unlinked from its existing context. As a result of text
merging @elem may be freed. If the new node is ATTRIBUTE,
it is added into properties instead of children. If there
is an attribute with equal name, it is first destroyed. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddNextSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddNextSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addPrevSibling(self, elem):
"""Add a new node @elem as the previous sibling of @cur
merging adjacent TEXT nodes (@elem may be freed) If the new
node was already inserted in a document it is first
unlinked from its existing context. If the new node is
ATTRIBUTE, it is added into properties instead of children.
If there is an attribute with equal name, it is first
destroyed. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddPrevSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddPrevSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def addSibling(self, elem):
"""Add a new element @elem to the list of siblings of @cur
merging adjacent TEXT nodes (@elem may be freed) If the new
element was already inserted in a document it is first
unlinked from its existing context. """
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlAddSibling(self._o, elem__o)
if ret is None:raise treeError('xmlAddSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyNode(self, extended):
"""Do a copy of the node. """
ret = libxml2mod.xmlCopyNode(self._o, extended)
if ret is None:raise treeError('xmlCopyNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyNodeList(self):
"""Do a recursive copy of the node list. Use
xmlDocCopyNodeList() if possible to ensure string interning. """
ret = libxml2mod.xmlCopyNodeList(self._o)
if ret is None:raise treeError('xmlCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def copyProp(self, cur):
"""Do a copy of the attribute. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlCopyProp(self._o, cur__o)
if ret is None:raise treeError('xmlCopyProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def copyPropList(self, cur):
"""Do a copy of an attribute list. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlCopyPropList(self._o, cur__o)
if ret is None:raise treeError('xmlCopyPropList() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def docCopyNode(self, doc, extended):
"""Do a copy of the node to a given document. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlDocCopyNode(self._o, doc__o, extended)
if ret is None:raise treeError('xmlDocCopyNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def docCopyNodeList(self, doc):
"""Do a recursive copy of the node list. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlDocCopyNodeList(doc__o, self._o)
if ret is None:raise treeError('xmlDocCopyNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def docSetRootElement(self, doc):
"""Set the root element of the document (doc->children is a
list containing possibly comments, PIs, etc ...). """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlDocSetRootElement(doc__o, self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def firstElementChild(self):
"""Finds the first child node of that element which is a
Element node Note the handling of entities references is
different than in the W3C DOM element traversal spec since
we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlFirstElementChild(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def freeNode(self):
"""Free a node, this is a recursive behaviour, all the
children are freed too. This doesn't unlink the child from
the list, use xmlUnlinkNode() first. """
libxml2mod.xmlFreeNode(self._o)
def freeNodeList(self):
"""Free a node and all its siblings, this is a recursive
behaviour, all the children are freed too. """
libxml2mod.xmlFreeNodeList(self._o)
def getBase(self, doc):
"""Searches for the BASE URL. The code should work on both XML
and HTML document even if base mechanisms are completely
different. It returns the base as defined in RFC 2396
sections 5.1.1. Base URI within Document Content and 5.1.2.
Base URI from the Encapsulating Entity However it does not
return the document base (5.1.3), use doc->URL in this case """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlNodeGetBase(doc__o, self._o)
return ret
def getContent(self):
"""Read the value of a node, this can be either the text
carried directly by this node if it's a TEXT node or the
aggregate string of the values carried by this node child's
(TEXT and ENTITY_REF). Entity references are substituted. """
ret = libxml2mod.xmlNodeGetContent(self._o)
return ret
def getLang(self):
"""Searches the language of a node, i.e. the values of the
xml:lang attribute or the one carried by the nearest
ancestor. """
ret = libxml2mod.xmlNodeGetLang(self._o)
return ret
def getSpacePreserve(self):
"""Searches the space preserving behaviour of a node, i.e. the
values of the xml:space attribute or the one carried by the
nearest ancestor. """
ret = libxml2mod.xmlNodeGetSpacePreserve(self._o)
return ret
def hasNsProp(self, name, nameSpace):
"""Search for an attribute associated to a node This attribute
has to be anchored in the namespace specified. This does
the entity substitution. This function looks in DTD
attribute declaration for #FIXED or default declaration
values unless DTD use has been turned off. Note that a
namespace of None indicates to use the default namespace. """
ret = libxml2mod.xmlHasNsProp(self._o, name, nameSpace)
if ret is None:return None
__tmp = xmlAttr(_obj=ret)
return __tmp
def hasProp(self, name):
"""Search an attribute associated to a node This function also
looks in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off. """
ret = libxml2mod.xmlHasProp(self._o, name)
if ret is None:return None
__tmp = xmlAttr(_obj=ret)
return __tmp
def isBlankNode(self):
"""Checks whether this node is an empty or whitespace only
(and possibly ignorable) text-node. """
ret = libxml2mod.xmlIsBlankNode(self._o)
return ret
def isText(self):
"""Is this node a Text node ? """
ret = libxml2mod.xmlNodeIsText(self._o)
return ret
def lastChild(self):
"""Search the last child of a node. """
ret = libxml2mod.xmlGetLastChild(self._o)
if ret is None:raise treeError('xmlGetLastChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def lastElementChild(self):
"""Finds the last child node of that element which is a
Element node Note the handling of entities references is
different than in the W3C DOM element traversal spec since
we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlLastElementChild(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def lineNo(self):
"""Get line number of @node. Try to override the limitation of
lines being store in 16 bits ints if XML_PARSE_BIG_LINES
parser option was used """
ret = libxml2mod.xmlGetLineNo(self._o)
return ret
def listGetRawString(self, doc, inLine):
"""Builds the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs, contrary to
xmlNodeListGetString() this function doesn't do any
character encoding handling. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlNodeListGetRawString(doc__o, self._o, inLine)
return ret
def listGetString(self, doc, inLine):
"""Build the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlNodeListGetString(doc__o, self._o, inLine)
return ret
def newChild(self, ns, name, content):
"""Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child list containing the TEXTs and ENTITY_REFs node will
be created. NOTE: @content is supposed to be a piece of XML
CDATA, so it allows entity references. XML special chars
must be escaped first by using
xmlEncodeEntitiesReentrant(), or xmlNewTextChild() should
be used. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewChild(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def newNs(self, href, prefix):
"""Creation of a new Namespace. This function will refuse to
create a namespace with a similar prefix than an existing
one present on this node. Note that for a default
namespace, @prefix should be None. We use href==None in
the case of an element creation where the namespace was not
defined. """
ret = libxml2mod.xmlNewNs(self._o, href, prefix)
if ret is None:raise treeError('xmlNewNs() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def newNsProp(self, ns, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewNsProp(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlNewNsProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newNsPropEatName(self, ns, name, value):
"""Create a new property tagged with a namespace and carried
by a node. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewNsPropEatName(self._o, ns__o, name, value)
if ret is None:raise treeError('xmlNewNsPropEatName() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newProp(self, name, value):
"""Create a new property carried by a node. """
ret = libxml2mod.xmlNewProp(self._o, name, value)
if ret is None:raise treeError('xmlNewProp() failed')
__tmp = xmlAttr(_obj=ret)
return __tmp
def newTextChild(self, ns, name, content):
"""Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child TEXT node will be created containing the string
@content. NOTE: Use xmlNewChild() if @content will contain
entities that need to be preserved. Use this function,
xmlNewTextChild(), if you need to ensure that reserved XML
chars that might appear in @content, such as the ampersand,
greater-than or less-than signs, are automatically replaced
by their XML escaped entity representations. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewTextChild(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewTextChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def nextElementSibling(self):
"""Finds the first closest next sibling of the node which is
an element node. Note the handling of entities references
is different than in the W3C DOM element traversal spec
since we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlNextElementSibling(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def noNsProp(self, name):
"""Search and get the value of an attribute associated to a
node This does the entity substitution. This function looks
in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off. This
function is similar to xmlGetProp except it will accept
only an attribute in no namespace. """
ret = libxml2mod.xmlGetNoNsProp(self._o, name)
return ret
def nodePath(self):
"""Build a structure based Path for the given node """
ret = libxml2mod.xmlGetNodePath(self._o)
return ret
def nsProp(self, name, nameSpace):
"""Search and get the value of an attribute associated to a
node This attribute has to be anchored in the namespace
specified. This does the entity substitution. This function
looks in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off. """
ret = libxml2mod.xmlGetNsProp(self._o, name, nameSpace)
return ret
def previousElementSibling(self):
"""Finds the first closest previous sibling of the node which
is an element node. Note the handling of entities
references is different than in the W3C DOM element
traversal spec since we don't have back reference from
entities content to entities references. """
ret = libxml2mod.xmlPreviousElementSibling(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def prop(self, name):
"""Search and get the value of an attribute associated to a
node This does the entity substitution. This function looks
in DTD attribute declaration for #FIXED or default
declaration values unless DTD use has been turned off.
NOTE: this function acts independently of namespaces
associated to the attribute. Use xmlGetNsProp() or
xmlGetNoNsProp() for namespace aware processing. """
ret = libxml2mod.xmlGetProp(self._o, name)
return ret
def reconciliateNs(self, doc):
"""This function checks that all the namespaces declared
within the given tree are properly declared. This is needed
for example after Copy or Cut and then paste operations.
The subtree may still hold pointers to namespace
declarations outside the subtree or invalid/masked. As much
as possible the function try to reuse the existing
namespaces found in the new environment. If not possible
the new namespaces are redeclared on @tree at the top of
the given subtree. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlReconciliateNs(doc__o, self._o)
return ret
def replaceNode(self, cur):
"""Unlink the old node from its current context, prune the new
one at the same place. If @cur was already inserted in a
document it is first unlinked from its existing context. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlReplaceNode(self._o, cur__o)
if ret is None:raise treeError('xmlReplaceNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
    def searchNs(self, doc, nameSpace):
        """Search a Ns registered under a given name space for a
        document. Recurse on the parents until it finds the defined
        namespace. @nameSpace can be None, this is a search for the
        default namespace. We don't allow to cross entities
        boundaries. If you don't declare the namespace within those
        you will be in troubles !!! A warning is generated to cover
        this case. """
        # Unwrap the optional xmlDoc wrapper to its C-level object.
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlSearchNs(doc__o, self._o, nameSpace)
        # NOTE(review): unlike the C API, which returns NULL on a miss,
        # this wrapper raises treeError when no namespace is found.
        if ret is None:raise treeError('xmlSearchNs() failed')
        __tmp = xmlNs(_obj=ret)
        return __tmp
    def searchNsByHref(self, doc, href):
        """Search a Ns aliasing a given URI. Recurse on the parents
        until it finds the defined namespace. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlSearchNsByHref(doc__o, self._o, href)
        # NOTE(review): a miss raises treeError rather than returning None.
        if ret is None:raise treeError('xmlSearchNsByHref() failed')
        __tmp = xmlNs(_obj=ret)
        return __tmp
    # -- Simple node mutators: each delegates directly to the C library
    # and returns None.
    def setBase(self, uri):
        """Set (or reset) the base URI of a node, i.e. the value of
        the xml:base attribute. """
        libxml2mod.xmlNodeSetBase(self._o, uri)
    def setContent(self, content):
        """Replace the content of a node. NOTE: @content is supposed
        to be a piece of XML CDATA, so it allows entity references,
        but XML special chars need to be escaped first by using
        xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """
        libxml2mod.xmlNodeSetContent(self._o, content)
    def setContentLen(self, content, len):
        """Replace the content of a node. NOTE: @content is supposed
        to be a piece of XML CDATA, so it allows entity references,
        but XML special chars need to be escaped first by using
        xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """
        libxml2mod.xmlNodeSetContentLen(self._o, content, len)
    def setLang(self, lang):
        """Set the language of a node, i.e. the values of the xml:lang
        attribute. """
        libxml2mod.xmlNodeSetLang(self._o, lang)
    def setListDoc(self, doc):
        """update all nodes in the list to point to the right document """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        libxml2mod.xmlSetListDoc(self._o, doc__o)
    def setName(self, name):
        """Set (or reset) the name of a node. """
        libxml2mod.xmlNodeSetName(self._o, name)
    def setNs(self, ns):
        """Associate a namespace to a node, a posteriori. """
        if ns is None: ns__o = None
        else: ns__o = ns._o
        libxml2mod.xmlSetNs(self._o, ns__o)
    def setNsProp(self, ns, name, value):
        """Set (or reset) an attribute carried by a node. The ns
        structure must be in scope, this is not checked """
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlSetNsProp(self._o, ns__o, name, value)
        if ret is None:raise treeError('xmlSetNsProp() failed')
        # Wrap the returned C attribute pointer in the Python xmlAttr class.
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def setProp(self, name, value):
        """Set (or reset) an attribute carried by a node. If @name has
        a prefix, then the corresponding namespace-binding will be
        used, if in scope; it is an error it there's no such
        ns-binding for the prefix in scope. """
        ret = libxml2mod.xmlSetProp(self._o, name, value)
        if ret is None:raise treeError('xmlSetProp() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def setSpacePreserve(self, val):
        """Set (or reset) the space preserving behaviour of a node,
        i.e. the value of the xml:space attribute. """
        libxml2mod.xmlNodeSetSpacePreserve(self._o, val)
    def setTreeDoc(self, doc):
        """update all nodes under the tree to point to the right
        document """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        libxml2mod.xmlSetTreeDoc(self._o, doc__o)
def textConcat(self, content, len):
"""Concat the given string at the end of the existing node
content """
ret = libxml2mod.xmlTextConcat(self._o, content, len)
return ret
def textMerge(self, second):
"""Merge two text nodes into one """
if second is None: second__o = None
else: second__o = second._o
ret = libxml2mod.xmlTextMerge(self._o, second__o)
if ret is None:raise treeError('xmlTextMerge() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
    def unlinkNode(self):
        """Unlink a node from its current context; the node is not
        freed. If one needs to free the node, use xmlFreeNode()
        routine after the unlink to discard it. Note that namespace
        nodes can't be unlinked as they do not have a pointer to
        their parent. """
        libxml2mod.xmlUnlinkNode(self._o)
    def unsetNsProp(self, ns, name):
        """Remove an attribute carried by a node. """
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlUnsetNsProp(self._o, ns__o, name)
        return ret
    def unsetProp(self, name):
        """Remove an attribute carried by a node. This handles only
        attributes in no namespace. """
        ret = libxml2mod.xmlUnsetProp(self._o, name)
        return ret
    #
    # xmlNode functions from module valid
    #
    def isID(self, doc, attr):
        """Determine whether an attribute is of type ID. In case we
        have DTD(s) then this is done if DTD loading has been
        requested. In the case of HTML documents parsed with the
        HTML parser, then ID detection is done systematically. """
        # Unwrap the optional xmlDoc / xmlAttr wrappers to C-level objects.
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlIsID(doc__o, self._o, attr__o)
        return ret
    def isRef(self, doc, attr):
        """Determine whether an attribute is of type Ref. In case we
        have DTD(s) then this is simple, otherwise we use an
        heuristic: name Ref (upper or lowercase). """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlIsRef(doc__o, self._o, attr__o)
        return ret
    def validNormalizeAttributeValue(self, doc, name, value):
        """Does the validation related extra step of the normalization
        of attribute values: If the declared value is not CDATA,
        then the XML processor must further process the normalized
        attribute value by discarding any leading and trailing
        space (#x20) characters, and by replacing sequences of
        space (#x20) characters by single space (#x20) character. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlValidNormalizeAttributeValue(doc__o, self._o, name, value)
        return ret
    #
    # xmlNode functions from module xinclude
    #
    def xincludeProcessTree(self):
        """Implement the XInclude substitution for the given subtree """
        ret = libxml2mod.xmlXIncludeProcessTree(self._o)
        return ret
    def xincludeProcessTreeFlags(self, flags):
        """Implement the XInclude substitution for the given subtree,
        honouring the given parser @flags. """
        ret = libxml2mod.xmlXIncludeProcessTreeFlags(self._o, flags)
        return ret
    #
    # xmlNode functions from module xmlschemas
    #
    def schemaValidateOneElement(self, ctxt):
        """Validate a branch of a tree, starting with the given @elem. """
        # Unwrap the optional schema-validation-context wrapper.
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlSchemaValidateOneElement(ctxt__o, self._o)
        return ret
    #
    # xmlNode functions from module xpath
    #
    def xpathCastNodeToNumber(self):
        """Converts a node to its number value """
        ret = libxml2mod.xmlXPathCastNodeToNumber(self._o)
        return ret
    def xpathCastNodeToString(self):
        """Converts a node to its string value. """
        ret = libxml2mod.xmlXPathCastNodeToString(self._o)
        return ret
    def xpathCmpNodes(self, node2):
        """Compare two nodes w.r.t document order """
        if node2 is None: node2__o = None
        else: node2__o = node2._o
        ret = libxml2mod.xmlXPathCmpNodes(self._o, node2__o)
        return ret
    def xpathNodeEval(self, str, ctx):
        """Evaluate the XPath Location Path in the given context. The
        node 'node' is set as the context node. The context node is
        not restored. """
        if ctx is None: ctx__o = None
        else: ctx__o = ctx._o
        ret = libxml2mod.xmlXPathNodeEval(self._o, str, ctx__o)
        if ret is None:raise xpathError('xmlXPathNodeEval() failed')
        # xpathObjectRet converts the C xmlXPathObject into a Python value.
        return xpathObjectRet(ret)
    #
    # xmlNode functions from module xpathInternals
    #
    def xpathNewNodeSet(self):
        """Create a new xmlXPathObjectPtr of type NodeSet and
        initialize it with the single Node @val """
        ret = libxml2mod.xmlXPathNewNodeSet(self._o)
        if ret is None:raise xpathError('xmlXPathNewNodeSet() failed')
        return xpathObjectRet(ret)
    def xpathNewValueTree(self):
        """Create a new xmlXPathObjectPtr of type Value Tree (XSLT)
        and initialize it with the tree root @val """
        ret = libxml2mod.xmlXPathNewValueTree(self._o)
        if ret is None:raise xpathError('xmlXPathNewValueTree() failed')
        return xpathObjectRet(ret)
    # -- XPath axis traversal helpers: each returns the next node on
    # the corresponding XPath axis relative to this node, given an
    # XPath parser context.  A miss raises xpathError.
    def xpathNextAncestor(self, ctxt):
        """Traversal function for the "ancestor" direction the
        ancestor axis contains the ancestors of the context node;
        the ancestors of the context node consist of the parent of
        context node and the parent's parent and so on; the nodes
        are ordered in reverse document order; thus the parent is
        the first node on the axis, and the parent's parent is the
        second node on the axis """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextAncestor(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextAncestor() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextAncestorOrSelf(self, ctxt):
        """Traversal function for the "ancestor-or-self" direction the
        ancestor-or-self axis contains the context node and
        ancestors of the context node in reverse document order;
        thus the context node is the first node on the axis, and
        the context node's parent the second; parent here is
        defined the same as with the parent axis. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextAncestorOrSelf(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextAncestorOrSelf() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextAttribute(self, ctxt):
        """Traversal function for the "attribute" direction TODO:
        support DTD inherited default attributes """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextAttribute(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextAttribute() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextChild(self, ctxt):
        """Traversal function for the "child" direction The child axis
        contains the children of the context node in document order. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextChild(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextChild() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextDescendant(self, ctxt):
        """Traversal function for the "descendant" direction the
        descendant axis contains the descendants of the context
        node in document order; a descendant is a child or a child
        of a child and so on. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextDescendant(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextDescendant() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextDescendantOrSelf(self, ctxt):
        """Traversal function for the "descendant-or-self" direction
        the descendant-or-self axis contains the context node and
        the descendants of the context node in document order; thus
        the context node is the first node on the axis, and the
        first child of the context node is the second node on the
        axis """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextDescendantOrSelf(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextDescendantOrSelf() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextFollowing(self, ctxt):
        """Traversal function for the "following" direction The
        following axis contains all nodes in the same document as
        the context node that are after the context node in
        document order, excluding any descendants and excluding
        attribute nodes and namespace nodes; the nodes are ordered
        in document order """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextFollowing(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextFollowing() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextFollowingSibling(self, ctxt):
        """Traversal function for the "following-sibling" direction
        The following-sibling axis contains the following siblings
        of the context node in document order. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextFollowingSibling(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextFollowingSibling() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextNamespace(self, ctxt):
        """Traversal function for the "namespace" direction the
        namespace axis contains the namespace nodes of the context
        node; the order of nodes on this axis is
        implementation-defined; the axis will be empty unless the
        context node is an element We keep the XML namespace node
        at the end of the list. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextNamespace(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextNamespace() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextParent(self, ctxt):
        """Traversal function for the "parent" direction The parent
        axis contains the parent of the context node, if there is
        one. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextParent(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextParent() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextPreceding(self, ctxt):
        """Traversal function for the "preceding" direction the
        preceding axis contains all nodes in the same document as
        the context node that are before the context node in
        document order, excluding any ancestors and excluding
        attribute nodes and namespace nodes; the nodes are ordered
        in reverse document order """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextPreceding(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextPreceding() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextPrecedingSibling(self, ctxt):
        """Traversal function for the "preceding-sibling" direction
        The preceding-sibling axis contains the preceding siblings
        of the context node in reverse document order; the first
        preceding sibling is first on the axis; the sibling
        preceding that node is the second on the axis and so on. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextPrecedingSibling(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextPrecedingSibling() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def xpathNextSelf(self, ctxt):
        """Traversal function for the "self" direction The self axis
        contains just the context node itself """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlXPathNextSelf(ctxt__o, self._o)
        if ret is None:raise xpathError('xmlXPathNextSelf() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    #
    # xmlNode functions from module xpointer
    #
    def xpointerNewCollapsedRange(self):
        """Create a new xmlXPathObjectPtr of type range using a single
        node """
        ret = libxml2mod.xmlXPtrNewCollapsedRange(self._o)
        if ret is None:raise treeError('xmlXPtrNewCollapsedRange() failed')
        return xpathObjectRet(ret)
    def xpointerNewContext(self, doc, origin):
        """Create a new XPointer context """
        # Unwrap the optional xmlDoc / origin-node wrappers to C objects.
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if origin is None: origin__o = None
        else: origin__o = origin._o
        ret = libxml2mod.xmlXPtrNewContext(doc__o, self._o, origin__o)
        if ret is None:raise treeError('xmlXPtrNewContext() failed')
        __tmp = xpathContext(_obj=ret)
        return __tmp
    def xpointerNewLocationSetNodes(self, end):
        """Create a new xmlXPathObjectPtr of type LocationSet and
        initialize it with the single range made of the two nodes
        @start and @end """
        if end is None: end__o = None
        else: end__o = end._o
        ret = libxml2mod.xmlXPtrNewLocationSetNodes(self._o, end__o)
        if ret is None:raise treeError('xmlXPtrNewLocationSetNodes() failed')
        return xpathObjectRet(ret)
    def xpointerNewRange(self, startindex, end, endindex):
        """Create a new xmlXPathObjectPtr of type range """
        if end is None: end__o = None
        else: end__o = end._o
        ret = libxml2mod.xmlXPtrNewRange(self._o, startindex, end__o, endindex)
        if ret is None:raise treeError('xmlXPtrNewRange() failed')
        return xpathObjectRet(ret)
    def xpointerNewRangeNodes(self, end):
        """Create a new xmlXPathObjectPtr of type range using 2 nodes """
        if end is None: end__o = None
        else: end__o = end._o
        ret = libxml2mod.xmlXPtrNewRangeNodes(self._o, end__o)
        if ret is None:raise treeError('xmlXPtrNewRangeNodes() failed')
        return xpathObjectRet(ret)
class xmlDoc(xmlNode):
    def __init__(self, _obj=None):
        """Wrap a C-level document object.  @_obj must be a wrapper
        object produced by libxml2mod; checkWrapper() rejects
        anything else. """
        if checkWrapper(_obj) != 0: raise TypeError('xmlDoc got a wrong wrapper object type')
        self._o = _obj  # the underlying C-level xmlDocPtr wrapper
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        """Debug representation showing the document name and identity."""
        return "<xmlDoc (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
    #
    # xmlDoc functions from module HTMLparser
    #
    def htmlAutoCloseTag(self, name, elem):
        """The HTML DTD allows a tag to implicitly close other tags.
        The list is kept in htmlStartClose array. This function
        checks if the element or one of its children would
        autoclose the given tag. """
        ret = libxml2mod.htmlAutoCloseTag(self._o, name, elem)
        return ret
    def htmlIsAutoClosed(self, elem):
        """The HTML DTD allows a tag to implicitly close other tags.
        The list is kept in htmlStartClose array. This function
        checks if a tag is autoclosed by one of its children """
        ret = libxml2mod.htmlIsAutoClosed(self._o, elem)
        return ret
    #
    # xmlDoc functions from module HTMLtree
    #
    def htmlDocContentDumpFormatOutput(self, buf, encoding, format):
        """Dump an HTML document. """
        # Unwrap the optional output-buffer wrapper to its C object.
        if buf is None: buf__o = None
        else: buf__o = buf._o
        libxml2mod.htmlDocContentDumpFormatOutput(buf__o, self._o, encoding, format)
    def htmlDocContentDumpOutput(self, buf, encoding):
        """Dump an HTML document. Formatting returns/spaces are added. """
        if buf is None: buf__o = None
        else: buf__o = buf._o
        libxml2mod.htmlDocContentDumpOutput(buf__o, self._o, encoding)
    def htmlDocDump(self, f):
        """Dump an HTML document to an open FILE. """
        ret = libxml2mod.htmlDocDump(f, self._o)
        return ret
    def htmlGetMetaEncoding(self):
        """Encoding definition lookup in the Meta tags """
        ret = libxml2mod.htmlGetMetaEncoding(self._o)
        return ret
    def htmlNodeDumpFile(self, out, cur):
        """Dump an HTML node, recursive behaviour, children are printed
        too, and formatting returns are added. """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlNodeDumpFile(out, self._o, cur__o)
    def htmlNodeDumpFileFormat(self, out, cur, encoding, format):
        """Dump an HTML node, recursive behaviour, children are printed
        too. TODO: if encoding == None try to save in the doc
        encoding """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        ret = libxml2mod.htmlNodeDumpFileFormat(out, self._o, cur__o, encoding, format)
        return ret
    def htmlNodeDumpFormatOutput(self, buf, cur, encoding, format):
        """Dump an HTML node, recursive behaviour, children are printed
        too. """
        if buf is None: buf__o = None
        else: buf__o = buf._o
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlNodeDumpFormatOutput(buf__o, self._o, cur__o, encoding, format)
    def htmlNodeDumpOutput(self, buf, cur, encoding):
        """Dump an HTML node, recursive behaviour, children are printed
        too, and formatting returns/spaces are added. """
        if buf is None: buf__o = None
        else: buf__o = buf._o
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlNodeDumpOutput(buf__o, self._o, cur__o, encoding)
    def htmlSaveFile(self, filename):
        """Dump an HTML document to a file. If @filename is "-" the
        stdout file is used. """
        ret = libxml2mod.htmlSaveFile(filename, self._o)
        return ret
    def htmlSaveFileEnc(self, filename, encoding):
        """Dump an HTML document to a file using a given encoding and
        formatting returns/spaces are added. """
        ret = libxml2mod.htmlSaveFileEnc(filename, self._o, encoding)
        return ret
    def htmlSaveFileFormat(self, filename, encoding, format):
        """Dump an HTML document to a file using a given encoding. """
        ret = libxml2mod.htmlSaveFileFormat(filename, self._o, encoding, format)
        return ret
    def htmlSetMetaEncoding(self, encoding):
        """Sets the current encoding in the Meta tags NOTE: this will
        not change the document content encoding, just the META
        flag associated. """
        ret = libxml2mod.htmlSetMetaEncoding(self._o, encoding)
        return ret
    #
    # xmlDoc functions from module debugXML
    #
    def debugCheckDocument(self, output):
        """Check the document for potential content problems, and
        output the errors to @output """
        ret = libxml2mod.xmlDebugCheckDocument(output, self._o)
        return ret
    def debugDumpDocument(self, output):
        """Dumps debug information for the document, it's recursive """
        libxml2mod.xmlDebugDumpDocument(output, self._o)
    def debugDumpDocumentHead(self, output):
        """Dumps debug information concerning the document, not
        recursive """
        libxml2mod.xmlDebugDumpDocumentHead(output, self._o)
    def debugDumpEntities(self, output):
        """Dumps debug information for all the entities in use by the
        document """
        libxml2mod.xmlDebugDumpEntities(output, self._o)
    #
    # xmlDoc functions from module entities
    #
    def addDocEntity(self, name, type, ExternalID, SystemID, content):
        """Register a new entity for this document. """
        ret = libxml2mod.xmlAddDocEntity(self._o, name, type, ExternalID, SystemID, content)
        if ret is None:raise treeError('xmlAddDocEntity() failed')
        # Wrap the returned C entity pointer in the Python xmlEntity class.
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    def addDtdEntity(self, name, type, ExternalID, SystemID, content):
        """Register a new entity for this document DTD external subset. """
        ret = libxml2mod.xmlAddDtdEntity(self._o, name, type, ExternalID, SystemID, content)
        if ret is None:raise treeError('xmlAddDtdEntity() failed')
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    def docEntity(self, name):
        """Do an entity lookup in the document entity hash table. """
        ret = libxml2mod.xmlGetDocEntity(self._o, name)
        # NOTE(review): a missing entity raises treeError rather than
        # returning None, unlike the underlying C API.
        if ret is None:raise treeError('xmlGetDocEntity() failed')
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    def dtdEntity(self, name):
        """Do an entity lookup in the DTD entity hash table. """
        ret = libxml2mod.xmlGetDtdEntity(self._o, name)
        if ret is None:raise treeError('xmlGetDtdEntity() failed')
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    def encodeEntities(self, input):
        """TODO: remove xmlEncodeEntities, once we are not afraid of
        breaking binary compatibility People must migrate their
        code to xmlEncodeEntitiesReentrant ! This routine will
        issue a warning when encountered. """
        ret = libxml2mod.xmlEncodeEntities(self._o, input)
        return ret
    def encodeEntitiesReentrant(self, input):
        """Do a global encoding of a string, replacing the predefined
        entities and non ASCII values with their entities and
        CharRef counterparts. Contrary to xmlEncodeEntities, this
        routine is reentrant, and result must be deallocated. """
        ret = libxml2mod.xmlEncodeEntitiesReentrant(self._o, input)
        return ret
    def encodeSpecialChars(self, input):
        """Do a global encoding of a string, replacing the predefined
        entities this routine is reentrant, and result must be
        deallocated. """
        ret = libxml2mod.xmlEncodeSpecialChars(self._o, input)
        return ret
    def newEntity(self, name, type, ExternalID, SystemID, content):
        """Create a new entity, this differs from xmlAddDocEntity()
        that if the document is None or has no internal subset
        defined, then an unlinked entity structure will be
        returned, it is then the responsibility of the caller to
        link it to the document later or free it when not needed
        anymore. """
        ret = libxml2mod.xmlNewEntity(self._o, name, type, ExternalID, SystemID, content)
        if ret is None:raise treeError('xmlNewEntity() failed')
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    def parameterEntity(self, name):
        """Do an entity lookup in the internal and external subsets. """
        ret = libxml2mod.xmlGetParameterEntity(self._o, name)
        if ret is None:raise treeError('xmlGetParameterEntity() failed')
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    #
    # xmlDoc functions from module relaxng
    #
    def relaxNGNewDocParserCtxt(self):
        """Create an XML RelaxNGs parser context for that document.
        Note: since the process of compiling a RelaxNG schemas
        modifies the document, the @doc parameter is duplicated
        internally. """
        ret = libxml2mod.xmlRelaxNGNewDocParserCtxt(self._o)
        if ret is None:raise parserError('xmlRelaxNGNewDocParserCtxt() failed')
        __tmp = relaxNgParserCtxt(_obj=ret)
        return __tmp
    def relaxNGValidateDoc(self, ctxt):
        """Validate a document tree in memory. """
        # Unwrap the optional validation-context wrapper to its C object.
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlRelaxNGValidateDoc(ctxt__o, self._o)
        return ret
    def relaxNGValidateFullElement(self, ctxt, elem):
        """Validate a full subtree when
        xmlRelaxNGValidatePushElement() returned 0 and the content
        of the node has been expanded. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlRelaxNGValidateFullElement(ctxt__o, self._o, elem__o)
        return ret
    def relaxNGValidatePopElement(self, ctxt, elem):
        """Pop the element end from the RelaxNG validation stack. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlRelaxNGValidatePopElement(ctxt__o, self._o, elem__o)
        return ret
    def relaxNGValidatePushElement(self, ctxt, elem):
        """Push a new element start on the RelaxNG validation stack. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlRelaxNGValidatePushElement(ctxt__o, self._o, elem__o)
        return ret
    #
    # xmlDoc functions from module tree
    #
    def copyDoc(self, recursive):
        """Do a copy of the document info. If recursive, the content
        tree will be copied too as well as DTD, namespaces and
        entities. """
        ret = libxml2mod.xmlCopyDoc(self._o, recursive)
        if ret is None:raise treeError('xmlCopyDoc() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def copyNode(self, node, extended):
        """Do a copy of the node to a given document. """
        # Unwrap the optional xmlNode wrapper to its C-level object.
        if node is None: node__o = None
        else: node__o = node._o
        ret = libxml2mod.xmlDocCopyNode(node__o, self._o, extended)
        if ret is None:raise treeError('xmlDocCopyNode() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def copyNodeList(self, node):
        """Do a recursive copy of the node list. """
        if node is None: node__o = None
        else: node__o = node._o
        ret = libxml2mod.xmlDocCopyNodeList(self._o, node__o)
        if ret is None:raise treeError('xmlDocCopyNodeList() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def createIntSubset(self, name, ExternalID, SystemID):
        """Create the internal subset of a document """
        ret = libxml2mod.xmlCreateIntSubset(self._o, name, ExternalID, SystemID)
        if ret is None:raise treeError('xmlCreateIntSubset() failed')
        __tmp = xmlDtd(_obj=ret)
        return __tmp
    def docCompressMode(self):
        """get the compression ratio for a document, ZLIB based """
        ret = libxml2mod.xmlGetDocCompressMode(self._o)
        return ret
    def dump(self, f):
        """Dump an XML document to an open FILE. """
        ret = libxml2mod.xmlDocDump(f, self._o)
        return ret
    def elemDump(self, f, cur):
        """Dump an XML/HTML node, recursive behaviour, children are
        printed too. """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.xmlElemDump(f, self._o, cur__o)
    def formatDump(self, f, format):
        """Dump an XML document to an open FILE. """
        ret = libxml2mod.xmlDocFormatDump(f, self._o, format)
        return ret
    def freeDoc(self):
        """Free up all the structures used by a document, tree
        included. """
        # NOTE(review): after this call self._o points to freed memory;
        # the wrapper must not be used again.
        libxml2mod.xmlFreeDoc(self._o)
    def getRootElement(self):
        """Get the root element of the document (doc->children is a
        list containing possibly comments, PIs, etc ...). """
        ret = libxml2mod.xmlDocGetRootElement(self._o)
        if ret is None:raise treeError('xmlDocGetRootElement() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def intSubset(self):
        """Get the internal subset of a document """
        ret = libxml2mod.xmlGetIntSubset(self._o)
        if ret is None:raise treeError('xmlGetIntSubset() failed')
        __tmp = xmlDtd(_obj=ret)
        return __tmp
    def newCDataBlock(self, content, len):
        """Creation of a new node containing a CDATA block. """
        ret = libxml2mod.xmlNewCDataBlock(self._o, content, len)
        if ret is None:raise treeError('xmlNewCDataBlock() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newCharRef(self, name):
        """Creation of a new character reference node. """
        ret = libxml2mod.xmlNewCharRef(self._o, name)
        if ret is None:raise treeError('xmlNewCharRef() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocComment(self, content):
        """Creation of a new node containing a comment within a
        document. """
        ret = libxml2mod.xmlNewDocComment(self._o, content)
        if ret is None:raise treeError('xmlNewDocComment() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocFragment(self):
        """Creation of a new Fragment node. """
        ret = libxml2mod.xmlNewDocFragment(self._o)
        if ret is None:raise treeError('xmlNewDocFragment() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocNode(self, ns, name, content):
        """Creation of a new node element within a document. @ns and
        @content are optional (None). NOTE: @content is supposed to
        be a piece of XML CDATA, so it allows entity references,
        but XML special chars need to be escaped first by using
        xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
        don't need entities support. """
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlNewDocNode(self._o, ns__o, name, content)
        if ret is None:raise treeError('xmlNewDocNode() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocNodeEatName(self, ns, name, content):
        """Creation of a new node element within a document. @ns and
        @content are optional (None). NOTE: @content is supposed to
        be a piece of XML CDATA, so it allows entity references,
        but XML special chars need to be escaped first by using
        xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
        don't need entities support. """
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlNewDocNodeEatName(self._o, ns__o, name, content)
        if ret is None:raise treeError('xmlNewDocNodeEatName() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocPI(self, name, content):
        """Creation of a processing instruction element. """
        ret = libxml2mod.xmlNewDocPI(self._o, name, content)
        if ret is None:raise treeError('xmlNewDocPI() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocProp(self, name, value):
        """Create a new property carried by a document. """
        ret = libxml2mod.xmlNewDocProp(self._o, name, value)
        if ret is None:raise treeError('xmlNewDocProp() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def newDocRawNode(self, ns, name, content):
        """Creation of a new node element within a document. @ns and
        @content are optional (None). """
        # Unwrap the optional namespace wrapper to its C-level object.
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlNewDocRawNode(self._o, ns__o, name, content)
        if ret is None:raise treeError('xmlNewDocRawNode() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocText(self, content):
        """Creation of a new text node within a document. """
        ret = libxml2mod.xmlNewDocText(self._o, content)
        if ret is None:raise treeError('xmlNewDocText() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocTextLen(self, content, len):
        """Creation of a new text node with an extra content length
        parameter. The text node pertain to a given document. """
        ret = libxml2mod.xmlNewDocTextLen(self._o, content, len)
        if ret is None:raise treeError('xmlNewDocTextLen() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDtd(self, name, ExternalID, SystemID):
        """Creation of a new DTD for the external subset. To create an
        internal subset, use xmlCreateIntSubset(). """
        ret = libxml2mod.xmlNewDtd(self._o, name, ExternalID, SystemID)
        if ret is None:raise treeError('xmlNewDtd() failed')
        __tmp = xmlDtd(_obj=ret)
        return __tmp
    def newGlobalNs(self, href, prefix):
        """Creation of a Namespace, the old way using PI and without
        scoping DEPRECATED !!! """
        ret = libxml2mod.xmlNewGlobalNs(self._o, href, prefix)
        if ret is None:raise treeError('xmlNewGlobalNs() failed')
        __tmp = xmlNs(_obj=ret)
        return __tmp
    def newReference(self, name):
        """Creation of a new reference node. """
        ret = libxml2mod.xmlNewReference(self._o, name)
        if ret is None:raise treeError('xmlNewReference() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def nodeDumpOutput(self, buf, cur, level, format, encoding):
        """Dump an XML node, recursive behaviour, children are printed
        too. Note that @format = 1 provide node indenting only if
        xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was
        called """
        if buf is None: buf__o = None
        else: buf__o = buf._o
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.xmlNodeDumpOutput(buf__o, self._o, cur__o, level, format, encoding)
    def nodeGetBase(self, cur):
        """Searches for the BASE URL. The code should work on both XML
        and HTML document even if base mechanisms are completely
        different. It returns the base as defined in RFC 2396
        sections 5.1.1. Base URI within Document Content and 5.1.2.
        Base URI from the Encapsulating Entity However it does not
        return the document base (5.1.3), use doc->URL in this case """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        ret = libxml2mod.xmlNodeGetBase(self._o, cur__o)
        return ret
    def nodeListGetRawString(self, list, inLine):
        """Builds the string equivalent to the text contained in the
        Node list made of TEXTs and ENTITY_REFs, contrary to
        xmlNodeListGetString() this function doesn't do any
        character encoding handling. """
        if list is None: list__o = None
        else: list__o = list._o
        ret = libxml2mod.xmlNodeListGetRawString(self._o, list__o, inLine)
        return ret
    def nodeListGetString(self, list, inLine):
        """Build the string equivalent to the text contained in the
        Node list made of TEXTs and ENTITY_REFs """
        if list is None: list__o = None
        else: list__o = list._o
        ret = libxml2mod.xmlNodeListGetString(self._o, list__o, inLine)
        return ret
    def reconciliateNs(self, tree):
        """This function checks that all the namespaces declared
        within the given tree are properly declared. This is needed
        for example after Copy or Cut and then paste operations.
        The subtree may still hold pointers to namespace
        declarations outside the subtree or invalid/masked. As much
        as possible the function try to reuse the existing
        namespaces found in the new environment. If not possible
        the new namespaces are redeclared on @tree at the top of
        the given subtree. """
        if tree is None: tree__o = None
        else: tree__o = tree._o
        ret = libxml2mod.xmlReconciliateNs(self._o, tree__o)
        return ret
    def saveFile(self, filename):
        """Dump an XML document to a file. Will use compression if
        compiled in and enabled. If @filename is "-" the stdout
        file is used. """
        ret = libxml2mod.xmlSaveFile(filename, self._o)
        return ret
def saveFileEnc(self, filename, encoding):
"""Dump an XML document, converting it to the given encoding """
ret = libxml2mod.xmlSaveFileEnc(filename, self._o, encoding)
return ret
def saveFileTo(self, buf, encoding):
"""Dump an XML document to an I/O buffer. Warning ! This call
xmlOutputBufferClose() on buf which is not available after
this call. """
if buf is None: buf__o = None
else: buf__o = buf._o
ret = libxml2mod.xmlSaveFileTo(buf__o, self._o, encoding)
return ret
def saveFormatFile(self, filename, format):
"""Dump an XML document to a file. Will use compression if
compiled in and enabled. If @filename is "-" the stdout
file is used. If @format is set then the document will be
indented on output. Note that @format = 1 provide node
indenting only if xmlIndentTreeOutput = 1 or
xmlKeepBlanksDefault(0) was called """
ret = libxml2mod.xmlSaveFormatFile(filename, self._o, format)
return ret
def saveFormatFileEnc(self, filename, encoding, format):
"""Dump an XML document to a file or an URL. """
ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format)
return ret
def saveFormatFileTo(self, buf, encoding, format):
"""Dump an XML document to an I/O buffer. Warning ! This call
xmlOutputBufferClose() on buf which is not available after
this call. """
if buf is None: buf__o = None
else: buf__o = buf._o
ret = libxml2mod.xmlSaveFormatFileTo(buf__o, self._o, encoding, format)
return ret
def searchNs(self, node, nameSpace):
"""Search a Ns registered under a given name space for a
document. recurse on the parents until it finds the defined
namespace or return None otherwise. @nameSpace can be None,
this is a search for the default namespace. We don't allow
to cross entities boundaries. If you don't declare the
namespace within those you will be in troubles !!! A
warning is generated to cover this case. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlSearchNs(self._o, node__o, nameSpace)
if ret is None:raise treeError('xmlSearchNs() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def searchNsByHref(self, node, href):
"""Search a Ns aliasing a given URI. Recurse on the parents
until it finds the defined namespace or return None
otherwise. """
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlSearchNsByHref(self._o, node__o, href)
if ret is None:raise treeError('xmlSearchNsByHref() failed')
__tmp = xmlNs(_obj=ret)
return __tmp
def setDocCompressMode(self, mode):
"""set the compression ratio for a document, ZLIB based
Correct values: 0 (uncompressed) to 9 (max compression) """
libxml2mod.xmlSetDocCompressMode(self._o, mode)
def setListDoc(self, list):
"""update all nodes in the list to point to the right document """
if list is None: list__o = None
else: list__o = list._o
libxml2mod.xmlSetListDoc(list__o, self._o)
def setRootElement(self, root):
"""Set the root element of the document (doc->children is a
list containing possibly comments, PIs, etc ...). """
if root is None: root__o = None
else: root__o = root._o
ret = libxml2mod.xmlDocSetRootElement(self._o, root__o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp
def setTreeDoc(self, tree):
"""update all nodes under the tree to point to the right
document """
if tree is None: tree__o = None
else: tree__o = tree._o
libxml2mod.xmlSetTreeDoc(tree__o, self._o)
def stringGetNodeList(self, value):
"""Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. """
ret = libxml2mod.xmlStringGetNodeList(self._o, value)
if ret is None:raise treeError('xmlStringGetNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def stringLenGetNodeList(self, value, len):
"""Parse the value string and build the node list associated.
Should produce a flat tree with only TEXTs and ENTITY_REFs. """
ret = libxml2mod.xmlStringLenGetNodeList(self._o, value, len)
if ret is None:raise treeError('xmlStringLenGetNodeList() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
    #
    # xmlDoc functions from module valid
    #
    # NOTE(review): DTD-validation wrappers, auto-generated from the
    # libxml2 'valid' module.  Code is byte-identical generated output;
    # only comments were added.  'ctxt' arguments are validation-context
    # wrappers; like all optional wrapper arguments they are unwrapped
    # to their ._o (or None) before the C call.
    # NOTE(review): parameter 'ID' shadows the method name; kept as-is
    # (generated public signature).
    def ID(self, ID):
        """Search the attribute declaring the given ID """
        ret = libxml2mod.xmlGetID(self._o, ID)
        if ret is None:raise treeError('xmlGetID() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def isID(self, elem, attr):
        """Determine whether an attribute is of type ID. In case we
           have DTD(s) then this is done if DTD loading has been
           requested. In the case of HTML documents parsed with the
           HTML parser, then ID detection is done systematically. """
        if elem is None: elem__o = None
        else: elem__o = elem._o
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlIsID(self._o, elem__o, attr__o)
        return ret
    def isMixedElement(self, name):
        """Search in the DtDs whether an element accept Mixed content
           (or ANY) basically if it is supposed to accept text childs """
        ret = libxml2mod.xmlIsMixedElement(self._o, name)
        return ret
    def isRef(self, elem, attr):
        """Determine whether an attribute is of type Ref. In case we
           have DTD(s) then this is simple, otherwise we use an
           heuristic: name Ref (upper or lowercase). """
        if elem is None: elem__o = None
        else: elem__o = elem._o
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlIsRef(self._o, elem__o, attr__o)
        return ret
    def removeID(self, attr):
        """Remove the given attribute from the ID table maintained
           internally. """
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlRemoveID(self._o, attr__o)
        return ret
    def removeRef(self, attr):
        """Remove the given attribute from the Ref table maintained
           internally. """
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlRemoveRef(self._o, attr__o)
        return ret
    def validCtxtNormalizeAttributeValue(self, ctxt, elem, name, value):
        """Does the validation related extra step of the normalization
           of attribute values: If the declared value is not CDATA,
           then the XML processor must further process the normalized
           attribute value by discarding any leading and trailing
           space (#x20) characters, and by replacing sequences of
           space (#x20) characters by single space (#x20) character.
           Also check VC: Standalone Document Declaration in P32, and
           update ctxt->valid accordingly """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidCtxtNormalizeAttributeValue(ctxt__o, self._o, elem__o, name, value)
        return ret
    def validNormalizeAttributeValue(self, elem, name, value):
        """Does the validation related extra step of the normalization
           of attribute values: If the declared value is not CDATA,
           then the XML processor must further process the normalized
           attribute value by discarding any leading and trailing
           space (#x20) characters, and by replacing sequences of
           space (#x20) characters by single space (#x20) character. """
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidNormalizeAttributeValue(self._o, elem__o, name, value)
        return ret
    def validateDocument(self, ctxt):
        """Try to validate the document instance basically it does
           the all the checks described by the XML Rec i.e. validates
           the internal and external subset (if present) and validate
           the document tree. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)
        return ret
    def validateDocumentFinal(self, ctxt):
        """Does the final step for the document validation once all
           the incremental validation steps have been completed
           basically it does the following checks described by the XML
           Rec Check all the IDREF/IDREFS attributes definition for
           validity """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
        return ret
    def validateDtd(self, ctxt, dtd):
        """Try to validate the document against the dtd instance
           Basically it does check all the definitions in the DtD.
           Note the the internal subset (if present) is de-coupled
           (i.e. not used), which could give problems if ID or IDREF
           is present. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if dtd is None: dtd__o = None
        else: dtd__o = dtd._o
        ret = libxml2mod.xmlValidateDtd(ctxt__o, self._o, dtd__o)
        return ret
    def validateDtdFinal(self, ctxt):
        """Does the final step for the dtds validation once all the
           subsets have been parsed basically it does the following
           checks described by the XML Rec - check that ENTITY and
           ENTITIES type attributes default or possible values matches
           one of the defined entities. - check that NOTATION type
           attributes default or possible values matches one of the
           defined notations. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlValidateDtdFinal(ctxt__o, self._o)
        return ret
    def validateElement(self, ctxt, elem):
        """Try to validate the subtree under an element """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o)
        return ret
    def validateNotationUse(self, ctxt, notationName):
        """Validate that the given name match a notation declaration.
           - [ VC: Notation Declared ] """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlValidateNotationUse(ctxt__o, self._o, notationName)
        return ret
    def validateOneAttribute(self, ctxt, elem, attr, value):
        """Try to validate a single attribute for an element basically
           it does the following checks as described by the XML-1.0
           recommendation: - [ VC: Attribute Value Type ] - [ VC:
           Fixed Attribute Default ] - [ VC: Entity Name ] - [ VC:
           Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC: Entity
           Name ] - [ VC: Notation Attributes ] The ID/IDREF
           uniqueness and matching are done separately """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlValidateOneAttribute(ctxt__o, self._o, elem__o, attr__o, value)
        return ret
    def validateOneElement(self, ctxt, elem):
        """Try to validate a single element and it's attributes,
           basically it does the following checks as described by the
           XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
           Required Attribute ] Then call xmlValidateOneAttribute()
           for each attribute present.  The ID/IDREF checkings are
           done separately """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidateOneElement(ctxt__o, self._o, elem__o)
        return ret
    def validateOneNamespace(self, ctxt, elem, prefix, ns, value):
        """Try to validate a single namespace declaration for an
           element basically it does the following checks as described
           by the XML-1.0 recommendation: - [ VC: Attribute Value Type
           ] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] -
           [ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC:
           Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF
           uniqueness and matching are done separately """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlValidateOneNamespace(ctxt__o, self._o, elem__o, prefix, ns__o, value)
        return ret
    def validatePopElement(self, ctxt, elem, qname):
        """Pop the element end from the validation stack. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidatePopElement(ctxt__o, self._o, elem__o, qname)
        return ret
    def validatePushElement(self, ctxt, elem, qname):
        """Push a new element start on the validation stack. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidatePushElement(ctxt__o, self._o, elem__o, qname)
        return ret
    def validateRoot(self, ctxt):
        """Try to validate a the root element basically it does the
           following check as described by the XML-1.0 recommendation:
           - [ VC: Root Element Type ] it doesn't try to recurse or
           apply other check to the element """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlValidateRoot(ctxt__o, self._o)
        return ret
    #
    # xmlDoc functions from module xinclude
    #
    # NOTE(review): auto-generated wrappers for the xinclude, xmlreader,
    # xmlschemas, xpath and xpointer modules.  Code is byte-identical
    # generated output; only comments were added.
    def xincludeProcess(self):
        """Implement the XInclude substitution on the XML document @doc """
        ret = libxml2mod.xmlXIncludeProcess(self._o)
        return ret
    def xincludeProcessFlags(self, flags):
        """Implement the XInclude substitution on the XML document @doc """
        ret = libxml2mod.xmlXIncludeProcessFlags(self._o, flags)
        return ret
    #
    # xmlDoc functions from module xmlreader
    #
    def NewWalker(self, reader):
        """Setup an xmltextReader to parse a preparsed XML document.
           This reuses the existing @reader xmlTextReader. """
        if reader is None: reader__o = None
        else: reader__o = reader._o
        ret = libxml2mod.xmlReaderNewWalker(reader__o, self._o)
        return ret
    def readerWalker(self):
        """Create an xmltextReader for a preparsed document. """
        ret = libxml2mod.xmlReaderWalker(self._o)
        if ret is None:raise treeError('xmlReaderWalker() failed')
        __tmp = xmlTextReader(_obj=ret)
        return __tmp
    #
    # xmlDoc functions from module xmlschemas
    #
    def schemaNewDocParserCtxt(self):
        """Create an XML Schemas parse context for that document. NB.
           The document may be modified during the parsing process. """
        ret = libxml2mod.xmlSchemaNewDocParserCtxt(self._o)
        if ret is None:raise parserError('xmlSchemaNewDocParserCtxt() failed')
        __tmp = SchemaParserCtxt(_obj=ret)
        return __tmp
    def schemaValidateDoc(self, ctxt):
        """Validate a document tree in memory. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o)
        return ret
    #
    # xmlDoc functions from module xpath
    #
    def xpathNewContext(self):
        """Create a new xmlXPathContext """
        ret = libxml2mod.xmlXPathNewContext(self._o)
        if ret is None:raise xpathError('xmlXPathNewContext() failed')
        __tmp = xpathContext(_obj=ret)
        return __tmp
    def xpathOrderDocElems(self):
        """Call this routine to speed up XPath computation on static
           documents. This stamps all the element nodes with the
           document order Like for line information, the order is kept
           in the element->content field, the value stored is actually
           - the node number (starting at -1) to be able to
           differentiate from line numbers. """
        ret = libxml2mod.xmlXPathOrderDocElems(self._o)
        return ret
    #
    # xmlDoc functions from module xpointer
    #
    def xpointerNewContext(self, here, origin):
        """Create a new XPointer context """
        if here is None: here__o = None
        else: here__o = here._o
        if origin is None: origin__o = None
        else: origin__o = origin._o
        ret = libxml2mod.xmlXPtrNewContext(self._o, here__o, origin__o)
        if ret is None:raise treeError('xmlXPtrNewContext() failed')
        __tmp = xpathContext(_obj=ret)
        return __tmp
class parserCtxt(parserCtxtCore):
    """Wrapper around a libxml2 xmlParserCtxt C object.

    The raw C-level parser context is kept in ``self._o`` (the
    convention used by every wrapper class in this module) and is
    owned by this object: it is released through
    ``libxml2mod.xmlFreeParserCtxt`` when the wrapper is finalized.
    """
    def __init__(self, _obj=None):
        # _obj: the raw C parser context handle, or None for an
        # uninitialized wrapper.
        self._o = _obj
        parserCtxtCore.__init__(self, _obj=_obj)
    def __del__(self):
        # PEP 8 / E711: compare against the None singleton with
        # 'is not', never '!='.  Clearing self._o afterwards makes the
        # finalizer idempotent.
        if self._o is not None:
            libxml2mod.xmlFreeParserCtxt(self._o)
        self._o = None
    # accessors for parserCtxt
    # NOTE(review): auto-generated getter/setter wrappers over the
    # libxml2mod C extension.  Code is byte-identical generated output;
    # only comments were added.
    def doc(self):
        """Get the document tree from a parser context. """
        ret = libxml2mod.xmlParserGetDoc(self._o)
        if ret is None:raise parserError('xmlParserGetDoc() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def isValid(self):
        """Get the validity information from a parser context. """
        ret = libxml2mod.xmlParserGetIsValid(self._o)
        return ret
    def lineNumbers(self, linenumbers):
        """Switch on the generation of line number for elements nodes. """
        libxml2mod.xmlParserSetLineNumbers(self._o, linenumbers)
    def loadSubset(self, loadsubset):
        """Switch the parser to load the DTD without validating. """
        libxml2mod.xmlParserSetLoadSubset(self._o, loadsubset)
    def pedantic(self, pedantic):
        """Switch the parser to be pedantic. """
        libxml2mod.xmlParserSetPedantic(self._o, pedantic)
    def replaceEntities(self, replaceEntities):
        """Switch the parser to replace entities. """
        libxml2mod.xmlParserSetReplaceEntities(self._o, replaceEntities)
    def validate(self, validate):
        """Switch the parser to validation mode. """
        libxml2mod.xmlParserSetValidate(self._o, validate)
    def wellFormed(self):
        """Get the well formed information from a parser context. """
        ret = libxml2mod.xmlParserGetWellFormed(self._o)
        return ret
    #
    # parserCtxt functions from module HTMLparser
    #
    # NOTE(review): auto-generated HTML-parser wrappers.  Code is
    # byte-identical generated output; only comments were added.  A None
    # result from libxml2mod is reported by raising treeError.
    def htmlCtxtReadDoc(self, cur, URL, encoding, options):
        """parse an XML in-memory document and build a tree. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.htmlCtxtReadDoc(self._o, cur, URL, encoding, options)
        if ret is None:raise treeError('htmlCtxtReadDoc() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def htmlCtxtReadFd(self, fd, URL, encoding, options):
        """parse an XML from a file descriptor and build a tree. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.htmlCtxtReadFd(self._o, fd, URL, encoding, options)
        if ret is None:raise treeError('htmlCtxtReadFd() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def htmlCtxtReadFile(self, filename, encoding, options):
        """parse an XML file from the filesystem or the network. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.htmlCtxtReadFile(self._o, filename, encoding, options)
        if ret is None:raise treeError('htmlCtxtReadFile() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def htmlCtxtReadMemory(self, buffer, size, URL, encoding, options):
        """parse an XML in-memory document and build a tree. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.htmlCtxtReadMemory(self._o, buffer, size, URL, encoding, options)
        if ret is None:raise treeError('htmlCtxtReadMemory() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def htmlCtxtReset(self):
        """Reset a parser context """
        libxml2mod.htmlCtxtReset(self._o)
    def htmlCtxtUseOptions(self, options):
        """Applies the options to the parser context """
        ret = libxml2mod.htmlCtxtUseOptions(self._o, options)
        return ret
    # Caution: frees the C context held in self._o without clearing the
    # Python-side reference; the wrapper must not be used afterwards.
    def htmlFreeParserCtxt(self):
        """Free all the memory used by a parser context. However the
           parsed document in ctxt->myDoc is not freed. """
        libxml2mod.htmlFreeParserCtxt(self._o)
    def htmlParseCharRef(self):
        """parse Reference declarations [66] CharRef ::= '&#' [0-9]+
           ';' | '&#x' [0-9a-fA-F]+ ';' """
        ret = libxml2mod.htmlParseCharRef(self._o)
        return ret
    def htmlParseChunk(self, chunk, size, terminate):
        """Parse a Chunk of memory """
        ret = libxml2mod.htmlParseChunk(self._o, chunk, size, terminate)
        return ret
    def htmlParseDocument(self):
        """parse an HTML document (and build a tree if using the
           standard SAX interface). """
        ret = libxml2mod.htmlParseDocument(self._o)
        return ret
    def htmlParseElement(self):
        """parse an HTML element, this is highly recursive this is
           kept for compatibility with previous code versions [39]
           element ::= EmptyElemTag | STag content ETag [41]
           Attribute ::= Name Eq AttValue """
        libxml2mod.htmlParseElement(self._o)
    #
    # parserCtxt functions from module parser
    #
    # NOTE(review): auto-generated wrappers for the core XML parser
    # module.  Code is byte-identical generated output; only comments
    # were added.
    def byteConsumed(self):
        """This function provides the current index of the parser
           relative to the start of the current entity. This function
           is computed in bytes from the beginning starting at zero
           and finishing at the size in byte of the file if parsing a
           file. The function is of constant cost if the input is
           UTF-8 but can be costly if run on non-UTF-8 input. """
        ret = libxml2mod.xmlByteConsumed(self._o)
        return ret
    def clearParserCtxt(self):
        """Clear (release owned resources) and reinitialize a parser
           context """
        libxml2mod.xmlClearParserCtxt(self._o)
    def ctxtReadDoc(self, cur, URL, encoding, options):
        """parse an XML in-memory document and build a tree. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.xmlCtxtReadDoc(self._o, cur, URL, encoding, options)
        if ret is None:raise treeError('xmlCtxtReadDoc() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def ctxtReadFd(self, fd, URL, encoding, options):
        """parse an XML from a file descriptor and build a tree. This
           reuses the existing @ctxt parser context NOTE that the file
           descriptor will not be closed when the reader is closed or
           reset. """
        ret = libxml2mod.xmlCtxtReadFd(self._o, fd, URL, encoding, options)
        if ret is None:raise treeError('xmlCtxtReadFd() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def ctxtReadFile(self, filename, encoding, options):
        """parse an XML file from the filesystem or the network. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.xmlCtxtReadFile(self._o, filename, encoding, options)
        if ret is None:raise treeError('xmlCtxtReadFile() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def ctxtReadMemory(self, buffer, size, URL, encoding, options):
        """parse an XML in-memory document and build a tree. This
           reuses the existing @ctxt parser context """
        ret = libxml2mod.xmlCtxtReadMemory(self._o, buffer, size, URL, encoding, options)
        if ret is None:raise treeError('xmlCtxtReadMemory() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def ctxtReset(self):
        """Reset a parser context """
        libxml2mod.xmlCtxtReset(self._o)
    def ctxtResetPush(self, chunk, size, filename, encoding):
        """Reset a push parser context """
        ret = libxml2mod.xmlCtxtResetPush(self._o, chunk, size, filename, encoding)
        return ret
    def ctxtUseOptions(self, options):
        """Applies the options to the parser context """
        ret = libxml2mod.xmlCtxtUseOptions(self._o, options)
        return ret
    def initParserCtxt(self):
        """Initialize a parser context """
        ret = libxml2mod.xmlInitParserCtxt(self._o)
        return ret
    def parseChunk(self, chunk, size, terminate):
        """Parse a Chunk of memory """
        ret = libxml2mod.xmlParseChunk(self._o, chunk, size, terminate)
        return ret
    def parseDocument(self):
        """parse an XML document (and build a tree if using the
           standard SAX interface). [1] document ::= prolog element
           Misc* [22] prolog ::= XMLDecl? Misc* (doctypedecl Misc*)? """
        ret = libxml2mod.xmlParseDocument(self._o)
        return ret
    def parseExtParsedEnt(self):
        """parse a general parsed entity An external general parsed
           entity is well-formed if it matches the production labeled
           extParsedEnt. [78] extParsedEnt ::= TextDecl? content """
        ret = libxml2mod.xmlParseExtParsedEnt(self._o)
        return ret
    def setupParserForBuffer(self, buffer, filename):
        """Setup the parser context to parse a new buffer; Clears any
           prior contents from the parser context. The buffer
           parameter must not be None, but the filename parameter can
           be """
        libxml2mod.xmlSetupParserForBuffer(self._o, buffer, filename)
    def stopParser(self):
        """Blocks further parser processing """
        libxml2mod.xmlStopParser(self._o)
    #
    # parserCtxt functions from module parserInternals
    #
    # NOTE(review): auto-generated wrappers for low-level parser
    # internals; several are documented upstream as deprecated or
    # obsolete.  Code is byte-identical generated output; only comments
    # (and one docstring correction) were added.
    def decodeEntities(self, len, what, end, end2, end3):
        """This function is deprecated, we now always process entities
           content through xmlStringDecodeEntities TODO: remove it in
           next major release.  [67] Reference ::= EntityRef | CharRef
           [69] PEReference ::= '%' Name ';' """
        ret = libxml2mod.xmlDecodeEntities(self._o, len, what, end, end2, end3)
        return ret
    def handleEntity(self, entity):
        """Default handling of defined entities, when should we define
           a new input stream ? When do we just handle that as a set
           of chars ? OBSOLETE: to be removed at some point. """
        if entity is None: entity__o = None
        else: entity__o = entity._o
        libxml2mod.xmlHandleEntity(self._o, entity__o)
    def namespaceParseNCName(self):
        """parse an XML namespace name. TODO: this seems not in use
           anymore, the namespace handling is done on top of the SAX
           interfaces, i.e. not on raw input.  [NS 3] NCName ::=
           (Letter | '_') (NCNameChar)*  [NS 4] NCNameChar ::= Letter
           | Digit | '.' | '-' | '_' | CombiningChar | Extender """
        ret = libxml2mod.xmlNamespaceParseNCName(self._o)
        return ret
    def namespaceParseNSDef(self):
        """parse a namespace prefix declaration TODO: this seems not
           in use anymore, the namespace handling is done on top of
           the SAX interfaces, i.e. not on raw input.  [NS 1] NSDef
           ::= PrefixDef Eq SystemLiteral  [NS 2] PrefixDef ::=
           'xmlns' (':' NCName)? """
        ret = libxml2mod.xmlNamespaceParseNSDef(self._o)
        return ret
    def nextChar(self):
        """Skip to the next char input char. """
        libxml2mod.xmlNextChar(self._o)
    def parseAttValue(self):
        """parse a value for an attribute Note: the parser won't do
           substitution of entities here, this will be handled later
           in xmlStringGetNodeList [10] AttValue ::= '"' ([^<&"] |
           Reference)* '"' | "'" ([^<&'] | Reference)* "'" 3.3.3
           Attribute-Value Normalization: Before the value of an
           attribute is passed to the application or checked for
           validity, the XML processor must normalize it as follows: -
           a character reference is processed by appending the
           referenced character to the attribute value - an entity
           reference is processed by recursively processing the
           replacement text of the entity - a whitespace character
           (#x20, #xD, #xA, #x9) is processed by appending #x20 to the
           normalized value, except that only a single #x20 is
           appended for a "#xD#xA" sequence that is part of an
           external parsed entity or the literal entity value of an
           internal parsed entity - other characters are processed by
           appending them to the normalized value If the declared
           value is not CDATA, then the XML processor must further
           process the normalized attribute value by discarding any
           leading and trailing space (#x20) characters, and by
           replacing sequences of space (#x20) characters by a single
           space (#x20) character.  All attributes for which no
           declaration has been read should be treated by a
           non-validating parser as if declared CDATA. """
        ret = libxml2mod.xmlParseAttValue(self._o)
        return ret
    def parseAttributeListDecl(self):
        """: parse the Attribute list def for an element [52]
           AttlistDecl ::= '<!ATTLIST' S Name AttDef* S? '>' [53]
           AttDef ::= S Name S AttType S DefaultDecl """
        libxml2mod.xmlParseAttributeListDecl(self._o)
    def parseCDSect(self):
        """Parse escaped pure raw content. [18] CDSect ::= CDStart
           CData CDEnd [19] CDStart ::= '<![CDATA[' [20] CData ::=
           (Char* - (Char* ']]>' Char*)) [21] CDEnd ::= ']]>' """
        libxml2mod.xmlParseCDSect(self._o)
    def parseCharData(self, cdata):
        """parse a CharData section. if we are within a CDATA section
           ']]>' marks an end of section. The right angle bracket (>)
           may be represented using the string ">", and must, for
           compatibility, be escaped using ">" or a character
           reference when it appears in the string "]]>" in content,
           when that string is not marking the end of a CDATA section.
           [14] CharData ::= [^<&]* - ([^<&]* ']]>' [^<&]*) """
        libxml2mod.xmlParseCharData(self._o, cdata)
    def parseCharRef(self):
        """parse Reference declarations [66] CharRef ::= '&#' [0-9]+
           ';' | '&#x' [0-9a-fA-F]+ ';' [ WFC: Legal Character ]
           Characters referred to using character references must
           match the production for Char. """
        ret = libxml2mod.xmlParseCharRef(self._o)
        return ret
    def parseComment(self):
        """Skip an XML (SGML) comment <!-- .... --> The spec says that
           "For compatibility, the string "--" (double-hyphen) must
           not occur within comments. " [15] Comment ::= '<!--'
           ((Char - '-') | ('-' (Char - '-')))* '-->' """
        libxml2mod.xmlParseComment(self._o)
    def parseContent(self):
        """Parse a content: [43] content ::= (element | CharData |
           Reference | CDSect | PI | Comment)* """
        libxml2mod.xmlParseContent(self._o)
    def parseDocTypeDecl(self):
        """parse a DOCTYPE declaration [28] doctypedecl ::=
           '<!DOCTYPE' S Name (S ExternalID)? S? ('[' (markupdecl |
           PEReference | S)* ']' S?)? '>' [ VC: Root Element Type ]
           The Name in the document type declaration must match the
           element type of the root element. """
        libxml2mod.xmlParseDocTypeDecl(self._o)
    def parseElement(self):
        """parse an XML element, this is highly recursive [39]
           element ::= EmptyElemTag | STag content ETag [ WFC:
           Element Type Match ] The Name in an element's end-tag must
           match the element type in the start-tag. """
        libxml2mod.xmlParseElement(self._o)
    def parseElementDecl(self):
        """parse an Element declaration. [45] elementdecl ::=
           '<!ELEMENT' S Name S contentspec S? '>' [ VC: Unique
           Element Type Declaration ] No element type may be declared
           more than once """
        ret = libxml2mod.xmlParseElementDecl(self._o)
        return ret
    def parseEncName(self):
        """parse the XML encoding name [81] EncName ::= [A-Za-z]
           ([A-Za-z0-9._] | '-')* """
        ret = libxml2mod.xmlParseEncName(self._o)
        return ret
    def parseEncodingDecl(self):
        """parse the XML encoding declaration [80] EncodingDecl ::= S
           'encoding' Eq ('"' EncName '"' |  "'" EncName "'") this
           setups the conversion filters. """
        ret = libxml2mod.xmlParseEncodingDecl(self._o)
        return ret
    def parseEndTag(self):
        """parse an end of tag [42] ETag ::= '</' Name S? '>' With
           namespace [NS 9] ETag ::= '</' QName S? '>' """
        libxml2mod.xmlParseEndTag(self._o)
    def parseEntityDecl(self):
        """parse <!ENTITY declarations [70] EntityDecl ::= GEDecl |
           PEDecl [71] GEDecl ::= '<!ENTITY' S Name S EntityDef S?
           '>' [72] PEDecl ::= '<!ENTITY' S '%' S Name S PEDef S? '>'
           [73] EntityDef ::= EntityValue | (ExternalID NDataDecl?)
           [74] PEDef ::= EntityValue | ExternalID [76] NDataDecl ::=
           S 'NDATA' S Name [ VC: Notation Declared ] The Name must
           match the declared name of a notation. """
        libxml2mod.xmlParseEntityDecl(self._o)
    # NOTE(review): unlike most void wrappers here, a None result is
    # treated as failure and raised as parserError, wrapped otherwise.
    def parseEntityRef(self):
        """parse ENTITY references declarations [68] EntityRef ::=
           '&' Name ';' [ WFC: Entity Declared ] In a document
           without any DTD, a document with only an internal DTD
           subset which contains no parameter entity references, or a
           document with "standalone='yes'", the Name given in the
           entity reference must match that in an entity declaration,
           except that well-formed documents need not declare any of
           the following entities: amp, lt, gt, apos, quot.  The
           declaration of a parameter entity must precede any
           reference to it.  Similarly, the declaration of a general
           entity must precede any reference to it which appears in a
           default value in an attribute-list declaration. Note that
           if entities are declared in the external subset or in
           external parameter entities, a non-validating processor is
           not obligated to read and process their declarations; for
           such documents, the rule that an entity must be declared is
           a well-formedness constraint only if standalone='yes'. [
           WFC: Parsed Entity ] An entity reference must not contain
           the name of an unparsed entity """
        ret = libxml2mod.xmlParseEntityRef(self._o)
        if ret is None:raise parserError('xmlParseEntityRef() failed')
        __tmp = xmlEntity(_obj=ret)
        return __tmp
    def parseExternalSubset(self, ExternalID, SystemID):
        """parse Markup declarations from an external subset [30]
           extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl
           ::= (markupdecl | conditionalSect | PEReference | S) * """
        libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID)
    def parseMarkupDecl(self):
        """parse Markup declarations [29] markupdecl ::= elementdecl
           | AttlistDecl | EntityDecl | NotationDecl | PI | Comment [
           VC: Proper Declaration/PE Nesting ] Parameter-entity
           replacement text must be properly nested with markup
           declarations. That is to say, if either the first character
           or the last character of a markup declaration (markupdecl
           above) is contained in the replacement text for a
           parameter-entity reference, both must be contained in the
           same replacement text. [ WFC: PEs in Internal Subset ] In
           the internal DTD subset, parameter-entity references can
           occur only where markup declarations can occur, not within
           markup declarations. (This does not apply to references
           that occur in external parameter entities or to the
           external subset.) """
        libxml2mod.xmlParseMarkupDecl(self._o)
    def parseMisc(self):
        """parse an XML Misc* optional field.  [27] Misc ::= Comment |
           PI |  S """
        libxml2mod.xmlParseMisc(self._o)
    def parseName(self):
        """parse an XML name.  [4] NameChar ::= Letter | Digit | '.' |
           '-' | '_' | ':' | CombiningChar | Extender [5] Name ::=
           (Letter | '_' | ':') (NameChar)* [6] Names ::= Name (#x20
           Name)* """
        ret = libxml2mod.xmlParseName(self._o)
        return ret
    def parseNamespace(self):
        """xmlParseNamespace: parse specific PI '<?namespace ...'
           constructs.  This is what the older xml-name Working Draft
           specified, a bunch of other stuff may still rely on it, so
           support is still here as if it was declared on the root of
           the Tree:-( TODO: remove from library To be removed at
           next drop of binary compatibility """
        libxml2mod.xmlParseNamespace(self._o)
def parseNmtoken(self):
"""parse an XML Nmtoken. [7] Nmtoken ::= (NameChar)+ [8]
Nmtokens ::= Nmtoken (#x20 Nmtoken)* """
ret = libxml2mod.xmlParseNmtoken(self._o)
return ret
def parseNotationDecl(self):
"""parse a notation declaration [82] NotationDecl ::=
'<!NOTATION' S Name S (ExternalID | PublicID) S? '>'
Hence there is actually 3 choices: 'PUBLIC' S PubidLiteral
'PUBLIC' S PubidLiteral S SystemLiteral and 'SYSTEM' S
SystemLiteral See the NOTE on xmlParseExternalID(). """
libxml2mod.xmlParseNotationDecl(self._o)
def parsePEReference(self):
"""parse PEReference declarations The entity content is
handled directly by pushing it's content as a new input
stream. [69] PEReference ::= '%' Name ';' [ WFC: No
Recursion ] A parsed entity must not contain a recursive
reference to itself, either directly or indirectly. [ WFC:
Entity Declared ] In a document without any DTD, a document
with only an internal DTD subset which contains no
parameter entity references, or a document with
"standalone='yes'", ... ... The declaration of a parameter
entity must precede any reference to it... [ VC: Entity
Declared ] In a document with an external subset or
external parameter entities with "standalone='no'", ...
... The declaration of a parameter entity must precede any
reference to it... [ WFC: In DTD ] Parameter-entity
references may only appear in the DTD. NOTE: misleading but
this is handled. """
libxml2mod.xmlParsePEReference(self._o)
def parsePI(self):
"""parse an XML Processing Instruction. [16] PI ::= '<?'
PITarget (S (Char* - (Char* '?>' Char*)))? '?>' The
processing is transfered to SAX once parsed. """
libxml2mod.xmlParsePI(self._o)
def parsePITarget(self):
"""parse the name of a PI [17] PITarget ::= Name - (('X' |
'x') ('M' | 'm') ('L' | 'l')) """
ret = libxml2mod.xmlParsePITarget(self._o)
return ret
def parsePubidLiteral(self):
"""parse an XML public literal [12] PubidLiteral ::= '"'
PubidChar* '"' | "'" (PubidChar - "'")* "'" """
ret = libxml2mod.xmlParsePubidLiteral(self._o)
return ret
def parseQuotedString(self):
"""Parse and return a string between quotes or doublequotes
TODO: Deprecated, to be removed at next drop of binary
compatibility """
ret = libxml2mod.xmlParseQuotedString(self._o)
return ret
def parseReference(self):
"""parse and handle entity references in content, depending on
the SAX interface, this may end-up in a call to character()
if this is a CharRef, a predefined entity, if there is no
reference() callback. or if the parser was asked to switch
to that mode. [67] Reference ::= EntityRef | CharRef """
libxml2mod.xmlParseReference(self._o)
    def parseSDDecl(self):
        """parse the XML standalone declaration [32] SDDecl ::= S
           'standalone' Eq (("'" ('yes' | 'no') "'") | ('"' ('yes' |
           'no')'"')) [ VC: Standalone Document Declaration ] TODO
           The standalone document declaration must have the value
           "no" if any external markup declarations contain
           declarations of: - attributes with default values, if
           elements to which these attributes apply appear in the
           document without specifications of values for these
           attributes, or - entities (other than amp, lt, gt, apos,
           quot), if references to those entities appear in the
           document, or - attributes with values subject to
           normalization, where the attribute appears in the document
           with a value which will change as a result of
           normalization, or - element types with element content, if
           white space occurs directly within any instance of those
           types. """
        ret = libxml2mod.xmlParseSDDecl(self._o)
        return ret
    def parseStartTag(self):
        """parse a start of tag either for rule element or
           EmptyElement. In both case we don't parse the tag closing
           chars. [40] STag ::= '<' Name (S Attribute)* S? '>' [
           WFC: Unique Att Spec ] No attribute name may appear more
           than once in the same start-tag or empty-element tag. [44]
           EmptyElemTag ::= '<' Name (S Attribute)* S? '/>' [ WFC:
           Unique Att Spec ] No attribute name may appear more than
           once in the same start-tag or empty-element tag. With
           namespace: [NS 8] STag ::= '<' QName (S Attribute)* S? '>'
           [NS 10] EmptyElement ::= '<' QName (S Attribute)* S? '/>' """
        ret = libxml2mod.xmlParseStartTag(self._o)
        return ret
    def parseSystemLiteral(self):
        """parse an XML Literal [11] SystemLiteral ::= ('"' [^"]*
           '"') | ("'" [^']* "'") """
        ret = libxml2mod.xmlParseSystemLiteral(self._o)
        return ret
    def parseTextDecl(self):
        """parse an XML declaration header for external entities [77]
           TextDecl ::= '<?xml' VersionInfo? EncodingDecl S? '?>' """
        libxml2mod.xmlParseTextDecl(self._o)
    def parseVersionInfo(self):
        """parse the XML version. [24] VersionInfo ::= S 'version' Eq
           (' VersionNum ' | " VersionNum ") [25] Eq ::= S? '=' S? """
        ret = libxml2mod.xmlParseVersionInfo(self._o)
        return ret
    def parseVersionNum(self):
        """parse the XML version value. [26] VersionNum ::= '1.'
           [0-9]+ In practice allow [0-9].[0-9]+ at that level """
        ret = libxml2mod.xmlParseVersionNum(self._o)
        return ret
    def parseXMLDecl(self):
        """parse an XML declaration header [23] XMLDecl ::= '<?xml'
           VersionInfo EncodingDecl? SDDecl? S? '?>' """
        libxml2mod.xmlParseXMLDecl(self._o)
    def parserHandlePEReference(self):
        """[69] PEReference ::= '%' Name ';' [ WFC: No Recursion ] A
           parsed entity must not contain a recursive reference to
           itself, either directly or indirectly. [ WFC: Entity
           Declared ] In a document without any DTD, a document with
           only an internal DTD subset which contains no parameter
           entity references, or a document with "standalone='yes'",
           ... ... The declaration of a parameter entity must precede
           any reference to it... [ VC: Entity Declared ] In a
           document with an external subset or external parameter
           entities with "standalone='no'", ... ... The declaration
           of a parameter entity must precede any reference to it...
           [ WFC: In DTD ] Parameter-entity references may only appear
           in the DTD. NOTE: misleading but this is handled. A
           PEReference may have been detected in the current input
           stream the handling is done accordingly to
           http://www.w3.org/TR/REC-xml#entproc i.e. - Included in
           literal in entity values - Included as Parameter Entity
           reference within DTDs """
        libxml2mod.xmlParserHandlePEReference(self._o)
    def parserHandleReference(self):
        """TODO: Remove, now deprecated ... the test is done directly
           in the content parsing routines. [67] Reference ::=
           EntityRef | CharRef [68] EntityRef ::= '&' Name ';' [
           WFC: Entity Declared ] the Name given in the entity
           reference must match that in an entity declaration, except
           that well-formed documents need not declare any of the
           following entities: amp, lt, gt, apos, quot. [ WFC: Parsed
           Entity ] An entity reference must not contain the name of
           an unparsed entity [66] CharRef ::= '&#' [0-9]+ ';' |
           '&#x' [0-9a-fA-F]+ ';' A PEReference may have been
           detected in the current input stream the handling is done
           accordingly to http://www.w3.org/TR/REC-xml#entproc """
        libxml2mod.xmlParserHandleReference(self._o)
    def popInput(self):
        """xmlPopInput: the current input pointed by ctxt->input came
           to an end, pop it and return the next char. """
        ret = libxml2mod.xmlPopInput(self._o)
        return ret
    def scanName(self):
        """Trickery: parse an XML name but without consuming the input
           flow Needed for rollback cases. Used only when parsing
           entities references. TODO: seems deprecated now, only used
           in the default part of xmlParserHandleReference [4]
           NameChar ::= Letter | Digit | '.' | '-' | '_' | ':' |
           CombiningChar | Extender [5] Name ::= (Letter | '_' | ':')
           (NameChar)* [6] Names ::= Name (S Name)* """
        ret = libxml2mod.xmlScanName(self._o)
        return ret
    def skipBlankChars(self):
        """skip all blanks character found at that point in the input
           streams. It pops up finished entities in the process if
           allowable at that point. """
        ret = libxml2mod.xmlSkipBlankChars(self._o)
        return ret
    def stringDecodeEntities(self, str, what, end, end2, end3):
        """Takes an entity string content and process to do the
           adequate substitutions. [67] Reference ::= EntityRef |
           CharRef [69] PEReference ::= '%' Name ';' """
        ret = libxml2mod.xmlStringDecodeEntities(self._o, str, what, end, end2, end3)
        return ret
    def stringLenDecodeEntities(self, str, len, what, end, end2, end3):
        """Takes an entity string content and process to do the
           adequate substitutions. [67] Reference ::= EntityRef |
           CharRef [69] PEReference ::= '%' Name ';' """
        ret = libxml2mod.xmlStringLenDecodeEntities(self._o, str, len, what, end, end2, end3)
        return ret
class xmlAttr(xmlNode):
    """Python wrapper for a libxml2 attribute (xmlAttr) node.

    Auto-generated binding: every method delegates to the matching
    libxml2mod C-level function, passing the wrapped object self._o.
    """
    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0: raise TypeError('xmlAttr got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        return "<xmlAttr (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
    #
    # xmlAttr functions from module debugXML
    #
    def debugDumpAttr(self, output, depth):
        """Dumps debug information for the attribute """
        libxml2mod.xmlDebugDumpAttr(output, self._o, depth)
    def debugDumpAttrList(self, output, depth):
        """Dumps debug information for the attribute list """
        libxml2mod.xmlDebugDumpAttrList(output, self._o, depth)
    #
    # xmlAttr functions from module tree
    #
    def copyProp(self, target):
        """Do a copy of the attribute. """
        if target is None: target__o = None
        else: target__o = target._o
        ret = libxml2mod.xmlCopyProp(target__o, self._o)
        if ret is None:raise treeError('xmlCopyProp() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def copyPropList(self, target):
        """Do a copy of an attribute list. """
        if target is None: target__o = None
        else: target__o = target._o
        ret = libxml2mod.xmlCopyPropList(target__o, self._o)
        if ret is None:raise treeError('xmlCopyPropList() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def freeProp(self):
        """Free one attribute, all the content is freed too """
        libxml2mod.xmlFreeProp(self._o)
    def freePropList(self):
        """Free a property and all its siblings, all the children are
           freed too. """
        libxml2mod.xmlFreePropList(self._o)
    def removeProp(self):
        """Unlink and free one attribute, all the content is freed too
           Note this doesn't work for namespace definition attributes """
        ret = libxml2mod.xmlRemoveProp(self._o)
        return ret
    #
    # xmlAttr functions from module valid
    #
    def removeID(self, doc):
        """Remove the given attribute from the ID table maintained
           internally. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlRemoveID(doc__o, self._o)
        return ret
    def removeRef(self, doc):
        """Remove the given attribute from the Ref table maintained
           internally. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlRemoveRef(doc__o, self._o)
        return ret
class xmlAttribute(xmlNode):
    """Python wrapper for a libxml2 attribute declaration
       (xmlAttribute) node, as found in a DTD. Auto-generated binding."""
    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0: raise TypeError('xmlAttribute got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        return "<xmlAttribute (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
class catalog:
    """Python wrapper around a libxml2 catalog (xmlCatalog).

    The instance owns the underlying C object it wraps: when the
    wrapper is garbage-collected, __del__ releases the C catalog via
    xmlFreeCatalog().
    """
    def __init__(self, _obj=None):
        # Wrap an existing C-level object if one is supplied;
        # otherwise start out empty (self._o is None).
        if _obj is not None:
            self._o = _obj
            return
        self._o = None
    def __del__(self):
        # Free the underlying C object exactly once; clearing _o
        # makes repeated finalization a no-op.
        if self._o is not None:
            libxml2mod.xmlFreeCatalog(self._o)
        self._o = None
    #
    # catalog functions from module catalog
    #
    def add(self, type, orig, replace):
        """Add an entry in the catalog, it may overwrite existing but
           different entries. """
        ret = libxml2mod.xmlACatalogAdd(self._o, type, orig, replace)
        return ret
    def catalogIsEmpty(self):
        """Check if a catalog is empty """
        ret = libxml2mod.xmlCatalogIsEmpty(self._o)
        return ret
    def convertSGMLCatalog(self):
        """Convert all the SGML catalog entries as XML ones """
        ret = libxml2mod.xmlConvertSGMLCatalog(self._o)
        return ret
    def dump(self, out):
        """Dump the given catalog to the given file. """
        libxml2mod.xmlACatalogDump(self._o, out)
    def remove(self, value):
        """Remove an entry from the catalog """
        ret = libxml2mod.xmlACatalogRemove(self._o, value)
        return ret
    def resolve(self, pubID, sysID):
        """Do a complete resolution lookup of an External Identifier """
        ret = libxml2mod.xmlACatalogResolve(self._o, pubID, sysID)
        return ret
    def resolvePublic(self, pubID):
        """Try to lookup the catalog local reference associated to a
           public ID in that catalog """
        ret = libxml2mod.xmlACatalogResolvePublic(self._o, pubID)
        return ret
    def resolveSystem(self, sysID):
        """Try to lookup the catalog resource for a system ID """
        ret = libxml2mod.xmlACatalogResolveSystem(self._o, sysID)
        return ret
    def resolveURI(self, URI):
        """Do a complete resolution lookup of an URI """
        ret = libxml2mod.xmlACatalogResolveURI(self._o, URI)
        return ret
class xmlDtd(xmlNode):
    """Python wrapper for a libxml2 DTD (xmlDtd) node.

    Auto-generated binding: every method delegates to the matching
    libxml2mod C-level function, passing the wrapped object self._o.
    """
    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0: raise TypeError('xmlDtd got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        return "<xmlDtd (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
    #
    # xmlDtd functions from module debugXML
    #
    def debugDumpDTD(self, output):
        """Dumps debug information for the DTD """
        libxml2mod.xmlDebugDumpDTD(output, self._o)
    #
    # xmlDtd functions from module tree
    #
    def copyDtd(self):
        """Do a copy of the dtd. """
        ret = libxml2mod.xmlCopyDtd(self._o)
        if ret is None:raise treeError('xmlCopyDtd() failed')
        __tmp = xmlDtd(_obj=ret)
        return __tmp
    def freeDtd(self):
        """Free a DTD structure. """
        libxml2mod.xmlFreeDtd(self._o)
    #
    # xmlDtd functions from module valid
    #
    def dtdAttrDesc(self, elem, name):
        """Search the DTD for the description of this attribute on
           this element. """
        ret = libxml2mod.xmlGetDtdAttrDesc(self._o, elem, name)
        if ret is None:raise treeError('xmlGetDtdAttrDesc() failed')
        __tmp = xmlAttribute(_obj=ret)
        return __tmp
    def dtdElementDesc(self, name):
        """Search the DTD for the description of this element """
        ret = libxml2mod.xmlGetDtdElementDesc(self._o, name)
        if ret is None:raise treeError('xmlGetDtdElementDesc() failed')
        __tmp = xmlElement(_obj=ret)
        return __tmp
    def dtdQAttrDesc(self, elem, name, prefix):
        """Search the DTD for the description of this qualified
           attribute on this element. """
        ret = libxml2mod.xmlGetDtdQAttrDesc(self._o, elem, name, prefix)
        if ret is None:raise treeError('xmlGetDtdQAttrDesc() failed')
        __tmp = xmlAttribute(_obj=ret)
        return __tmp
    def dtdQElementDesc(self, name, prefix):
        """Search the DTD for the description of this element """
        ret = libxml2mod.xmlGetDtdQElementDesc(self._o, name, prefix)
        if ret is None:raise treeError('xmlGetDtdQElementDesc() failed')
        __tmp = xmlElement(_obj=ret)
        return __tmp
class xmlElement(xmlNode):
    """Python wrapper for a libxml2 element declaration (xmlElement)
       node, as found in a DTD. Auto-generated binding."""
    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0: raise TypeError('xmlElement got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        return "<xmlElement (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
class xmlEntity(xmlNode):
    """Python wrapper for a libxml2 entity (xmlEntity) node.
       Auto-generated binding."""
    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0: raise TypeError('xmlEntity got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        return "<xmlEntity (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
    #
    # xmlEntity functions from module parserInternals
    #
    def handleEntity(self, ctxt):
        """Default handling of defined entities, when should we define
           a new input stream ? When do we just handle that as a set
           of chars ? OBSOLETE: to be removed at some point. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        libxml2mod.xmlHandleEntity(ctxt__o, self._o)
class Error:
    """Python wrapper around a libxml2 error record (xmlError).

    Exposes read-only accessors for the fields of the last reported
    error. Unlike most wrappers in this module it does not free the
    underlying C object on destruction.
    """
    def __init__(self, _obj=None):
        # Wrap an existing C-level object if one is supplied;
        # otherwise start out empty (self._o is None).
        if _obj is not None:
            self._o = _obj
            return
        self._o = None
    # accessors for Error
    def code(self):
        """The error code, e.g. an xmlParserError """
        ret = libxml2mod.xmlErrorGetCode(self._o)
        return ret
    def domain(self):
        """What part of the library raised this error """
        ret = libxml2mod.xmlErrorGetDomain(self._o)
        return ret
    def file(self):
        """the filename """
        ret = libxml2mod.xmlErrorGetFile(self._o)
        return ret
    def level(self):
        """how consequent is the error """
        ret = libxml2mod.xmlErrorGetLevel(self._o)
        return ret
    def line(self):
        """the line number if available """
        ret = libxml2mod.xmlErrorGetLine(self._o)
        return ret
    def message(self):
        """human-readable informative error message """
        ret = libxml2mod.xmlErrorGetMessage(self._o)
        return ret
    #
    # Error functions from module xmlerror
    #
    def copyError(self, to):
        """Save the original error to the new place. """
        if to is None: to__o = None
        else: to__o = to._o
        ret = libxml2mod.xmlCopyError(self._o, to__o)
        return ret
    def resetError(self):
        """Cleanup the error. """
        libxml2mod.xmlResetError(self._o)
class xmlNs(xmlNode):
    """Python wrapper for a libxml2 namespace (xmlNs) node.

    Auto-generated binding: every method delegates to the matching
    libxml2mod C-level function, passing the wrapped object self._o.
    """
    def __init__(self, _obj=None):
        if checkWrapper(_obj) != 0: raise TypeError('xmlNs got a wrong wrapper object type')
        self._o = _obj
        xmlNode.__init__(self, _obj=_obj)
    def __repr__(self):
        return "<xmlNs (%s) object at 0x%x>" % (self.name, int(pos_id (self)))
    #
    # xmlNs functions from module tree
    #
    def copyNamespace(self):
        """Do a copy of the namespace. """
        ret = libxml2mod.xmlCopyNamespace(self._o)
        if ret is None:raise treeError('xmlCopyNamespace() failed')
        __tmp = xmlNs(_obj=ret)
        return __tmp
    def copyNamespaceList(self):
        """Do a copy of a namespace list. """
        ret = libxml2mod.xmlCopyNamespaceList(self._o)
        if ret is None:raise treeError('xmlCopyNamespaceList() failed')
        __tmp = xmlNs(_obj=ret)
        return __tmp
    def freeNs(self):
        """Free up the structures associated to a namespace """
        libxml2mod.xmlFreeNs(self._o)
    def freeNsList(self):
        """Free up all the structures associated to the chained
           namespaces. """
        libxml2mod.xmlFreeNsList(self._o)
    def newChild(self, parent, name, content):
        """Creation of a new child element, added at the end of
           @parent children list. @ns and @content parameters are
           optional (None). If @ns is None, the newly created element
           inherits the namespace of @parent. If @content is non None,
           a child list containing the TEXTs and ENTITY_REFs node will
           be created. NOTE: @content is supposed to be a piece of XML
           CDATA, so it allows entity references. XML special chars
           must be escaped first by using
           xmlEncodeEntitiesReentrant(), or xmlNewTextChild() should
           be used. """
        if parent is None: parent__o = None
        else: parent__o = parent._o
        ret = libxml2mod.xmlNewChild(parent__o, self._o, name, content)
        if ret is None:raise treeError('xmlNewChild() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocNode(self, doc, name, content):
        """Creation of a new node element within a document. @ns and
           @content are optional (None). NOTE: @content is supposed to
           be a piece of XML CDATA, so it allows entity references,
           but XML special chars need to be escaped first by using
           xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
           don't need entities support. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlNewDocNode(doc__o, self._o, name, content)
        if ret is None:raise treeError('xmlNewDocNode() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocNodeEatName(self, doc, name, content):
        """Creation of a new node element within a document. @ns and
           @content are optional (None). NOTE: @content is supposed to
           be a piece of XML CDATA, so it allows entity references,
           but XML special chars need to be escaped first by using
           xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
           don't need entities support. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlNewDocNodeEatName(doc__o, self._o, name, content)
        if ret is None:raise treeError('xmlNewDocNodeEatName() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newDocRawNode(self, doc, name, content):
        """Creation of a new node element within a document. @ns and
           @content are optional (None). """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlNewDocRawNode(doc__o, self._o, name, content)
        if ret is None:raise treeError('xmlNewDocRawNode() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newNodeEatName(self, name):
        """Creation of a new node element. @ns is optional (None). """
        ret = libxml2mod.xmlNewNodeEatName(self._o, name)
        if ret is None:raise treeError('xmlNewNodeEatName() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def newNsProp(self, node, name, value):
        """Create a new property tagged with a namespace and carried
           by a node. """
        if node is None: node__o = None
        else: node__o = node._o
        ret = libxml2mod.xmlNewNsProp(node__o, self._o, name, value)
        if ret is None:raise treeError('xmlNewNsProp() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def newNsPropEatName(self, node, name, value):
        """Create a new property tagged with a namespace and carried
           by a node. """
        if node is None: node__o = None
        else: node__o = node._o
        ret = libxml2mod.xmlNewNsPropEatName(node__o, self._o, name, value)
        if ret is None:raise treeError('xmlNewNsPropEatName() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def newTextChild(self, parent, name, content):
        """Creation of a new child element, added at the end of
           @parent children list. @ns and @content parameters are
           optional (None). If @ns is None, the newly created element
           inherits the namespace of @parent. If @content is non None,
           a child TEXT node will be created containing the string
           @content. NOTE: Use xmlNewChild() if @content will contain
           entities that need to be preserved. Use this function,
           xmlNewTextChild(), if you need to ensure that reserved XML
           chars that might appear in @content, such as the ampersand,
           greater-than or less-than signs, are automatically replaced
           by their XML escaped entity representations. """
        if parent is None: parent__o = None
        else: parent__o = parent._o
        ret = libxml2mod.xmlNewTextChild(parent__o, self._o, name, content)
        if ret is None:raise treeError('xmlNewTextChild() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def setNs(self, node):
        """Associate a namespace to a node, a posteriori. """
        if node is None: node__o = None
        else: node__o = node._o
        libxml2mod.xmlSetNs(node__o, self._o)
    def setNsProp(self, node, name, value):
        """Set (or reset) an attribute carried by a node. The ns
           structure must be in scope, this is not checked """
        if node is None: node__o = None
        else: node__o = node._o
        ret = libxml2mod.xmlSetNsProp(node__o, self._o, name, value)
        if ret is None:raise treeError('xmlSetNsProp() failed')
        __tmp = xmlAttr(_obj=ret)
        return __tmp
    def unsetNsProp(self, node, name):
        """Remove an attribute carried by a node. """
        if node is None: node__o = None
        else: node__o = node._o
        ret = libxml2mod.xmlUnsetNsProp(node__o, self._o, name)
        return ret
    #
    # xmlNs functions from module xpathInternals
    #
    def xpathNodeSetFreeNs(self):
        """Namespace nodes in libxml don't match the XPath semantic.
           In a node set the namespace nodes are duplicated and the
           next pointer is set to the parent node in the XPath
           semantic. Check if such a node needs to be freed """
        libxml2mod.xmlXPathNodeSetFreeNs(self._o)
class outputBuffer(ioWriteWrapper):
    """Python wrapper for a libxml2 output buffer (xmlOutputBuffer).

    Auto-generated binding: every method delegates to the matching
    libxml2mod C-level function, passing the wrapped object self._o.
    """
    def __init__(self, _obj=None):
        self._o = _obj
        ioWriteWrapper.__init__(self, _obj=_obj)
    #
    # outputBuffer functions from module HTMLtree
    #
    def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
        """Dump an HTML document. """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format)
    def htmlDocContentDumpOutput(self, cur, encoding):
        """Dump an HTML document. Formatting returns/spaces are added. """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlDocContentDumpOutput(self._o, cur__o, encoding)
    def htmlNodeDumpFormatOutput(self, doc, cur, encoding, format):
        """Dump an HTML node, recursive behaviour, children are printed
           too. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlNodeDumpFormatOutput(self._o, doc__o, cur__o, encoding, format)
    def htmlNodeDumpOutput(self, doc, cur, encoding):
        """Dump an HTML node, recursive behaviour, children are printed
           too, and formatting returns/spaces are added. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.htmlNodeDumpOutput(self._o, doc__o, cur__o, encoding)
    #
    # outputBuffer functions from module tree
    #
    def nodeDumpOutput(self, doc, cur, level, format, encoding):
        """Dump an XML node, recursive behaviour, children are printed
           too. Note that @format = 1 provide node indenting only if
           xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was
           called """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if cur is None: cur__o = None
        else: cur__o = cur._o
        libxml2mod.xmlNodeDumpOutput(self._o, doc__o, cur__o, level, format, encoding)
    def saveFileTo(self, cur, encoding):
        """Dump an XML document to an I/O buffer. Warning ! This calls
           xmlOutputBufferClose() on buf which is not available after
           this call. """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        ret = libxml2mod.xmlSaveFileTo(self._o, cur__o, encoding)
        return ret
    def saveFormatFileTo(self, cur, encoding, format):
        """Dump an XML document to an I/O buffer. Warning ! This calls
           xmlOutputBufferClose() on buf which is not available after
           this call. """
        if cur is None: cur__o = None
        else: cur__o = cur._o
        ret = libxml2mod.xmlSaveFormatFileTo(self._o, cur__o, encoding, format)
        return ret
    #
    # outputBuffer functions from module xmlIO
    #
    def getContent(self):
        """Gives a pointer to the data currently held in the output
           buffer """
        ret = libxml2mod.xmlOutputBufferGetContent(self._o)
        return ret
    def write(self, len, buf):
        """Write the content of the array in the output I/O buffer
           This routine handles the I18N transcoding from internal
           UTF-8 The buffer is lossless, i.e. will store in case of
           partial or delayed writes. """
        ret = libxml2mod.xmlOutputBufferWrite(self._o, len, buf)
        return ret
    def writeString(self, str):
        """Write the content of the string in the output I/O buffer
           This routine handles the I18N transcoding from internal
           UTF-8 The buffer is lossless, i.e. will store in case of
           partial or delayed writes. """
        ret = libxml2mod.xmlOutputBufferWriteString(self._o, str)
        return ret
class inputBuffer(ioReadWrapper):
    """Python wrapper for a libxml2 parser input buffer
       (xmlParserInputBuffer). The wrapped C object is freed via
       xmlFreeParserInputBuffer() on destruction."""
    def __init__(self, _obj=None):
        self._o = _obj
        ioReadWrapper.__init__(self, _obj=_obj)
    def __del__(self):
        if self._o != None:
            libxml2mod.xmlFreeParserInputBuffer(self._o)
        self._o = None
    #
    # inputBuffer functions from module xmlIO
    #
    def grow(self, len):
        """Grow up the content of the input buffer, the old data are
           preserved This routine handles the I18N transcoding to
           internal UTF-8 This routine is used when operating the
           parser in normal (pull) mode TODO: one should be able to
           remove one extra copy by copying directly onto in->buffer
           or in->raw """
        ret = libxml2mod.xmlParserInputBufferGrow(self._o, len)
        return ret
    def push(self, len, buf):
        """Push the content of the array in the input buffer This
           routine handles the I18N transcoding to internal UTF-8 This
           is used when operating the parser in progressive (push)
           mode. """
        ret = libxml2mod.xmlParserInputBufferPush(self._o, len, buf)
        return ret
    def read(self, len):
        """Refresh the content of the input buffer, the old data are
           considered consumed This routine handles the I18N
           transcoding to internal UTF-8 """
        ret = libxml2mod.xmlParserInputBufferRead(self._o, len)
        return ret
    #
    # inputBuffer functions from module xmlreader
    #
    def Setup(self, reader, URL, encoding, options):
        """Setup an XML reader with new options """
        if reader is None: reader__o = None
        else: reader__o = reader._o
        ret = libxml2mod.xmlTextReaderSetup(reader__o, self._o, URL, encoding, options)
        return ret
    def newTextReader(self, URI):
        """Create an xmlTextReader structure fed with @input """
        ret = libxml2mod.xmlNewTextReader(self._o, URI)
        if ret is None:raise treeError('xmlNewTextReader() failed')
        __tmp = xmlTextReader(_obj=ret)
        __tmp.input = self
        return __tmp
class xmlReg:
    """Python wrapper around a compiled libxml2 regular expression
    (xmlRegexp).

    The instance owns the underlying C object it wraps and frees it
    via xmlRegFreeRegexp() on destruction.
    """
    def __init__(self, _obj=None):
        # Wrap an existing C-level object if one is supplied;
        # otherwise start out empty (self._o is None).
        if _obj is not None:
            self._o = _obj
            return
        self._o = None
    def __del__(self):
        # Free the underlying C object exactly once; clearing _o
        # makes repeated finalization a no-op.
        if self._o is not None:
            libxml2mod.xmlRegFreeRegexp(self._o)
        self._o = None
    #
    # xmlReg functions from module xmlregexp
    #
    def regexpExec(self, content):
        """Check if the regular expression generates the value """
        ret = libxml2mod.xmlRegexpExec(self._o, content)
        return ret
    def regexpIsDeterminist(self):
        """Check if the regular expression is determinist """
        ret = libxml2mod.xmlRegexpIsDeterminist(self._o)
        return ret
    def regexpPrint(self, output):
        """Print the content of the compiled regular expression """
        libxml2mod.xmlRegexpPrint(output, self._o)
class relaxNgParserCtxt:
    """Python wrapper around a libxml2 RelaxNG parser context
    (xmlRelaxNGParserCtxt).

    The instance owns the underlying C object it wraps and frees it
    via xmlRelaxNGFreeParserCtxt() on destruction.
    """
    def __init__(self, _obj=None):
        # Wrap an existing C-level object if one is supplied;
        # otherwise start out empty (self._o is None).
        if _obj is not None:
            self._o = _obj
            return
        self._o = None
    def __del__(self):
        # Free the underlying C object exactly once; clearing _o
        # makes repeated finalization a no-op.
        if self._o is not None:
            libxml2mod.xmlRelaxNGFreeParserCtxt(self._o)
        self._o = None
    #
    # relaxNgParserCtxt functions from module relaxng
    #
    def relaxNGParse(self):
        """parse a schema definition resource and build an internal
           XML Schema structure which can be used to validate instances. """
        ret = libxml2mod.xmlRelaxNGParse(self._o)
        if ret is None:raise parserError('xmlRelaxNGParse() failed')
        __tmp = relaxNgSchema(_obj=ret)
        return __tmp
    def relaxParserSetFlag(self, flags):
        """Semi private function used to pass information to a parser
           context which are a combination of xmlRelaxNGParserFlag . """
        ret = libxml2mod.xmlRelaxParserSetFlag(self._o, flags)
        return ret
class relaxNgSchema:
    """Python wrapper around a parsed libxml2 RelaxNG schema
    (xmlRelaxNG).

    The instance owns the underlying C object it wraps and frees it
    via xmlRelaxNGFree() on destruction.
    """
    def __init__(self, _obj=None):
        # Wrap an existing C-level object if one is supplied;
        # otherwise start out empty (self._o is None).
        if _obj is not None:
            self._o = _obj
            return
        self._o = None
    def __del__(self):
        # Free the underlying C object exactly once; clearing _o
        # makes repeated finalization a no-op.
        if self._o is not None:
            libxml2mod.xmlRelaxNGFree(self._o)
        self._o = None
    #
    # relaxNgSchema functions from module relaxng
    #
    def relaxNGDump(self, output):
        """Dump a RelaxNG structure back """
        libxml2mod.xmlRelaxNGDump(output, self._o)
    def relaxNGDumpTree(self, output):
        """Dump the transformed RelaxNG tree. """
        libxml2mod.xmlRelaxNGDumpTree(output, self._o)
    def relaxNGNewValidCtxt(self):
        """Create an XML RelaxNGs validation context based on the
           given schema """
        ret = libxml2mod.xmlRelaxNGNewValidCtxt(self._o)
        if ret is None:raise treeError('xmlRelaxNGNewValidCtxt() failed')
        __tmp = relaxNgValidCtxt(_obj=ret)
        # Keep a reference so the schema outlives the validation
        # context that was built from it.
        __tmp.schema = self
        return __tmp
    #
    # relaxNgSchema functions from module xmlreader
    #
    def RelaxNGSetSchema(self, reader):
        """Use RelaxNG to validate the document as it is processed.
           Activation is only possible before the first Read(). if
           @schema is None, then RelaxNG validation is deactivated. @
           The @schema should not be freed until the reader is
           deallocated or its use has been deactivated. """
        if reader is None: reader__o = None
        else: reader__o = reader._o
        ret = libxml2mod.xmlTextReaderRelaxNGSetSchema(reader__o, self._o)
        return ret
class relaxNgValidCtxt(relaxNgValidCtxtCore):
    """Python wrapper for a libxml2 RelaxNG validation context
       (xmlRelaxNGValidCtxt). The wrapped C object is freed via
       xmlRelaxNGFreeValidCtxt() on destruction; self.schema holds a
       reference to the relaxNgSchema this context was created from,
       keeping it alive for the lifetime of the context."""
    def __init__(self, _obj=None):
        # schema is filled in by relaxNgSchema.relaxNGNewValidCtxt().
        self.schema = None
        self._o = _obj
        relaxNgValidCtxtCore.__init__(self, _obj=_obj)
    def __del__(self):
        if self._o != None:
            libxml2mod.xmlRelaxNGFreeValidCtxt(self._o)
        self._o = None
    #
    # relaxNgValidCtxt functions from module relaxng
    #
    def relaxNGValidateDoc(self, doc):
        """Validate a document tree in memory. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlRelaxNGValidateDoc(self._o, doc__o)
        return ret
    def relaxNGValidateFullElement(self, doc, elem):
        """Validate a full subtree when
           xmlRelaxNGValidatePushElement() returned 0 and the content
           of the node has been expanded. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlRelaxNGValidateFullElement(self._o, doc__o, elem__o)
        return ret
    def relaxNGValidatePopElement(self, doc, elem):
        """Pop the element end from the RelaxNG validation stack. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlRelaxNGValidatePopElement(self._o, doc__o, elem__o)
        return ret
    def relaxNGValidatePushCData(self, data, len):
        """check the CData parsed for validation in the current stack """
        ret = libxml2mod.xmlRelaxNGValidatePushCData(self._o, data, len)
        return ret
    def relaxNGValidatePushElement(self, doc, elem):
        """Push a new element start on the RelaxNG validation stack. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlRelaxNGValidatePushElement(self._o, doc__o, elem__o)
        return ret
#
# relaxNgValidCtxt functions from module xmlreader
#
def RelaxNGValidateCtxt(self, reader, options):
"""Use RelaxNG schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then RelaxNG schema validation is
deactivated. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(reader__o, self._o, options)
return ret
class SchemaParserCtxt:
    """Wrapper around an XML Schema (XSD) parser context
       (xmlSchemaParserCtxtPtr); the underlying C object is freed when
       the wrapper is garbage collected."""
    def __init__(self, _obj=None):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if _obj is not None:self._o = _obj;return
        self._o = None
    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlSchemaFreeParserCtxt(self._o)
        self._o = None
    #
    # SchemaParserCtxt functions from module xmlschemas
    #
    def schemaParse(self):
        """parse a schema definition resource and build an internal
           XML Schema structure which can be used to validate instances. """
        ret = libxml2mod.xmlSchemaParse(self._o)
        if ret is None:raise parserError('xmlSchemaParse() failed')
        __tmp = Schema(_obj=ret)
        return __tmp
class Schema:
    """Wrapper around a compiled W3C XML Schema (xmlSchemaPtr),
       normally obtained from SchemaParserCtxt.schemaParse()."""
    def __init__(self, _obj=None):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if _obj is not None:self._o = _obj;return
        self._o = None
    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlSchemaFree(self._o)
        self._o = None
    #
    # Schema functions from module xmlreader
    #
    def SetSchema(self, reader):
        """Use XSD Schema to validate the document as it is processed.
           Activation is only possible before the first Read(). if
           @schema is None, then Schema validation is deactivated. @
           The @schema should not be freed until the reader is
           deallocated or its use has been deactivated. """
        if reader is None: reader__o = None
        else: reader__o = reader._o
        ret = libxml2mod.xmlTextReaderSetSchema(reader__o, self._o)
        return ret
    #
    # Schema functions from module xmlschemas
    #
    def schemaDump(self, output):
        """Dump a Schema structure. """
        libxml2mod.xmlSchemaDump(output, self._o)
    def schemaNewValidCtxt(self):
        """Create an XML Schemas validation context based on the given
           schema. """
        ret = libxml2mod.xmlSchemaNewValidCtxt(self._o)
        if ret is None:raise treeError('xmlSchemaNewValidCtxt() failed')
        __tmp = SchemaValidCtxt(_obj=ret)
        # Keep the schema alive as long as the validation context exists.
        __tmp.schema = self
        return __tmp
class SchemaValidCtxt(SchemaValidCtxtCore):
    """W3C XML Schema validation context (xmlSchemaValidCtxtPtr) used
       to validate documents, files or subtrees against a Schema."""
    def __init__(self, _obj=None):
        # Reference to the owning Schema, set by
        # Schema.schemaNewValidCtxt() to keep it alive.
        self.schema = None
        self._o = _obj
        SchemaValidCtxtCore.__init__(self, _obj=_obj)
    def __del__(self):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if self._o is not None:
            libxml2mod.xmlSchemaFreeValidCtxt(self._o)
        self._o = None
    #
    # SchemaValidCtxt functions from module xmlreader
    #
    def SchemaValidateCtxt(self, reader, options):
        """Use W3C XSD schema context to validate the document as it
           is processed. Activation is only possible before the first
           Read(). If @ctxt is None, then XML Schema validation is
           deactivated. """
        if reader is None: reader__o = None
        else: reader__o = reader._o
        ret = libxml2mod.xmlTextReaderSchemaValidateCtxt(reader__o, self._o, options)
        return ret
    #
    # SchemaValidCtxt functions from module xmlschemas
    #
    def schemaIsValid(self):
        """Check if any error was detected during validation. """
        ret = libxml2mod.xmlSchemaIsValid(self._o)
        return ret
    def schemaSetValidOptions(self, options):
        """Sets the options to be used during the validation. """
        ret = libxml2mod.xmlSchemaSetValidOptions(self._o, options)
        return ret
    def schemaValidCtxtGetOptions(self):
        """Get the validation context options. """
        ret = libxml2mod.xmlSchemaValidCtxtGetOptions(self._o)
        return ret
    def schemaValidCtxtGetParserCtxt(self):
        """allow access to the parser context of the schema validation
           context """
        ret = libxml2mod.xmlSchemaValidCtxtGetParserCtxt(self._o)
        if ret is None:raise parserError('xmlSchemaValidCtxtGetParserCtxt() failed')
        __tmp = parserCtxt(_obj=ret)
        return __tmp
    def schemaValidateDoc(self, doc):
        """Validate a document tree in memory. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlSchemaValidateDoc(self._o, doc__o)
        return ret
    def schemaValidateFile(self, filename, options):
        """Do a schemas validation of the given resource, it will use
           the SAX streamable validation internally. """
        ret = libxml2mod.xmlSchemaValidateFile(self._o, filename, options)
        return ret
    def schemaValidateOneElement(self, elem):
        """Validate a branch of a tree, starting with the given @elem. """
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlSchemaValidateOneElement(self._o, elem__o)
        return ret
    def schemaValidateSetFilename(self, filename):
        """Workaround to provide file error reporting information when
           this is not provided by current APIs """
        libxml2mod.xmlSchemaValidateSetFilename(self._o, filename)
class xmlTextReaderLocator:
    """Wrapper around an xmlTextReaderLocatorPtr, giving access to the
       position (base URI and line number) of a reader error.  The
       underlying C object is owned by the reader and is not freed
       here."""
    def __init__(self, _obj=None):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if _obj is not None:self._o = _obj;return
        self._o = None
    #
    # xmlTextReaderLocator functions from module xmlreader
    #
    def BaseURI(self):
        """Obtain the base URI for the given locator. """
        ret = libxml2mod.xmlTextReaderLocatorBaseURI(self._o)
        return ret
    def LineNumber(self):
        """Obtain the line number for the given locator. """
        ret = libxml2mod.xmlTextReaderLocatorLineNumber(self._o)
        return ret
class xmlTextReader(xmlTextReaderCore):
    """Streaming pull parser (xmlTextReaderPtr) following the C# / .Net
       XmlTextReader API: call Read() to advance node by node and the
       accessor methods to inspect the current node."""
    def __init__(self, _obj=None):
        # Reference to the inputBuffer feeding the reader; kept so it
        # outlives the underlying C reader.
        self.input = None
        self._o = _obj
        xmlTextReaderCore.__init__(self, _obj=_obj)
    def __del__(self):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if self._o is not None:
            libxml2mod.xmlFreeTextReader(self._o)
        self._o = None
    #
    # xmlTextReader functions from module xmlreader
    #
    def AttributeCount(self):
        """Provides the number of attributes of the current node """
        ret = libxml2mod.xmlTextReaderAttributeCount(self._o)
        return ret
    def BaseUri(self):
        """The base URI of the node. """
        ret = libxml2mod.xmlTextReaderConstBaseUri(self._o)
        return ret
    def ByteConsumed(self):
        """This function provides the current index of the parser used
           by the reader, relative to the start of the current entity.
           This function actually just wraps a call to
           xmlBytesConsumed() for the parser context associated with
           the reader. See xmlBytesConsumed() for more information. """
        ret = libxml2mod.xmlTextReaderByteConsumed(self._o)
        return ret
    def Close(self):
        """This method releases any resources allocated by the current
           instance changes the state to Closed and close any
           underlying input. """
        ret = libxml2mod.xmlTextReaderClose(self._o)
        return ret
    def CurrentDoc(self):
        """Hacking interface allowing to get the xmlDocPtr
           corresponding to the current document being accessed by the
           xmlTextReader. NOTE: as a result of this call, the reader
           will not destroy the associated XML document and calling
           xmlFreeDoc() on the result is needed once the reader
           parsing has finished. """
        ret = libxml2mod.xmlTextReaderCurrentDoc(self._o)
        if ret is None:raise treeError('xmlTextReaderCurrentDoc() failed')
        __tmp = xmlDoc(_obj=ret)
        return __tmp
    def CurrentNode(self):
        """Hacking interface allowing to get the xmlNodePtr
           corresponding to the current node being accessed by the
           xmlTextReader. This is dangerous because the underlying
           node may be destroyed on the next Reads. """
        ret = libxml2mod.xmlTextReaderCurrentNode(self._o)
        if ret is None:raise treeError('xmlTextReaderCurrentNode() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def Depth(self):
        """The depth of the node in the tree. """
        ret = libxml2mod.xmlTextReaderDepth(self._o)
        return ret
    def Encoding(self):
        """Determine the encoding of the document being read. """
        ret = libxml2mod.xmlTextReaderConstEncoding(self._o)
        return ret
    def Expand(self):
        """Reads the contents of the current node and the full
           subtree. It then makes the subtree available until the next
           xmlTextReaderRead() call """
        ret = libxml2mod.xmlTextReaderExpand(self._o)
        if ret is None:raise treeError('xmlTextReaderExpand() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def GetAttribute(self, name):
        """Provides the value of the attribute with the specified
           qualified name. """
        ret = libxml2mod.xmlTextReaderGetAttribute(self._o, name)
        return ret
    def GetAttributeNo(self, no):
        """Provides the value of the attribute with the specified
           index relative to the containing element. """
        ret = libxml2mod.xmlTextReaderGetAttributeNo(self._o, no)
        return ret
    def GetAttributeNs(self, localName, namespaceURI):
        """Provides the value of the specified attribute """
        ret = libxml2mod.xmlTextReaderGetAttributeNs(self._o, localName, namespaceURI)
        return ret
    def GetParserColumnNumber(self):
        """Provide the column number of the current parsing point. """
        ret = libxml2mod.xmlTextReaderGetParserColumnNumber(self._o)
        return ret
    def GetParserLineNumber(self):
        """Provide the line number of the current parsing point. """
        ret = libxml2mod.xmlTextReaderGetParserLineNumber(self._o)
        return ret
    def GetParserProp(self, prop):
        """Read the parser internal property. """
        ret = libxml2mod.xmlTextReaderGetParserProp(self._o, prop)
        return ret
    def GetRemainder(self):
        """Method to get the remainder of the buffered XML. this
           method stops the parser, set its state to End Of File and
           return the input stream with what is left that the parser
           did not use. The implementation is not good, the parser
           certainly progressed past what's left in reader->input,
           and there is an allocation problem. Best would be to
           rewrite it differently. """
        ret = libxml2mod.xmlTextReaderGetRemainder(self._o)
        if ret is None:raise treeError('xmlTextReaderGetRemainder() failed')
        __tmp = inputBuffer(_obj=ret)
        return __tmp
    def HasAttributes(self):
        """Whether the node has attributes. """
        ret = libxml2mod.xmlTextReaderHasAttributes(self._o)
        return ret
    def HasValue(self):
        """Whether the node can have a text value. """
        ret = libxml2mod.xmlTextReaderHasValue(self._o)
        return ret
    def IsDefault(self):
        """Whether an Attribute node was generated from the default
           value defined in the DTD or schema. """
        ret = libxml2mod.xmlTextReaderIsDefault(self._o)
        return ret
    def IsEmptyElement(self):
        """Check if the current node is empty """
        ret = libxml2mod.xmlTextReaderIsEmptyElement(self._o)
        return ret
    def IsNamespaceDecl(self):
        """Determine whether the current node is a namespace
           declaration rather than a regular attribute. """
        ret = libxml2mod.xmlTextReaderIsNamespaceDecl(self._o)
        return ret
    def IsValid(self):
        """Retrieve the validity status from the parser context """
        ret = libxml2mod.xmlTextReaderIsValid(self._o)
        return ret
    def LocalName(self):
        """The local name of the node. """
        ret = libxml2mod.xmlTextReaderConstLocalName(self._o)
        return ret
    def LookupNamespace(self, prefix):
        """Resolves a namespace prefix in the scope of the current
           element. """
        ret = libxml2mod.xmlTextReaderLookupNamespace(self._o, prefix)
        return ret
    def MoveToAttribute(self, name):
        """Moves the position of the current instance to the attribute
           with the specified qualified name. """
        ret = libxml2mod.xmlTextReaderMoveToAttribute(self._o, name)
        return ret
    def MoveToAttributeNo(self, no):
        """Moves the position of the current instance to the attribute
           with the specified index relative to the containing element. """
        ret = libxml2mod.xmlTextReaderMoveToAttributeNo(self._o, no)
        return ret
    def MoveToAttributeNs(self, localName, namespaceURI):
        """Moves the position of the current instance to the attribute
           with the specified local name and namespace URI. """
        ret = libxml2mod.xmlTextReaderMoveToAttributeNs(self._o, localName, namespaceURI)
        return ret
    def MoveToElement(self):
        """Moves the position of the current instance to the node that
           contains the current Attribute node. """
        ret = libxml2mod.xmlTextReaderMoveToElement(self._o)
        return ret
    def MoveToFirstAttribute(self):
        """Moves the position of the current instance to the first
           attribute associated with the current node. """
        ret = libxml2mod.xmlTextReaderMoveToFirstAttribute(self._o)
        return ret
    def MoveToNextAttribute(self):
        """Moves the position of the current instance to the next
           attribute associated with the current node. """
        ret = libxml2mod.xmlTextReaderMoveToNextAttribute(self._o)
        return ret
    def Name(self):
        """The qualified name of the node, equal to Prefix :LocalName. """
        ret = libxml2mod.xmlTextReaderConstName(self._o)
        return ret
    def NamespaceUri(self):
        """The URI defining the namespace associated with the node. """
        ret = libxml2mod.xmlTextReaderConstNamespaceUri(self._o)
        return ret
    def NewDoc(self, cur, URL, encoding, options):
        """Setup an xmltextReader to parse an XML in-memory document.
           The parsing flags @options are a combination of
           xmlParserOption. This reuses the existing @reader
           xmlTextReader. """
        ret = libxml2mod.xmlReaderNewDoc(self._o, cur, URL, encoding, options)
        return ret
    def NewFd(self, fd, URL, encoding, options):
        """Setup an xmltextReader to parse an XML from a file
           descriptor. NOTE that the file descriptor will not be
           closed when the reader is closed or reset. The parsing
           flags @options are a combination of xmlParserOption. This
           reuses the existing @reader xmlTextReader. """
        ret = libxml2mod.xmlReaderNewFd(self._o, fd, URL, encoding, options)
        return ret
    def NewFile(self, filename, encoding, options):
        """parse an XML file from the filesystem or the network. The
           parsing flags @options are a combination of
           xmlParserOption. This reuses the existing @reader
           xmlTextReader. """
        ret = libxml2mod.xmlReaderNewFile(self._o, filename, encoding, options)
        return ret
    def NewMemory(self, buffer, size, URL, encoding, options):
        """Setup an xmltextReader to parse an XML in-memory document.
           The parsing flags @options are a combination of
           xmlParserOption. This reuses the existing @reader
           xmlTextReader. """
        ret = libxml2mod.xmlReaderNewMemory(self._o, buffer, size, URL, encoding, options)
        return ret
    def NewWalker(self, doc):
        """Setup an xmltextReader to parse a preparsed XML document.
           This reuses the existing @reader xmlTextReader. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlReaderNewWalker(self._o, doc__o)
        return ret
    def Next(self):
        """Skip to the node following the current one in document
           order while avoiding the subtree if any. """
        ret = libxml2mod.xmlTextReaderNext(self._o)
        return ret
    def NextSibling(self):
        """Skip to the node following the current one in document
           order while avoiding the subtree if any. Currently
           implemented only for Readers built on a document """
        ret = libxml2mod.xmlTextReaderNextSibling(self._o)
        return ret
    def NodeType(self):
        """Get the node type of the current node Reference:
           http://www.gnu.org/software/dotgnu/pnetlib-doc/System/Xml/Xm
           lNodeType.html """
        ret = libxml2mod.xmlTextReaderNodeType(self._o)
        return ret
    def Normalization(self):
        """The value indicating whether to normalize white space and
           attribute values. Since attribute value and end of line
           normalizations are a MUST in the XML specification only the
           value true is accepted. The broken behaviour of accepting
           out of range character entities like &#0; is of course not
           supported either. """
        ret = libxml2mod.xmlTextReaderNormalization(self._o)
        return ret
    def Prefix(self):
        """A shorthand reference to the namespace associated with the
           node. """
        ret = libxml2mod.xmlTextReaderConstPrefix(self._o)
        return ret
    def Preserve(self):
        """This tells the XML Reader to preserve the current node. The
           caller must also use xmlTextReaderCurrentDoc() to keep a
           handle on the resulting document once parsing has finished """
        ret = libxml2mod.xmlTextReaderPreserve(self._o)
        if ret is None:raise treeError('xmlTextReaderPreserve() failed')
        __tmp = xmlNode(_obj=ret)
        return __tmp
    def QuoteChar(self):
        """The quotation mark character used to enclose the value of
           an attribute. """
        ret = libxml2mod.xmlTextReaderQuoteChar(self._o)
        return ret
    def Read(self):
        """Moves the position of the current instance to the next node
           in the stream, exposing its properties. """
        ret = libxml2mod.xmlTextReaderRead(self._o)
        return ret
    def ReadAttributeValue(self):
        """Parses an attribute value into one or more Text and
           EntityReference nodes. """
        ret = libxml2mod.xmlTextReaderReadAttributeValue(self._o)
        return ret
    def ReadInnerXml(self):
        """Reads the contents of the current node, including child
           nodes and markup. """
        ret = libxml2mod.xmlTextReaderReadInnerXml(self._o)
        return ret
    def ReadOuterXml(self):
        """Reads the contents of the current node, including child
           nodes and markup. """
        ret = libxml2mod.xmlTextReaderReadOuterXml(self._o)
        return ret
    def ReadState(self):
        """Gets the read state of the reader. """
        ret = libxml2mod.xmlTextReaderReadState(self._o)
        return ret
    def ReadString(self):
        """Reads the contents of an element or a text node as a string. """
        ret = libxml2mod.xmlTextReaderReadString(self._o)
        return ret
    def RelaxNGSetSchema(self, schema):
        """Use RelaxNG to validate the document as it is processed.
           Activation is only possible before the first Read(). if
           @schema is None, then RelaxNG validation is deactivated. @
           The @schema should not be freed until the reader is
           deallocated or its use has been deactivated. """
        if schema is None: schema__o = None
        else: schema__o = schema._o
        ret = libxml2mod.xmlTextReaderRelaxNGSetSchema(self._o, schema__o)
        return ret
    def RelaxNGValidate(self, rng):
        """Use RelaxNG schema to validate the document as it is
           processed. Activation is only possible before the first
           Read(). If @rng is None, then RelaxNG schema validation is
           deactivated. """
        ret = libxml2mod.xmlTextReaderRelaxNGValidate(self._o, rng)
        return ret
    def RelaxNGValidateCtxt(self, ctxt, options):
        """Use RelaxNG schema context to validate the document as it
           is processed. Activation is only possible before the first
           Read(). If @ctxt is None, then RelaxNG schema validation is
           deactivated. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlTextReaderRelaxNGValidateCtxt(self._o, ctxt__o, options)
        return ret
    def SchemaValidate(self, xsd):
        """Use W3C XSD schema to validate the document as it is
           processed. Activation is only possible before the first
           Read(). If @xsd is None, then XML Schema validation is
           deactivated. """
        ret = libxml2mod.xmlTextReaderSchemaValidate(self._o, xsd)
        return ret
    def SchemaValidateCtxt(self, ctxt, options):
        """Use W3C XSD schema context to validate the document as it
           is processed. Activation is only possible before the first
           Read(). If @ctxt is None, then XML Schema validation is
           deactivated. """
        if ctxt is None: ctxt__o = None
        else: ctxt__o = ctxt._o
        ret = libxml2mod.xmlTextReaderSchemaValidateCtxt(self._o, ctxt__o, options)
        return ret
    def SetParserProp(self, prop, value):
        """Change the parser processing behaviour by changing some of
           its internal properties. Note that some properties can only
           be changed before any read has been done. """
        ret = libxml2mod.xmlTextReaderSetParserProp(self._o, prop, value)
        return ret
    def SetSchema(self, schema):
        """Use XSD Schema to validate the document as it is processed.
           Activation is only possible before the first Read(). if
           @schema is None, then Schema validation is deactivated. @
           The @schema should not be freed until the reader is
           deallocated or its use has been deactivated. """
        if schema is None: schema__o = None
        else: schema__o = schema._o
        ret = libxml2mod.xmlTextReaderSetSchema(self._o, schema__o)
        return ret
    def Setup(self, input, URL, encoding, options):
        """Setup an XML reader with new options """
        if input is None: input__o = None
        else: input__o = input._o
        ret = libxml2mod.xmlTextReaderSetup(self._o, input__o, URL, encoding, options)
        return ret
    def Standalone(self):
        """Determine the standalone status of the document being read. """
        ret = libxml2mod.xmlTextReaderStandalone(self._o)
        return ret
    def String(self, str):
        """Get an interned string from the reader, allows for example
           to speedup string name comparisons """
        ret = libxml2mod.xmlTextReaderConstString(self._o, str)
        return ret
    def Value(self):
        """Provides the text value of the node if present """
        ret = libxml2mod.xmlTextReaderConstValue(self._o)
        return ret
    def XmlLang(self):
        """The xml:lang scope within which the node resides. """
        ret = libxml2mod.xmlTextReaderConstXmlLang(self._o)
        return ret
    def XmlVersion(self):
        """Determine the XML version of the document being read. """
        ret = libxml2mod.xmlTextReaderConstXmlVersion(self._o)
        return ret
class URI:
    """Wrapper around a parsed URI structure (xmlURIPtr) with
       accessors for its components (scheme, server, path, query,
       fragment, ...)."""
    def __init__(self, _obj=None):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if _obj is not None:self._o = _obj;return
        self._o = None
    def __del__(self):
        if self._o is not None:
            libxml2mod.xmlFreeURI(self._o)
        self._o = None
    # accessors for URI
    def authority(self):
        """Get the authority part from an URI """
        ret = libxml2mod.xmlURIGetAuthority(self._o)
        return ret
    def fragment(self):
        """Get the fragment part from an URI """
        ret = libxml2mod.xmlURIGetFragment(self._o)
        return ret
    def opaque(self):
        """Get the opaque part from an URI """
        ret = libxml2mod.xmlURIGetOpaque(self._o)
        return ret
    def path(self):
        """Get the path part from an URI """
        ret = libxml2mod.xmlURIGetPath(self._o)
        return ret
    def port(self):
        """Get the port part from an URI """
        ret = libxml2mod.xmlURIGetPort(self._o)
        return ret
    def query(self):
        """Get the query part from an URI """
        ret = libxml2mod.xmlURIGetQuery(self._o)
        return ret
    def queryRaw(self):
        """Get the raw query part from an URI (i.e. the unescaped
           form). """
        ret = libxml2mod.xmlURIGetQueryRaw(self._o)
        return ret
    def scheme(self):
        """Get the scheme part from an URI """
        ret = libxml2mod.xmlURIGetScheme(self._o)
        return ret
    def server(self):
        """Get the server part from an URI """
        ret = libxml2mod.xmlURIGetServer(self._o)
        return ret
    def setAuthority(self, authority):
        """Set the authority part of an URI. """
        libxml2mod.xmlURISetAuthority(self._o, authority)
    def setFragment(self, fragment):
        """Set the fragment part of an URI. """
        libxml2mod.xmlURISetFragment(self._o, fragment)
    def setOpaque(self, opaque):
        """Set the opaque part of an URI. """
        libxml2mod.xmlURISetOpaque(self._o, opaque)
    def setPath(self, path):
        """Set the path part of an URI. """
        libxml2mod.xmlURISetPath(self._o, path)
    def setPort(self, port):
        """Set the port part of an URI. """
        libxml2mod.xmlURISetPort(self._o, port)
    def setQuery(self, query):
        """Set the query part of an URI. """
        libxml2mod.xmlURISetQuery(self._o, query)
    def setQueryRaw(self, query_raw):
        """Set the raw query part of an URI (i.e. the unescaped form). """
        libxml2mod.xmlURISetQueryRaw(self._o, query_raw)
    def setScheme(self, scheme):
        """Set the scheme part of an URI. """
        libxml2mod.xmlURISetScheme(self._o, scheme)
    def setServer(self, server):
        """Set the server part of an URI. """
        libxml2mod.xmlURISetServer(self._o, server)
    def setUser(self, user):
        """Set the user part of an URI. """
        libxml2mod.xmlURISetUser(self._o, user)
    def user(self):
        """Get the user part from an URI """
        ret = libxml2mod.xmlURIGetUser(self._o)
        return ret
    #
    # URI functions from module uri
    #
    def parseURIReference(self, str):
        """Parse an URI reference string based on RFC 3986 and fills
           in the appropriate fields of the @uri structure
           URI-reference = URI / relative-ref """
        ret = libxml2mod.xmlParseURIReference(self._o, str)
        return ret
    def printURI(self, stream):
        """Prints the URI in the stream @stream. """
        libxml2mod.xmlPrintURI(stream, self._o)
    def saveUri(self):
        """Save the URI as an escaped string """
        ret = libxml2mod.xmlSaveUri(self._o)
        return ret
class ValidCtxt(ValidCtxtCore):
    """DTD validation context (xmlValidCtxtPtr) used to validate
       documents, elements and attributes against a DTD."""
    def __init__(self, _obj=None):
        self._o = _obj
        ValidCtxtCore.__init__(self, _obj=_obj)
    def __del__(self):
        # PEP 8: identity comparison with None uses "is not", not "!=".
        if self._o is not None:
            libxml2mod.xmlFreeValidCtxt(self._o)
        self._o = None
    #
    # ValidCtxt functions from module valid
    #
    def validCtxtNormalizeAttributeValue(self, doc, elem, name, value):
        """Does the validation related extra step of the normalization
           of attribute values: If the declared value is not CDATA,
           then the XML processor must further process the normalized
           attribute value by discarding any leading and trailing
           space (#x20) characters, and by replacing sequences of
           space (#x20) characters by single space (#x20) character.
           Also check VC: Standalone Document Declaration in P32, and
           update ctxt->valid accordingly """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidCtxtNormalizeAttributeValue(self._o, doc__o, elem__o, name, value)
        return ret
    def validateDocument(self, doc):
        """Try to validate the document instance basically it does
           the all the checks described by the XML Rec i.e. validates
           the internal and external subset (if present) and validate
           the document tree. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlValidateDocument(self._o, doc__o)
        return ret
    def validateDocumentFinal(self, doc):
        """Does the final step for the document validation once all
           the incremental validation steps have been completed
           basically it does the following checks described by the XML
           Rec Check all the IDREF/IDREFS attributes definition for
           validity """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlValidateDocumentFinal(self._o, doc__o)
        return ret
    def validateDtd(self, doc, dtd):
        """Try to validate the document against the dtd instance
           Basically it does check all the definitions in the DtD.
           Note that the internal subset (if present) is de-coupled
           (i.e. not used), which could give problems if ID or IDREF
           is present. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if dtd is None: dtd__o = None
        else: dtd__o = dtd._o
        ret = libxml2mod.xmlValidateDtd(self._o, doc__o, dtd__o)
        return ret
    def validateDtdFinal(self, doc):
        """Does the final step for the dtds validation once all the
           subsets have been parsed basically it does the following
           checks described by the XML Rec - check that ENTITY and
           ENTITIES type attributes default or possible values matches
           one of the defined entities. - check that NOTATION type
           attributes default or possible values matches one of the
           defined notations. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlValidateDtdFinal(self._o, doc__o)
        return ret
    def validateElement(self, doc, elem):
        """Try to validate the subtree under an element """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidateElement(self._o, doc__o, elem__o)
        return ret
    def validateNotationUse(self, doc, notationName):
        """Validate that the given name match a notation declaration.
           - [ VC: Notation Declared ] """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlValidateNotationUse(self._o, doc__o, notationName)
        return ret
    def validateOneAttribute(self, doc, elem, attr, value):
        """Try to validate a single attribute for an element basically
           it does the following checks as described by the XML-1.0
           recommendation: - [ VC: Attribute Value Type ] - [ VC:
           Fixed Attribute Default ] - [ VC: Entity Name ] - [ VC:
           Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC: Entity
           Name ] - [ VC: Notation Attributes ] The ID/IDREF
           uniqueness and matching are done separately """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        if attr is None: attr__o = None
        else: attr__o = attr._o
        ret = libxml2mod.xmlValidateOneAttribute(self._o, doc__o, elem__o, attr__o, value)
        return ret
    def validateOneElement(self, doc, elem):
        """Try to validate a single element and it's attributes,
           basically it does the following checks as described by the
           XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
           Required Attribute ] Then call xmlValidateOneAttribute()
           for each attribute present. The ID/IDREF checks are
           done separately """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o)
        return ret
    def validateOneNamespace(self, doc, elem, prefix, ns, value):
        """Try to validate a single namespace declaration for an
           element basically it does the following checks as described
           by the XML-1.0 recommendation: - [ VC: Attribute Value Type
           ] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] -
           [ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC:
           Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF
           uniqueness and matching are done separately """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        if ns is None: ns__o = None
        else: ns__o = ns._o
        ret = libxml2mod.xmlValidateOneNamespace(self._o, doc__o, elem__o, prefix, ns__o, value)
        return ret
    def validatePopElement(self, doc, elem, qname):
        """Pop the element end from the validation stack. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidatePopElement(self._o, doc__o, elem__o, qname)
        return ret
    def validatePushCData(self, data, len):
        """check the CData parsed for validation in the current stack """
        ret = libxml2mod.xmlValidatePushCData(self._o, data, len)
        return ret
    def validatePushElement(self, doc, elem, qname):
        """Push a new element start on the validation stack. """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        if elem is None: elem__o = None
        else: elem__o = elem._o
        ret = libxml2mod.xmlValidatePushElement(self._o, doc__o, elem__o, qname)
        return ret
    def validateRoot(self, doc):
        """Try to validate the root element basically it does the
           following check as described by the XML-1.0 recommendation:
           - [ VC: Root Element Type ] it doesn't try to recurse or
           apply other check to the element """
        if doc is None: doc__o = None
        else: doc__o = doc._o
        ret = libxml2mod.xmlValidateRoot(self._o, doc__o)
        return ret
class xpathContext:
    """Thin Python proxy around a libxml2 xmlXPathContext.

    The wrapped C-level context is kept in ``self._o``; every method
    forwards to the matching ``libxml2mod`` entry point.
    """

    def __init__(self, _obj=None):
        # Adopt an existing C-level context when one is supplied.
        # Compare with ``is not None`` per PEP 8, not ``!=``.
        if _obj is not None:
            self._o = _obj
            return
        self._o = None

    # accessors for xpathContext
    def contextDoc(self):
        """Get the doc from an xpathContext """
        ret = libxml2mod.xmlXPathGetContextDoc(self._o)
        if ret is None:
            raise xpathError('xmlXPathGetContextDoc() failed')
        return xmlDoc(_obj=ret)

    def contextNode(self):
        """Get the current node from an xpathContext """
        ret = libxml2mod.xmlXPathGetContextNode(self._o)
        if ret is None:
            raise xpathError('xmlXPathGetContextNode() failed')
        return xmlNode(_obj=ret)

    def contextPosition(self):
        """Get the current context position from an xpathContext """
        return libxml2mod.xmlXPathGetContextPosition(self._o)

    def contextSize(self):
        """Get the current context size from an xpathContext """
        return libxml2mod.xmlXPathGetContextSize(self._o)

    def function(self):
        """Get the current function name from an xpathContext """
        return libxml2mod.xmlXPathGetFunction(self._o)

    def functionURI(self):
        """Get the current function name URI from an xpathContext """
        return libxml2mod.xmlXPathGetFunctionURI(self._o)

    def setContextDoc(self, doc):
        """Set the doc of an xpathContext """
        doc__o = doc._o if doc is not None else None
        libxml2mod.xmlXPathSetContextDoc(self._o, doc__o)

    def setContextNode(self, node):
        """Set the current node of an xpathContext """
        node__o = node._o if node is not None else None
        libxml2mod.xmlXPathSetContextNode(self._o, node__o)

    #
    # xpathContext functions from module python
    #
    def registerXPathFunction(self, name, ns_uri, f):
        """Register a Python written function to the XPath interpreter """
        return libxml2mod.xmlRegisterXPathFunction(self._o, name, ns_uri, f)

    def xpathRegisterVariable(self, name, ns_uri, value):
        """Register a variable with the XPath context """
        return libxml2mod.xmlXPathRegisterVariable(self._o, name, ns_uri, value)

    #
    # xpathContext functions from module xpath
    #
    def xpathContextSetCache(self, active, value, options):
        """Create or free an object cache on the XPath context.  When
        @active is set, XPath objects (xmlXPathObject) are cached
        internally for reuse.  @options 0 selects XPath object
        caching, with @value the maximum number of objects cached per
        slot (there are 5 slots: node-set, string, number, boolean and
        misc; use < 0 for the default of 100).  Other @options values
        currently have no effect. """
        return libxml2mod.xmlXPathContextSetCache(self._o, active, value, options)

    def xpathEval(self, str):
        """Evaluate the XPath Location Path in the given context. """
        ret = libxml2mod.xmlXPathEval(str, self._o)
        if ret is None:
            raise xpathError('xmlXPathEval() failed')
        return xpathObjectRet(ret)

    def xpathEvalExpression(self, str):
        """Alias for xmlXPathEval(). """
        ret = libxml2mod.xmlXPathEvalExpression(str, self._o)
        if ret is None:
            raise xpathError('xmlXPathEvalExpression() failed')
        return xpathObjectRet(ret)

    def xpathFreeContext(self):
        """Free up an xmlXPathContext """
        libxml2mod.xmlXPathFreeContext(self._o)

    #
    # xpathContext functions from module xpathInternals
    #
    def xpathNewParserContext(self, str):
        """Create a new xmlXPathParserContext """
        ret = libxml2mod.xmlXPathNewParserContext(str, self._o)
        if ret is None:
            raise xpathError('xmlXPathNewParserContext() failed')
        return xpathParserContext(_obj=ret)

    def xpathNsLookup(self, prefix):
        """Search in the namespace declaration array of the context
        for the given namespace name associated to the given prefix """
        return libxml2mod.xmlXPathNsLookup(self._o, prefix)

    def xpathRegisterAllFunctions(self):
        """Registers all default XPath functions in this context """
        libxml2mod.xmlXPathRegisterAllFunctions(self._o)

    def xpathRegisterNs(self, prefix, ns_uri):
        """Register a new namespace.  If @ns_uri is None it
        unregisters the namespace """
        return libxml2mod.xmlXPathRegisterNs(self._o, prefix, ns_uri)

    def xpathRegisteredFuncsCleanup(self):
        """Cleanup the XPath context data associated to registered
        functions """
        libxml2mod.xmlXPathRegisteredFuncsCleanup(self._o)

    def xpathRegisteredNsCleanup(self):
        """Cleanup the XPath context data associated to registered
        namespaces """
        libxml2mod.xmlXPathRegisteredNsCleanup(self._o)

    def xpathRegisteredVariablesCleanup(self):
        """Cleanup the XPath context data associated to registered
        variables """
        libxml2mod.xmlXPathRegisteredVariablesCleanup(self._o)

    def xpathVariableLookup(self, name):
        """Search in the Variable array of the context for the given
        variable value. """
        ret = libxml2mod.xmlXPathVariableLookup(self._o, name)
        if ret is None:
            raise xpathError('xmlXPathVariableLookup() failed')
        return xpathObjectRet(ret)

    def xpathVariableLookupNS(self, name, ns_uri):
        """Search in the Variable array of the context for the given
        variable value. """
        ret = libxml2mod.xmlXPathVariableLookupNS(self._o, name, ns_uri)
        if ret is None:
            raise xpathError('xmlXPathVariableLookupNS() failed')
        return xpathObjectRet(ret)

    #
    # xpathContext functions from module xpointer
    #
    def xpointerEval(self, str):
        """Evaluate the XPath Location Path in the given context. """
        ret = libxml2mod.xmlXPtrEval(str, self._o)
        if ret is None:
            raise treeError('xmlXPtrEval() failed')
        return xpathObjectRet(ret)
class xpathParserContext:
def __init__(self, _obj=None):
if _obj != None:self._o = _obj;return
self._o = None
# accessors for xpathParserContext
def context(self):
"""Get the xpathContext from an xpathParserContext """
ret = libxml2mod.xmlXPathParserGetContext(self._o)
if ret is None:raise xpathError('xmlXPathParserGetContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp
#
# xpathParserContext functions from module xpathInternals
#
def xpathAddValues(self):
"""Implement the add operation on XPath objects: The numeric
operators convert their operands to numbers as if by
calling the number function. """
libxml2mod.xmlXPathAddValues(self._o)
def xpathBooleanFunction(self, nargs):
"""Implement the boolean() XPath function boolean
boolean(object) The boolean function converts its argument
to a boolean as follows: - a number is true if and only if
it is neither positive or negative zero nor NaN - a
node-set is true if and only if it is non-empty - a string
is true if and only if its length is non-zero """
libxml2mod.xmlXPathBooleanFunction(self._o, nargs)
def xpathCeilingFunction(self, nargs):
"""Implement the ceiling() XPath function number
ceiling(number) The ceiling function returns the smallest
(closest to negative infinity) number that is not less than
the argument and that is an integer. """
libxml2mod.xmlXPathCeilingFunction(self._o, nargs)
def xpathCompareValues(self, inf, strict):
"""Implement the compare operation on XPath objects: @arg1 <
@arg2 (1, 1, ... @arg1 <= @arg2 (1, 0, ... @arg1 >
@arg2 (0, 1, ... @arg1 >= @arg2 (0, 0, ... When
neither object to be compared is a node-set and the
operator is <=, <, >=, >, then the objects are compared by
converted both objects to numbers and comparing the numbers
according to IEEE 754. The < comparison will be true if and
only if the first number is less than the second number.
The <= comparison will be true if and only if the first
number is less than or equal to the second number. The >
comparison will be true if and only if the first number is
greater than the second number. The >= comparison will be
true if and only if the first number is greater than or
equal to the second number. """
ret = libxml2mod.xmlXPathCompareValues(self._o, inf, strict)
return ret
def xpathConcatFunction(self, nargs):
"""Implement the concat() XPath function string concat(string,
string, string*) The concat function returns the
concatenation of its arguments. """
libxml2mod.xmlXPathConcatFunction(self._o, nargs)
def xpathContainsFunction(self, nargs):
"""Implement the contains() XPath function boolean
contains(string, string) The contains function returns true
if the first argument string contains the second argument
string, and otherwise returns false. """
libxml2mod.xmlXPathContainsFunction(self._o, nargs)
def xpathCountFunction(self, nargs):
"""Implement the count() XPath function number count(node-set) """
libxml2mod.xmlXPathCountFunction(self._o, nargs)
def xpathDivValues(self):
"""Implement the div operation on XPath objects @arg1 / @arg2:
The numeric operators convert their operands to numbers as
if by calling the number function. """
libxml2mod.xmlXPathDivValues(self._o)
def xpathEqualValues(self):
"""Implement the equal operation on XPath objects content:
@arg1 == @arg2 """
ret = libxml2mod.xmlXPathEqualValues(self._o)
return ret
def xpathErr(self, error):
"""Handle an XPath error """
libxml2mod.xmlXPathErr(self._o, error)
def xpathEvalExpr(self):
"""Parse and evaluate an XPath expression in the given
context, then push the result on the context stack """
libxml2mod.xmlXPathEvalExpr(self._o)
def xpathFalseFunction(self, nargs):
"""Implement the false() XPath function boolean false() """
libxml2mod.xmlXPathFalseFunction(self._o, nargs)
def xpathFloorFunction(self, nargs):
"""Implement the floor() XPath function number floor(number)
The floor function returns the largest (closest to positive
infinity) number that is not greater than the argument and
that is an integer. """
libxml2mod.xmlXPathFloorFunction(self._o, nargs)
def xpathFreeParserContext(self):
"""Free up an xmlXPathParserContext """
libxml2mod.xmlXPathFreeParserContext(self._o)
def xpathIdFunction(self, nargs):
"""Implement the id() XPath function node-set id(object) The
id function selects elements by their unique ID (see [5.2.1
Unique IDs]). When the argument to id is of type node-set,
then the result is the union of the result of applying id
to the string value of each of the nodes in the argument
node-set. When the argument to id is of any other type, the
argument is converted to a string as if by a call to the
string function; the string is split into a
whitespace-separated list of tokens (whitespace is any
sequence of characters matching the production S); the
result is a node-set containing the elements in the same
document as the context node that have a unique ID equal to
any of the tokens in the list. """
libxml2mod.xmlXPathIdFunction(self._o, nargs)
def xpathLangFunction(self, nargs):
"""Implement the lang() XPath function boolean lang(string)
The lang function returns true or false depending on
whether the language of the context node as specified by
xml:lang attributes is the same as or is a sublanguage of
the language specified by the argument string. The language
of the context node is determined by the value of the
xml:lang attribute on the context node, or, if the context
node has no xml:lang attribute, by the value of the
xml:lang attribute on the nearest ancestor of the context
node that has an xml:lang attribute. If there is no such
attribute, then lang """
libxml2mod.xmlXPathLangFunction(self._o, nargs)
def xpathLastFunction(self, nargs):
"""Implement the last() XPath function number last() The last
function returns the number of nodes in the context node
list. """
libxml2mod.xmlXPathLastFunction(self._o, nargs)
def xpathLocalNameFunction(self, nargs):
"""Implement the local-name() XPath function string
local-name(node-set?) The local-name function returns a
string containing the local part of the name of the node in
the argument node-set that is first in document order. If
the node-set is empty or the first node has no name, an
empty string is returned. If the argument is omitted it
defaults to the context node. """
libxml2mod.xmlXPathLocalNameFunction(self._o, nargs)
def xpathModValues(self):
"""Implement the mod operation on XPath objects: @arg1 / @arg2
The numeric operators convert their operands to numbers as
if by calling the number function. """
libxml2mod.xmlXPathModValues(self._o)
def xpathMultValues(self):
"""Implement the multiply operation on XPath objects: The
numeric operators convert their operands to numbers as if
by calling the number function. """
libxml2mod.xmlXPathMultValues(self._o)
def xpathNamespaceURIFunction(self, nargs):
"""Implement the namespace-uri() XPath function string
namespace-uri(node-set?) The namespace-uri function returns
a string containing the namespace URI of the expanded name
of the node in the argument node-set that is first in
document order. If the node-set is empty, the first node
has no name, or the expanded name has no namespace URI, an
empty string is returned. If the argument is omitted it
defaults to the context node. """
libxml2mod.xmlXPathNamespaceURIFunction(self._o, nargs)
def xpathNextAncestor(self, cur):
"""Traversal function for the "ancestor" direction the
ancestor axis contains the ancestors of the context node;
the ancestors of the context node consist of the parent of
context node and the parent's parent and so on; the nodes
are ordered in reverse document order; thus the parent is
the first node on the axis, and the parent's parent is the
second node on the axis """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAncestor(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAncestor() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextAncestorOrSelf(self, cur):
"""Traversal function for the "ancestor-or-self" direction he
ancestor-or-self axis contains the context node and
ancestors of the context node in reverse document order;
thus the context node is the first node on the axis, and
the context node's parent the second; parent here is
defined the same as with the parent axis. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAncestorOrSelf(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAncestorOrSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextAttribute(self, cur):
"""Traversal function for the "attribute" direction TODO:
support DTD inherited default attributes """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextAttribute(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextAttribute() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextChild(self, cur):
"""Traversal function for the "child" direction The child axis
contains the children of the context node in document order. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextChild(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextDescendant(self, cur):
"""Traversal function for the "descendant" direction the
descendant axis contains the descendants of the context
node in document order; a descendant is a child or a child
of a child and so on. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextDescendant(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextDescendant() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextDescendantOrSelf(self, cur):
"""Traversal function for the "descendant-or-self" direction
the descendant-or-self axis contains the context node and
the descendants of the context node in document order; thus
the context node is the first node on the axis, and the
first child of the context node is the second node on the
axis """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextDescendantOrSelf(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextDescendantOrSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextFollowing(self, cur):
"""Traversal function for the "following" direction The
following axis contains all nodes in the same document as
the context node that are after the context node in
document order, excluding any descendants and excluding
attribute nodes and namespace nodes; the nodes are ordered
in document order """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextFollowing(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextFollowing() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextFollowingSibling(self, cur):
"""Traversal function for the "following-sibling" direction
The following-sibling axis contains the following siblings
of the context node in document order. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextFollowingSibling(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextFollowingSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextNamespace(self, cur):
"""Traversal function for the "namespace" direction the
namespace axis contains the namespace nodes of the context
node; the order of nodes on this axis is
implementation-defined; the axis will be empty unless the
context node is an element We keep the XML namespace node
at the end of the list. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextNamespace(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextNamespace() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextParent(self, cur):
"""Traversal function for the "parent" direction The parent
axis contains the parent of the context node, if there is
one. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextParent(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextParent() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextPreceding(self, cur):
"""Traversal function for the "preceding" direction the
preceding axis contains all nodes in the same document as
the context node that are before the context node in
document order, excluding any ancestors and excluding
attribute nodes and namespace nodes; the nodes are ordered
in reverse document order """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextPreceding(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextPreceding() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextPrecedingSibling(self, cur):
"""Traversal function for the "preceding-sibling" direction
The preceding-sibling axis contains the preceding siblings
of the context node in reverse document order; the first
preceding sibling is first on the axis; the sibling
preceding that node is the second on the axis and so on. """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextPrecedingSibling(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextPrecedingSibling() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNextSelf(self, cur):
"""Traversal function for the "self" direction The self axis
contains just the context node itself """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextSelf(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def xpathNormalizeFunction(self, nargs):
"""Implement the normalize-space() XPath function string
normalize-space(string?) The normalize-space function
returns the argument string with white space normalized by
stripping leading and trailing whitespace and replacing
sequences of whitespace characters by a single space.
Whitespace characters are the same allowed by the S
production in XML. If the argument is omitted, it defaults
to the context node converted to a string, in other words
the value of the context node. """
libxml2mod.xmlXPathNormalizeFunction(self._o, nargs)
def xpathNotEqualValues(self):
"""Implement the equal operation on XPath objects content:
@arg1 == @arg2 """
ret = libxml2mod.xmlXPathNotEqualValues(self._o)
return ret
def xpathNotFunction(self, nargs):
"""Implement the not() XPath function boolean not(boolean) The
not function returns true if its argument is false, and
false otherwise. """
libxml2mod.xmlXPathNotFunction(self._o, nargs)
def xpathNumberFunction(self, nargs):
"""Implement the number() XPath function number number(object?) """
libxml2mod.xmlXPathNumberFunction(self._o, nargs)
def xpathParseNCName(self):
"""parse an XML namespace non qualified name. [NS 3] NCName
::= (Letter | '_') (NCNameChar)* [NS 4] NCNameChar ::=
Letter | Digit | '.' | '-' | '_' | CombiningChar | Extender """
ret = libxml2mod.xmlXPathParseNCName(self._o)
return ret
def xpathParseName(self):
"""parse an XML name [4] NameChar ::= Letter | Digit | '.' |
'-' | '_' | ':' | CombiningChar | Extender [5] Name ::=
(Letter | '_' | ':') (NameChar)* """
ret = libxml2mod.xmlXPathParseName(self._o)
return ret
def xpathPopBoolean(self):
"""Pops a boolean from the stack, handling conversion if
needed. Check error with #xmlXPathCheckError. """
ret = libxml2mod.xmlXPathPopBoolean(self._o)
return ret
def xpathPopNumber(self):
"""Pops a number from the stack, handling conversion if
needed. Check error with #xmlXPathCheckError. """
ret = libxml2mod.xmlXPathPopNumber(self._o)
return ret
def xpathPopString(self):
"""Pops a string from the stack, handling conversion if
needed. Check error with #xmlXPathCheckError. """
ret = libxml2mod.xmlXPathPopString(self._o)
return ret
def xpathPositionFunction(self, nargs):
"""Implement the position() XPath function number position()
The position function returns the position of the context
node in the context node list. The first position is 1, and
so the last position will be equal to last(). """
libxml2mod.xmlXPathPositionFunction(self._o, nargs)
def xpathRoot(self):
"""Initialize the context to the root of the document """
libxml2mod.xmlXPathRoot(self._o)
def xpathRoundFunction(self, nargs):
"""Implement the round() XPath function number round(number)
The round function returns the number that is closest to
the argument and that is an integer. If there are two such
numbers, then the one that is closest to positive infinity
is returned. """
libxml2mod.xmlXPathRoundFunction(self._o, nargs)
def xpathStartsWithFunction(self, nargs):
"""Implement the starts-with() XPath function boolean
starts-with(string, string) The starts-with function
returns true if the first argument string starts with the
second argument string, and otherwise returns false. """
libxml2mod.xmlXPathStartsWithFunction(self._o, nargs)
def xpathStringFunction(self, nargs):
"""Implement the string() XPath function string
string(object?) The string function converts an object to a
string as follows: - A node-set is converted to a string by
returning the value of the node in the node-set that is
first in document order. If the node-set is empty, an empty
string is returned. - A number is converted to a string as
follows + NaN is converted to the string NaN + positive
zero is converted to the string 0 + negative zero is
converted to the string 0 + positive infinity is converted
to the string Infinity + negative infinity is converted to
the string -Infinity + if the number is an integer, the
number is represented in decimal form as a Number with no
decimal point and no leading zeros, preceded by a minus
sign (-) if the number is negative + otherwise, the number
is represented in decimal form as a Number including a
decimal point with at least one digit before the decimal
point and at least one digit after the decimal point,
preceded by a minus sign (-) if the number is negative;
there must be no leading zeros before the decimal point
apart possibly from the one required digit immediately
before the decimal point; beyond the one required digit
after the decimal point there must be as many, but only as
many, more digits as are needed to uniquely distinguish the
number from all other IEEE 754 numeric values. - The
boolean false value is converted to the string false. The
boolean true value is converted to the string true. If the
argument is omitted, it defaults to a node-set with the
context node as its only member. """
libxml2mod.xmlXPathStringFunction(self._o, nargs)
def xpathStringLengthFunction(self, nargs):
"""Implement the string-length() XPath function number
string-length(string?) The string-length returns the number
of characters in the string (see [3.6 Strings]). If the
argument is omitted, it defaults to the context node
converted to a string, in other words the value of the
context node. """
libxml2mod.xmlXPathStringLengthFunction(self._o, nargs)
def xpathSubValues(self):
"""Implement the subtraction operation on XPath objects: The
numeric operators convert their operands to numbers as if
by calling the number function. """
libxml2mod.xmlXPathSubValues(self._o)
def xpathSubstringAfterFunction(self, nargs):
"""Implement the substring-after() XPath function string
substring-after(string, string) The substring-after
function returns the substring of the first argument string
that follows the first occurrence of the second argument
string in the first argument string, or the empty stringi
if the first argument string does not contain the second
argument string. For example,
substring-after("1999/04/01","/") returns 04/01, and
substring-after("1999/04/01","19") returns 99/04/01. """
libxml2mod.xmlXPathSubstringAfterFunction(self._o, nargs)
def xpathSubstringBeforeFunction(self, nargs):
"""Implement the substring-before() XPath function string
substring-before(string, string) The substring-before
function returns the substring of the first argument string
that precedes the first occurrence of the second argument
string in the first argument string, or the empty string if
the first argument string does not contain the second
argument string. For example,
substring-before("1999/04/01","/") returns 1999. """
libxml2mod.xmlXPathSubstringBeforeFunction(self._o, nargs)
def xpathSubstringFunction(self, nargs):
"""Implement the substring() XPath function string
substring(string, number, number?) The substring function
returns the substring of the first argument starting at the
position specified in the second argument with length
specified in the third argument. For example,
substring("12345",2,3) returns "234". If the third argument
is not specified, it returns the substring starting at the
position specified in the second argument and continuing to
the end of the string. For example, substring("12345",2)
returns "2345". More precisely, each character in the
string (see [3.6 Strings]) is considered to have a numeric
position: the position of the first character is 1, the
position of the second character is 2 and so on. The
returned substring contains those characters for which the
position of the character is greater than or equal to the
second argument and, if the third argument is specified,
less than the sum of the second and third arguments; the
comparisons and addition used for the above follow the
standard IEEE 754 rules. Thus: - substring("12345", 1.5,
2.6) returns "234" - substring("12345", 0, 3) returns "12"
- substring("12345", 0 div 0, 3) returns "" -
substring("12345", 1, 0 div 0) returns "" -
substring("12345", -42, 1 div 0) returns "12345" -
substring("12345", -1 div 0, 1 div 0) returns "" """
libxml2mod.xmlXPathSubstringFunction(self._o, nargs)
def xpathSumFunction(self, nargs):
"""Implement the sum() XPath function number sum(node-set) The
sum function returns the sum of the values of the nodes in
the argument node-set. """
libxml2mod.xmlXPathSumFunction(self._o, nargs)
def xpathTranslateFunction(self, nargs):
"""Implement the translate() XPath function string
translate(string, string, string) The translate function
returns the first argument string with occurrences of
characters in the second argument string replaced by the
character at the corresponding position in the third
argument string. For example, translate("bar","abc","ABC")
returns the string BAr. If there is a character in the
second argument string with no character at a corresponding
position in the third argument string (because the second
argument string is longer than the third argument string),
then occurrences of that character in the first argument
string are removed. For example,
translate("--aaa--","abc-","ABC") """
libxml2mod.xmlXPathTranslateFunction(self._o, nargs)
def xpathTrueFunction(self, nargs):
"""Implement the true() XPath function boolean true() """
libxml2mod.xmlXPathTrueFunction(self._o, nargs)
def xpathValueFlipSign(self):
"""Implement the unary - operation on an XPath object The
numeric operators convert their operands to numbers as if
by calling the number function. """
libxml2mod.xmlXPathValueFlipSign(self._o)
def xpatherror(self, file, line, no):
"""Formats an error message. """
libxml2mod.xmlXPatherror(self._o, file, line, no)
#
# xpathParserContext functions from module xpointer
#
def xpointerEvalRangePredicate(self):
"""[8] Predicate ::= '[' PredicateExpr ']' [9]
PredicateExpr ::= Expr Evaluate a predicate as in
xmlXPathEvalPredicate() but for a Location Set instead of a
node set """
libxml2mod.xmlXPtrEvalRangePredicate(self._o)
    def xpointerRangeToFunction(self, nargs: int):
        """Implement the range-to() XPointer function.

        Obsolete: range-to is not a real function but a special type
        of location step which is handled in xpath.c. Thin wrapper
        delegating to the C implementation; ``nargs`` is the argument
        count forwarded to the C call.
        """
        libxml2mod.xmlXPtrRangeToFunction(self._o, nargs)
# xlinkShow
XLINK_SHOW_NONE = 0
XLINK_SHOW_NEW = 1
XLINK_SHOW_EMBED = 2
XLINK_SHOW_REPLACE = 3
# xmlRelaxNGParserFlag
XML_RELAXNGP_NONE = 0
XML_RELAXNGP_FREE_DOC = 1
XML_RELAXNGP_CRNG = 2
# xmlBufferAllocationScheme
XML_BUFFER_ALLOC_DOUBLEIT = 1
XML_BUFFER_ALLOC_EXACT = 2
XML_BUFFER_ALLOC_IMMUTABLE = 3
XML_BUFFER_ALLOC_IO = 4
XML_BUFFER_ALLOC_HYBRID = 5
XML_BUFFER_ALLOC_BOUNDED = 6
# xmlParserSeverities
XML_PARSER_SEVERITY_VALIDITY_WARNING = 1
XML_PARSER_SEVERITY_VALIDITY_ERROR = 2
XML_PARSER_SEVERITY_WARNING = 3
XML_PARSER_SEVERITY_ERROR = 4
# xmlAttributeDefault
XML_ATTRIBUTE_NONE = 1
XML_ATTRIBUTE_REQUIRED = 2
XML_ATTRIBUTE_IMPLIED = 3
XML_ATTRIBUTE_FIXED = 4
# xmlSchemaValType
XML_SCHEMAS_UNKNOWN = 0
XML_SCHEMAS_STRING = 1
XML_SCHEMAS_NORMSTRING = 2
XML_SCHEMAS_DECIMAL = 3
XML_SCHEMAS_TIME = 4
XML_SCHEMAS_GDAY = 5
XML_SCHEMAS_GMONTH = 6
XML_SCHEMAS_GMONTHDAY = 7
XML_SCHEMAS_GYEAR = 8
XML_SCHEMAS_GYEARMONTH = 9
XML_SCHEMAS_DATE = 10
XML_SCHEMAS_DATETIME = 11
XML_SCHEMAS_DURATION = 12
XML_SCHEMAS_FLOAT = 13
XML_SCHEMAS_DOUBLE = 14
XML_SCHEMAS_BOOLEAN = 15
XML_SCHEMAS_TOKEN = 16
XML_SCHEMAS_LANGUAGE = 17
XML_SCHEMAS_NMTOKEN = 18
XML_SCHEMAS_NMTOKENS = 19
XML_SCHEMAS_NAME = 20
XML_SCHEMAS_QNAME = 21
XML_SCHEMAS_NCNAME = 22
XML_SCHEMAS_ID = 23
XML_SCHEMAS_IDREF = 24
XML_SCHEMAS_IDREFS = 25
XML_SCHEMAS_ENTITY = 26
XML_SCHEMAS_ENTITIES = 27
XML_SCHEMAS_NOTATION = 28
XML_SCHEMAS_ANYURI = 29
XML_SCHEMAS_INTEGER = 30
XML_SCHEMAS_NPINTEGER = 31
XML_SCHEMAS_NINTEGER = 32
XML_SCHEMAS_NNINTEGER = 33
XML_SCHEMAS_PINTEGER = 34
XML_SCHEMAS_INT = 35
XML_SCHEMAS_UINT = 36
XML_SCHEMAS_LONG = 37
XML_SCHEMAS_ULONG = 38
XML_SCHEMAS_SHORT = 39
XML_SCHEMAS_USHORT = 40
XML_SCHEMAS_BYTE = 41
XML_SCHEMAS_UBYTE = 42
XML_SCHEMAS_HEXBINARY = 43
XML_SCHEMAS_BASE64BINARY = 44
XML_SCHEMAS_ANYTYPE = 45
XML_SCHEMAS_ANYSIMPLETYPE = 46
# xmlParserInputState
XML_PARSER_EOF = -1
XML_PARSER_START = 0
XML_PARSER_MISC = 1
XML_PARSER_PI = 2
XML_PARSER_DTD = 3
XML_PARSER_PROLOG = 4
XML_PARSER_COMMENT = 5
XML_PARSER_START_TAG = 6
XML_PARSER_CONTENT = 7
XML_PARSER_CDATA_SECTION = 8
XML_PARSER_END_TAG = 9
XML_PARSER_ENTITY_DECL = 10
XML_PARSER_ENTITY_VALUE = 11
XML_PARSER_ATTRIBUTE_VALUE = 12
XML_PARSER_SYSTEM_LITERAL = 13
XML_PARSER_EPILOG = 14
XML_PARSER_IGNORE = 15
XML_PARSER_PUBLIC_LITERAL = 16
# xmlEntityType
XML_INTERNAL_GENERAL_ENTITY = 1
XML_EXTERNAL_GENERAL_PARSED_ENTITY = 2
XML_EXTERNAL_GENERAL_UNPARSED_ENTITY = 3
XML_INTERNAL_PARAMETER_ENTITY = 4
XML_EXTERNAL_PARAMETER_ENTITY = 5
XML_INTERNAL_PREDEFINED_ENTITY = 6
# xmlSaveOption
XML_SAVE_FORMAT = 1
XML_SAVE_NO_DECL = 2
XML_SAVE_NO_EMPTY = 4
XML_SAVE_NO_XHTML = 8
XML_SAVE_XHTML = 16
XML_SAVE_AS_XML = 32
XML_SAVE_AS_HTML = 64
XML_SAVE_WSNONSIG = 128
# xmlPatternFlags
XML_PATTERN_DEFAULT = 0
XML_PATTERN_XPATH = 1
XML_PATTERN_XSSEL = 2
XML_PATTERN_XSFIELD = 4
# xmlParserErrors
XML_ERR_OK = 0
XML_ERR_INTERNAL_ERROR = 1
XML_ERR_NO_MEMORY = 2
XML_ERR_DOCUMENT_START = 3
XML_ERR_DOCUMENT_EMPTY = 4
XML_ERR_DOCUMENT_END = 5
XML_ERR_INVALID_HEX_CHARREF = 6
XML_ERR_INVALID_DEC_CHARREF = 7
XML_ERR_INVALID_CHARREF = 8
XML_ERR_INVALID_CHAR = 9
XML_ERR_CHARREF_AT_EOF = 10
XML_ERR_CHARREF_IN_PROLOG = 11
XML_ERR_CHARREF_IN_EPILOG = 12
XML_ERR_CHARREF_IN_DTD = 13
XML_ERR_ENTITYREF_AT_EOF = 14
XML_ERR_ENTITYREF_IN_PROLOG = 15
XML_ERR_ENTITYREF_IN_EPILOG = 16
XML_ERR_ENTITYREF_IN_DTD = 17
XML_ERR_PEREF_AT_EOF = 18
XML_ERR_PEREF_IN_PROLOG = 19
XML_ERR_PEREF_IN_EPILOG = 20
XML_ERR_PEREF_IN_INT_SUBSET = 21
XML_ERR_ENTITYREF_NO_NAME = 22
XML_ERR_ENTITYREF_SEMICOL_MISSING = 23
XML_ERR_PEREF_NO_NAME = 24
XML_ERR_PEREF_SEMICOL_MISSING = 25
XML_ERR_UNDECLARED_ENTITY = 26
XML_WAR_UNDECLARED_ENTITY = 27
XML_ERR_UNPARSED_ENTITY = 28
XML_ERR_ENTITY_IS_EXTERNAL = 29
XML_ERR_ENTITY_IS_PARAMETER = 30
XML_ERR_UNKNOWN_ENCODING = 31
XML_ERR_UNSUPPORTED_ENCODING = 32
XML_ERR_STRING_NOT_STARTED = 33
XML_ERR_STRING_NOT_CLOSED = 34
XML_ERR_NS_DECL_ERROR = 35
XML_ERR_ENTITY_NOT_STARTED = 36
XML_ERR_ENTITY_NOT_FINISHED = 37
XML_ERR_LT_IN_ATTRIBUTE = 38
XML_ERR_ATTRIBUTE_NOT_STARTED = 39
XML_ERR_ATTRIBUTE_NOT_FINISHED = 40
XML_ERR_ATTRIBUTE_WITHOUT_VALUE = 41
XML_ERR_ATTRIBUTE_REDEFINED = 42
XML_ERR_LITERAL_NOT_STARTED = 43
XML_ERR_LITERAL_NOT_FINISHED = 44
XML_ERR_COMMENT_NOT_FINISHED = 45
XML_ERR_PI_NOT_STARTED = 46
XML_ERR_PI_NOT_FINISHED = 47
XML_ERR_NOTATION_NOT_STARTED = 48
XML_ERR_NOTATION_NOT_FINISHED = 49
XML_ERR_ATTLIST_NOT_STARTED = 50
XML_ERR_ATTLIST_NOT_FINISHED = 51
XML_ERR_MIXED_NOT_STARTED = 52
XML_ERR_MIXED_NOT_FINISHED = 53
XML_ERR_ELEMCONTENT_NOT_STARTED = 54
XML_ERR_ELEMCONTENT_NOT_FINISHED = 55
XML_ERR_XMLDECL_NOT_STARTED = 56
XML_ERR_XMLDECL_NOT_FINISHED = 57
XML_ERR_CONDSEC_NOT_STARTED = 58
XML_ERR_CONDSEC_NOT_FINISHED = 59
XML_ERR_EXT_SUBSET_NOT_FINISHED = 60
XML_ERR_DOCTYPE_NOT_FINISHED = 61
XML_ERR_MISPLACED_CDATA_END = 62
XML_ERR_CDATA_NOT_FINISHED = 63
XML_ERR_RESERVED_XML_NAME = 64
XML_ERR_SPACE_REQUIRED = 65
XML_ERR_SEPARATOR_REQUIRED = 66
XML_ERR_NMTOKEN_REQUIRED = 67
XML_ERR_NAME_REQUIRED = 68
XML_ERR_PCDATA_REQUIRED = 69
XML_ERR_URI_REQUIRED = 70
XML_ERR_PUBID_REQUIRED = 71
XML_ERR_LT_REQUIRED = 72
XML_ERR_GT_REQUIRED = 73
XML_ERR_LTSLASH_REQUIRED = 74
XML_ERR_EQUAL_REQUIRED = 75
XML_ERR_TAG_NAME_MISMATCH = 76
XML_ERR_TAG_NOT_FINISHED = 77
XML_ERR_STANDALONE_VALUE = 78
XML_ERR_ENCODING_NAME = 79
XML_ERR_HYPHEN_IN_COMMENT = 80
XML_ERR_INVALID_ENCODING = 81
XML_ERR_EXT_ENTITY_STANDALONE = 82
XML_ERR_CONDSEC_INVALID = 83
XML_ERR_VALUE_REQUIRED = 84
XML_ERR_NOT_WELL_BALANCED = 85
XML_ERR_EXTRA_CONTENT = 86
XML_ERR_ENTITY_CHAR_ERROR = 87
XML_ERR_ENTITY_PE_INTERNAL = 88
XML_ERR_ENTITY_LOOP = 89
XML_ERR_ENTITY_BOUNDARY = 90
XML_ERR_INVALID_URI = 91
XML_ERR_URI_FRAGMENT = 92
XML_WAR_CATALOG_PI = 93
XML_ERR_NO_DTD = 94
XML_ERR_CONDSEC_INVALID_KEYWORD = 95
XML_ERR_VERSION_MISSING = 96
XML_WAR_UNKNOWN_VERSION = 97
XML_WAR_LANG_VALUE = 98
XML_WAR_NS_URI = 99
XML_WAR_NS_URI_RELATIVE = 100
XML_ERR_MISSING_ENCODING = 101
XML_WAR_SPACE_VALUE = 102
XML_ERR_NOT_STANDALONE = 103
XML_ERR_ENTITY_PROCESSING = 104
XML_ERR_NOTATION_PROCESSING = 105
XML_WAR_NS_COLUMN = 106
XML_WAR_ENTITY_REDEFINED = 107
XML_ERR_UNKNOWN_VERSION = 108
XML_ERR_VERSION_MISMATCH = 109
XML_ERR_NAME_TOO_LONG = 110
XML_ERR_USER_STOP = 111
XML_NS_ERR_XML_NAMESPACE = 200
XML_NS_ERR_UNDEFINED_NAMESPACE = 201
XML_NS_ERR_QNAME = 202
XML_NS_ERR_ATTRIBUTE_REDEFINED = 203
XML_NS_ERR_EMPTY = 204
XML_NS_ERR_COLON = 205
XML_DTD_ATTRIBUTE_DEFAULT = 500
XML_DTD_ATTRIBUTE_REDEFINED = 501
XML_DTD_ATTRIBUTE_VALUE = 502
XML_DTD_CONTENT_ERROR = 503
XML_DTD_CONTENT_MODEL = 504
XML_DTD_CONTENT_NOT_DETERMINIST = 505
XML_DTD_DIFFERENT_PREFIX = 506
XML_DTD_ELEM_DEFAULT_NAMESPACE = 507
XML_DTD_ELEM_NAMESPACE = 508
XML_DTD_ELEM_REDEFINED = 509
XML_DTD_EMPTY_NOTATION = 510
XML_DTD_ENTITY_TYPE = 511
XML_DTD_ID_FIXED = 512
XML_DTD_ID_REDEFINED = 513
XML_DTD_ID_SUBSET = 514
XML_DTD_INVALID_CHILD = 515
XML_DTD_INVALID_DEFAULT = 516
XML_DTD_LOAD_ERROR = 517
XML_DTD_MISSING_ATTRIBUTE = 518
XML_DTD_MIXED_CORRUPT = 519
XML_DTD_MULTIPLE_ID = 520
XML_DTD_NO_DOC = 521
XML_DTD_NO_DTD = 522
XML_DTD_NO_ELEM_NAME = 523
XML_DTD_NO_PREFIX = 524
XML_DTD_NO_ROOT = 525
XML_DTD_NOTATION_REDEFINED = 526
XML_DTD_NOTATION_VALUE = 527
XML_DTD_NOT_EMPTY = 528
XML_DTD_NOT_PCDATA = 529
XML_DTD_NOT_STANDALONE = 530
XML_DTD_ROOT_NAME = 531
XML_DTD_STANDALONE_WHITE_SPACE = 532
XML_DTD_UNKNOWN_ATTRIBUTE = 533
XML_DTD_UNKNOWN_ELEM = 534
XML_DTD_UNKNOWN_ENTITY = 535
XML_DTD_UNKNOWN_ID = 536
XML_DTD_UNKNOWN_NOTATION = 537
XML_DTD_STANDALONE_DEFAULTED = 538
XML_DTD_XMLID_VALUE = 539
XML_DTD_XMLID_TYPE = 540
XML_DTD_DUP_TOKEN = 541
XML_HTML_STRUCURE_ERROR = 800
XML_HTML_UNKNOWN_TAG = 801
XML_RNGP_ANYNAME_ATTR_ANCESTOR = 1000
XML_RNGP_ATTR_CONFLICT = 1001
XML_RNGP_ATTRIBUTE_CHILDREN = 1002
XML_RNGP_ATTRIBUTE_CONTENT = 1003
XML_RNGP_ATTRIBUTE_EMPTY = 1004
XML_RNGP_ATTRIBUTE_NOOP = 1005
XML_RNGP_CHOICE_CONTENT = 1006
XML_RNGP_CHOICE_EMPTY = 1007
XML_RNGP_CREATE_FAILURE = 1008
XML_RNGP_DATA_CONTENT = 1009
XML_RNGP_DEF_CHOICE_AND_INTERLEAVE = 1010
XML_RNGP_DEFINE_CREATE_FAILED = 1011
XML_RNGP_DEFINE_EMPTY = 1012
XML_RNGP_DEFINE_MISSING = 1013
XML_RNGP_DEFINE_NAME_MISSING = 1014
XML_RNGP_ELEM_CONTENT_EMPTY = 1015
XML_RNGP_ELEM_CONTENT_ERROR = 1016
XML_RNGP_ELEMENT_EMPTY = 1017
XML_RNGP_ELEMENT_CONTENT = 1018
XML_RNGP_ELEMENT_NAME = 1019
XML_RNGP_ELEMENT_NO_CONTENT = 1020
XML_RNGP_ELEM_TEXT_CONFLICT = 1021
XML_RNGP_EMPTY = 1022
XML_RNGP_EMPTY_CONSTRUCT = 1023
XML_RNGP_EMPTY_CONTENT = 1024
XML_RNGP_EMPTY_NOT_EMPTY = 1025
XML_RNGP_ERROR_TYPE_LIB = 1026
XML_RNGP_EXCEPT_EMPTY = 1027
XML_RNGP_EXCEPT_MISSING = 1028
XML_RNGP_EXCEPT_MULTIPLE = 1029
XML_RNGP_EXCEPT_NO_CONTENT = 1030
XML_RNGP_EXTERNALREF_EMTPY = 1031
XML_RNGP_EXTERNAL_REF_FAILURE = 1032
XML_RNGP_EXTERNALREF_RECURSE = 1033
XML_RNGP_FORBIDDEN_ATTRIBUTE = 1034
XML_RNGP_FOREIGN_ELEMENT = 1035
XML_RNGP_GRAMMAR_CONTENT = 1036
XML_RNGP_GRAMMAR_EMPTY = 1037
XML_RNGP_GRAMMAR_MISSING = 1038
XML_RNGP_GRAMMAR_NO_START = 1039
XML_RNGP_GROUP_ATTR_CONFLICT = 1040
XML_RNGP_HREF_ERROR = 1041
XML_RNGP_INCLUDE_EMPTY = 1042
XML_RNGP_INCLUDE_FAILURE = 1043
XML_RNGP_INCLUDE_RECURSE = 1044
XML_RNGP_INTERLEAVE_ADD = 1045
XML_RNGP_INTERLEAVE_CREATE_FAILED = 1046
XML_RNGP_INTERLEAVE_EMPTY = 1047
XML_RNGP_INTERLEAVE_NO_CONTENT = 1048
XML_RNGP_INVALID_DEFINE_NAME = 1049
XML_RNGP_INVALID_URI = 1050
XML_RNGP_INVALID_VALUE = 1051
XML_RNGP_MISSING_HREF = 1052
XML_RNGP_NAME_MISSING = 1053
XML_RNGP_NEED_COMBINE = 1054
XML_RNGP_NOTALLOWED_NOT_EMPTY = 1055
XML_RNGP_NSNAME_ATTR_ANCESTOR = 1056
XML_RNGP_NSNAME_NO_NS = 1057
XML_RNGP_PARAM_FORBIDDEN = 1058
XML_RNGP_PARAM_NAME_MISSING = 1059
XML_RNGP_PARENTREF_CREATE_FAILED = 1060
XML_RNGP_PARENTREF_NAME_INVALID = 1061
XML_RNGP_PARENTREF_NO_NAME = 1062
XML_RNGP_PARENTREF_NO_PARENT = 1063
XML_RNGP_PARENTREF_NOT_EMPTY = 1064
XML_RNGP_PARSE_ERROR = 1065
XML_RNGP_PAT_ANYNAME_EXCEPT_ANYNAME = 1066
XML_RNGP_PAT_ATTR_ATTR = 1067
XML_RNGP_PAT_ATTR_ELEM = 1068
XML_RNGP_PAT_DATA_EXCEPT_ATTR = 1069
XML_RNGP_PAT_DATA_EXCEPT_ELEM = 1070
XML_RNGP_PAT_DATA_EXCEPT_EMPTY = 1071
XML_RNGP_PAT_DATA_EXCEPT_GROUP = 1072
XML_RNGP_PAT_DATA_EXCEPT_INTERLEAVE = 1073
XML_RNGP_PAT_DATA_EXCEPT_LIST = 1074
XML_RNGP_PAT_DATA_EXCEPT_ONEMORE = 1075
XML_RNGP_PAT_DATA_EXCEPT_REF = 1076
XML_RNGP_PAT_DATA_EXCEPT_TEXT = 1077
XML_RNGP_PAT_LIST_ATTR = 1078
XML_RNGP_PAT_LIST_ELEM = 1079
XML_RNGP_PAT_LIST_INTERLEAVE = 1080
XML_RNGP_PAT_LIST_LIST = 1081
XML_RNGP_PAT_LIST_REF = 1082
XML_RNGP_PAT_LIST_TEXT = 1083
XML_RNGP_PAT_NSNAME_EXCEPT_ANYNAME = 1084
XML_RNGP_PAT_NSNAME_EXCEPT_NSNAME = 1085
XML_RNGP_PAT_ONEMORE_GROUP_ATTR = 1086
XML_RNGP_PAT_ONEMORE_INTERLEAVE_ATTR = 1087
XML_RNGP_PAT_START_ATTR = 1088
XML_RNGP_PAT_START_DATA = 1089
XML_RNGP_PAT_START_EMPTY = 1090
XML_RNGP_PAT_START_GROUP = 1091
XML_RNGP_PAT_START_INTERLEAVE = 1092
XML_RNGP_PAT_START_LIST = 1093
XML_RNGP_PAT_START_ONEMORE = 1094
XML_RNGP_PAT_START_TEXT = 1095
XML_RNGP_PAT_START_VALUE = 1096
XML_RNGP_PREFIX_UNDEFINED = 1097
XML_RNGP_REF_CREATE_FAILED = 1098
XML_RNGP_REF_CYCLE = 1099
XML_RNGP_REF_NAME_INVALID = 1100
XML_RNGP_REF_NO_DEF = 1101
XML_RNGP_REF_NO_NAME = 1102
XML_RNGP_REF_NOT_EMPTY = 1103
XML_RNGP_START_CHOICE_AND_INTERLEAVE = 1104
XML_RNGP_START_CONTENT = 1105
XML_RNGP_START_EMPTY = 1106
XML_RNGP_START_MISSING = 1107
XML_RNGP_TEXT_EXPECTED = 1108
XML_RNGP_TEXT_HAS_CHILD = 1109
XML_RNGP_TYPE_MISSING = 1110
XML_RNGP_TYPE_NOT_FOUND = 1111
XML_RNGP_TYPE_VALUE = 1112
XML_RNGP_UNKNOWN_ATTRIBUTE = 1113
XML_RNGP_UNKNOWN_COMBINE = 1114
XML_RNGP_UNKNOWN_CONSTRUCT = 1115
XML_RNGP_UNKNOWN_TYPE_LIB = 1116
XML_RNGP_URI_FRAGMENT = 1117
XML_RNGP_URI_NOT_ABSOLUTE = 1118
XML_RNGP_VALUE_EMPTY = 1119
XML_RNGP_VALUE_NO_CONTENT = 1120
XML_RNGP_XMLNS_NAME = 1121
XML_RNGP_XML_NS = 1122
XML_XPATH_EXPRESSION_OK = 1200
XML_XPATH_NUMBER_ERROR = 1201
XML_XPATH_UNFINISHED_LITERAL_ERROR = 1202
XML_XPATH_START_LITERAL_ERROR = 1203
XML_XPATH_VARIABLE_REF_ERROR = 1204
XML_XPATH_UNDEF_VARIABLE_ERROR = 1205
XML_XPATH_INVALID_PREDICATE_ERROR = 1206
XML_XPATH_EXPR_ERROR = 1207
XML_XPATH_UNCLOSED_ERROR = 1208
XML_XPATH_UNKNOWN_FUNC_ERROR = 1209
XML_XPATH_INVALID_OPERAND = 1210
XML_XPATH_INVALID_TYPE = 1211
XML_XPATH_INVALID_ARITY = 1212
XML_XPATH_INVALID_CTXT_SIZE = 1213
XML_XPATH_INVALID_CTXT_POSITION = 1214
XML_XPATH_MEMORY_ERROR = 1215
XML_XPTR_SYNTAX_ERROR = 1216
XML_XPTR_RESOURCE_ERROR = 1217
XML_XPTR_SUB_RESOURCE_ERROR = 1218
XML_XPATH_UNDEF_PREFIX_ERROR = 1219
XML_XPATH_ENCODING_ERROR = 1220
XML_XPATH_INVALID_CHAR_ERROR = 1221
XML_TREE_INVALID_HEX = 1300
XML_TREE_INVALID_DEC = 1301
XML_TREE_UNTERMINATED_ENTITY = 1302
XML_TREE_NOT_UTF8 = 1303
XML_SAVE_NOT_UTF8 = 1400
XML_SAVE_CHAR_INVALID = 1401
XML_SAVE_NO_DOCTYPE = 1402
XML_SAVE_UNKNOWN_ENCODING = 1403
XML_REGEXP_COMPILE_ERROR = 1450
XML_IO_UNKNOWN = 1500
XML_IO_EACCES = 1501
XML_IO_EAGAIN = 1502
XML_IO_EBADF = 1503
XML_IO_EBADMSG = 1504
XML_IO_EBUSY = 1505
XML_IO_ECANCELED = 1506
XML_IO_ECHILD = 1507
XML_IO_EDEADLK = 1508
XML_IO_EDOM = 1509
XML_IO_EEXIST = 1510
XML_IO_EFAULT = 1511
XML_IO_EFBIG = 1512
XML_IO_EINPROGRESS = 1513
XML_IO_EINTR = 1514
XML_IO_EINVAL = 1515
XML_IO_EIO = 1516
XML_IO_EISDIR = 1517
XML_IO_EMFILE = 1518
XML_IO_EMLINK = 1519
XML_IO_EMSGSIZE = 1520
XML_IO_ENAMETOOLONG = 1521
XML_IO_ENFILE = 1522
XML_IO_ENODEV = 1523
XML_IO_ENOENT = 1524
XML_IO_ENOEXEC = 1525
XML_IO_ENOLCK = 1526
XML_IO_ENOMEM = 1527
XML_IO_ENOSPC = 1528
XML_IO_ENOSYS = 1529
XML_IO_ENOTDIR = 1530
XML_IO_ENOTEMPTY = 1531
XML_IO_ENOTSUP = 1532
XML_IO_ENOTTY = 1533
XML_IO_ENXIO = 1534
XML_IO_EPERM = 1535
XML_IO_EPIPE = 1536
XML_IO_ERANGE = 1537
XML_IO_EROFS = 1538
XML_IO_ESPIPE = 1539
XML_IO_ESRCH = 1540
XML_IO_ETIMEDOUT = 1541
XML_IO_EXDEV = 1542
XML_IO_NETWORK_ATTEMPT = 1543
XML_IO_ENCODER = 1544
XML_IO_FLUSH = 1545
XML_IO_WRITE = 1546
XML_IO_NO_INPUT = 1547
XML_IO_BUFFER_FULL = 1548
XML_IO_LOAD_ERROR = 1549
XML_IO_ENOTSOCK = 1550
XML_IO_EISCONN = 1551
XML_IO_ECONNREFUSED = 1552
XML_IO_ENETUNREACH = 1553
XML_IO_EADDRINUSE = 1554
XML_IO_EALREADY = 1555
XML_IO_EAFNOSUPPORT = 1556
XML_XINCLUDE_RECURSION = 1600
XML_XINCLUDE_PARSE_VALUE = 1601
XML_XINCLUDE_ENTITY_DEF_MISMATCH = 1602
XML_XINCLUDE_NO_HREF = 1603
XML_XINCLUDE_NO_FALLBACK = 1604
XML_XINCLUDE_HREF_URI = 1605
XML_XINCLUDE_TEXT_FRAGMENT = 1606
XML_XINCLUDE_TEXT_DOCUMENT = 1607
XML_XINCLUDE_INVALID_CHAR = 1608
XML_XINCLUDE_BUILD_FAILED = 1609
XML_XINCLUDE_UNKNOWN_ENCODING = 1610
XML_XINCLUDE_MULTIPLE_ROOT = 1611
XML_XINCLUDE_XPTR_FAILED = 1612
XML_XINCLUDE_XPTR_RESULT = 1613
XML_XINCLUDE_INCLUDE_IN_INCLUDE = 1614
XML_XINCLUDE_FALLBACKS_IN_INCLUDE = 1615
XML_XINCLUDE_FALLBACK_NOT_IN_INCLUDE = 1616
XML_XINCLUDE_DEPRECATED_NS = 1617
XML_XINCLUDE_FRAGMENT_ID = 1618
XML_CATALOG_MISSING_ATTR = 1650
XML_CATALOG_ENTRY_BROKEN = 1651
XML_CATALOG_PREFER_VALUE = 1652
XML_CATALOG_NOT_CATALOG = 1653
XML_CATALOG_RECURSION = 1654
XML_SCHEMAP_PREFIX_UNDEFINED = 1700
XML_SCHEMAP_ATTRFORMDEFAULT_VALUE = 1701
XML_SCHEMAP_ATTRGRP_NONAME_NOREF = 1702
XML_SCHEMAP_ATTR_NONAME_NOREF = 1703
XML_SCHEMAP_COMPLEXTYPE_NONAME_NOREF = 1704
XML_SCHEMAP_ELEMFORMDEFAULT_VALUE = 1705
XML_SCHEMAP_ELEM_NONAME_NOREF = 1706
XML_SCHEMAP_EXTENSION_NO_BASE = 1707
XML_SCHEMAP_FACET_NO_VALUE = 1708
XML_SCHEMAP_FAILED_BUILD_IMPORT = 1709
XML_SCHEMAP_GROUP_NONAME_NOREF = 1710
XML_SCHEMAP_IMPORT_NAMESPACE_NOT_URI = 1711
XML_SCHEMAP_IMPORT_REDEFINE_NSNAME = 1712
XML_SCHEMAP_IMPORT_SCHEMA_NOT_URI = 1713
XML_SCHEMAP_INVALID_BOOLEAN = 1714
XML_SCHEMAP_INVALID_ENUM = 1715
XML_SCHEMAP_INVALID_FACET = 1716
XML_SCHEMAP_INVALID_FACET_VALUE = 1717
XML_SCHEMAP_INVALID_MAXOCCURS = 1718
XML_SCHEMAP_INVALID_MINOCCURS = 1719
XML_SCHEMAP_INVALID_REF_AND_SUBTYPE = 1720
XML_SCHEMAP_INVALID_WHITE_SPACE = 1721
XML_SCHEMAP_NOATTR_NOREF = 1722
XML_SCHEMAP_NOTATION_NO_NAME = 1723
XML_SCHEMAP_NOTYPE_NOREF = 1724
XML_SCHEMAP_REF_AND_SUBTYPE = 1725
XML_SCHEMAP_RESTRICTION_NONAME_NOREF = 1726
XML_SCHEMAP_SIMPLETYPE_NONAME = 1727
XML_SCHEMAP_TYPE_AND_SUBTYPE = 1728
XML_SCHEMAP_UNKNOWN_ALL_CHILD = 1729
XML_SCHEMAP_UNKNOWN_ANYATTRIBUTE_CHILD = 1730
XML_SCHEMAP_UNKNOWN_ATTR_CHILD = 1731
XML_SCHEMAP_UNKNOWN_ATTRGRP_CHILD = 1732
XML_SCHEMAP_UNKNOWN_ATTRIBUTE_GROUP = 1733
XML_SCHEMAP_UNKNOWN_BASE_TYPE = 1734
XML_SCHEMAP_UNKNOWN_CHOICE_CHILD = 1735
XML_SCHEMAP_UNKNOWN_COMPLEXCONTENT_CHILD = 1736
XML_SCHEMAP_UNKNOWN_COMPLEXTYPE_CHILD = 1737
XML_SCHEMAP_UNKNOWN_ELEM_CHILD = 1738
XML_SCHEMAP_UNKNOWN_EXTENSION_CHILD = 1739
XML_SCHEMAP_UNKNOWN_FACET_CHILD = 1740
XML_SCHEMAP_UNKNOWN_FACET_TYPE = 1741
XML_SCHEMAP_UNKNOWN_GROUP_CHILD = 1742
XML_SCHEMAP_UNKNOWN_IMPORT_CHILD = 1743
XML_SCHEMAP_UNKNOWN_LIST_CHILD = 1744
XML_SCHEMAP_UNKNOWN_NOTATION_CHILD = 1745
XML_SCHEMAP_UNKNOWN_PROCESSCONTENT_CHILD = 1746
XML_SCHEMAP_UNKNOWN_REF = 1747
XML_SCHEMAP_UNKNOWN_RESTRICTION_CHILD = 1748
XML_SCHEMAP_UNKNOWN_SCHEMAS_CHILD = 1749
XML_SCHEMAP_UNKNOWN_SEQUENCE_CHILD = 1750
XML_SCHEMAP_UNKNOWN_SIMPLECONTENT_CHILD = 1751
XML_SCHEMAP_UNKNOWN_SIMPLETYPE_CHILD = 1752
XML_SCHEMAP_UNKNOWN_TYPE = 1753
XML_SCHEMAP_UNKNOWN_UNION_CHILD = 1754
XML_SCHEMAP_ELEM_DEFAULT_FIXED = 1755
XML_SCHEMAP_REGEXP_INVALID = 1756
XML_SCHEMAP_FAILED_LOAD = 1757
XML_SCHEMAP_NOTHING_TO_PARSE = 1758
XML_SCHEMAP_NOROOT = 1759
XML_SCHEMAP_REDEFINED_GROUP = 1760
XML_SCHEMAP_REDEFINED_TYPE = 1761
XML_SCHEMAP_REDEFINED_ELEMENT = 1762
XML_SCHEMAP_REDEFINED_ATTRGROUP = 1763
XML_SCHEMAP_REDEFINED_ATTR = 1764
XML_SCHEMAP_REDEFINED_NOTATION = 1765
XML_SCHEMAP_FAILED_PARSE = 1766
XML_SCHEMAP_UNKNOWN_PREFIX = 1767
XML_SCHEMAP_DEF_AND_PREFIX = 1768
XML_SCHEMAP_UNKNOWN_INCLUDE_CHILD = 1769
XML_SCHEMAP_INCLUDE_SCHEMA_NOT_URI = 1770
XML_SCHEMAP_INCLUDE_SCHEMA_NO_URI = 1771
XML_SCHEMAP_NOT_SCHEMA = 1772
XML_SCHEMAP_UNKNOWN_MEMBER_TYPE = 1773
XML_SCHEMAP_INVALID_ATTR_USE = 1774
XML_SCHEMAP_RECURSIVE = 1775
XML_SCHEMAP_SUPERNUMEROUS_LIST_ITEM_TYPE = 1776
XML_SCHEMAP_INVALID_ATTR_COMBINATION = 1777
XML_SCHEMAP_INVALID_ATTR_INLINE_COMBINATION = 1778
XML_SCHEMAP_MISSING_SIMPLETYPE_CHILD = 1779
XML_SCHEMAP_INVALID_ATTR_NAME = 1780
XML_SCHEMAP_REF_AND_CONTENT = 1781
XML_SCHEMAP_CT_PROPS_CORRECT_1 = 1782
XML_SCHEMAP_CT_PROPS_CORRECT_2 = 1783
XML_SCHEMAP_CT_PROPS_CORRECT_3 = 1784
XML_SCHEMAP_CT_PROPS_CORRECT_4 = 1785
XML_SCHEMAP_CT_PROPS_CORRECT_5 = 1786
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_1 = 1787
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_1 = 1788
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_2 = 1789
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_2 = 1790
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_3 = 1791
XML_SCHEMAP_WILDCARD_INVALID_NS_MEMBER = 1792
XML_SCHEMAP_INTERSECTION_NOT_EXPRESSIBLE = 1793
XML_SCHEMAP_UNION_NOT_EXPRESSIBLE = 1794
XML_SCHEMAP_SRC_IMPORT_3_1 = 1795
XML_SCHEMAP_SRC_IMPORT_3_2 = 1796
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_1 = 1797
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_2 = 1798
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_4_3 = 1799
XML_SCHEMAP_COS_CT_EXTENDS_1_3 = 1800
XML_SCHEMAV_NOROOT = 1801
XML_SCHEMAV_UNDECLAREDELEM = 1802
XML_SCHEMAV_NOTTOPLEVEL = 1803
XML_SCHEMAV_MISSING = 1804
XML_SCHEMAV_WRONGELEM = 1805
XML_SCHEMAV_NOTYPE = 1806
XML_SCHEMAV_NOROLLBACK = 1807
XML_SCHEMAV_ISABSTRACT = 1808
XML_SCHEMAV_NOTEMPTY = 1809
XML_SCHEMAV_ELEMCONT = 1810
XML_SCHEMAV_HAVEDEFAULT = 1811
XML_SCHEMAV_NOTNILLABLE = 1812
XML_SCHEMAV_EXTRACONTENT = 1813
XML_SCHEMAV_INVALIDATTR = 1814
XML_SCHEMAV_INVALIDELEM = 1815
XML_SCHEMAV_NOTDETERMINIST = 1816
XML_SCHEMAV_CONSTRUCT = 1817
XML_SCHEMAV_INTERNAL = 1818
XML_SCHEMAV_NOTSIMPLE = 1819
XML_SCHEMAV_ATTRUNKNOWN = 1820
XML_SCHEMAV_ATTRINVALID = 1821
XML_SCHEMAV_VALUE = 1822
XML_SCHEMAV_FACET = 1823
XML_SCHEMAV_CVC_DATATYPE_VALID_1_2_1 = 1824
XML_SCHEMAV_CVC_DATATYPE_VALID_1_2_2 = 1825
XML_SCHEMAV_CVC_DATATYPE_VALID_1_2_3 = 1826
XML_SCHEMAV_CVC_TYPE_3_1_1 = 1827
XML_SCHEMAV_CVC_TYPE_3_1_2 = 1828
XML_SCHEMAV_CVC_FACET_VALID = 1829
XML_SCHEMAV_CVC_LENGTH_VALID = 1830
XML_SCHEMAV_CVC_MINLENGTH_VALID = 1831
XML_SCHEMAV_CVC_MAXLENGTH_VALID = 1832
XML_SCHEMAV_CVC_MININCLUSIVE_VALID = 1833
XML_SCHEMAV_CVC_MAXINCLUSIVE_VALID = 1834
XML_SCHEMAV_CVC_MINEXCLUSIVE_VALID = 1835
XML_SCHEMAV_CVC_MAXEXCLUSIVE_VALID = 1836
XML_SCHEMAV_CVC_TOTALDIGITS_VALID = 1837
XML_SCHEMAV_CVC_FRACTIONDIGITS_VALID = 1838
XML_SCHEMAV_CVC_PATTERN_VALID = 1839
XML_SCHEMAV_CVC_ENUMERATION_VALID = 1840
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_1 = 1841
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_2 = 1842
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_3 = 1843
XML_SCHEMAV_CVC_COMPLEX_TYPE_2_4 = 1844
XML_SCHEMAV_CVC_ELT_1 = 1845
XML_SCHEMAV_CVC_ELT_2 = 1846
XML_SCHEMAV_CVC_ELT_3_1 = 1847
XML_SCHEMAV_CVC_ELT_3_2_1 = 1848
XML_SCHEMAV_CVC_ELT_3_2_2 = 1849
XML_SCHEMAV_CVC_ELT_4_1 = 1850
XML_SCHEMAV_CVC_ELT_4_2 = 1851
XML_SCHEMAV_CVC_ELT_4_3 = 1852
XML_SCHEMAV_CVC_ELT_5_1_1 = 1853
XML_SCHEMAV_CVC_ELT_5_1_2 = 1854
XML_SCHEMAV_CVC_ELT_5_2_1 = 1855
XML_SCHEMAV_CVC_ELT_5_2_2_1 = 1856
XML_SCHEMAV_CVC_ELT_5_2_2_2_1 = 1857
XML_SCHEMAV_CVC_ELT_5_2_2_2_2 = 1858
XML_SCHEMAV_CVC_ELT_6 = 1859
XML_SCHEMAV_CVC_ELT_7 = 1860
XML_SCHEMAV_CVC_ATTRIBUTE_1 = 1861
XML_SCHEMAV_CVC_ATTRIBUTE_2 = 1862
XML_SCHEMAV_CVC_ATTRIBUTE_3 = 1863
XML_SCHEMAV_CVC_ATTRIBUTE_4 = 1864
XML_SCHEMAV_CVC_COMPLEX_TYPE_3_1 = 1865
XML_SCHEMAV_CVC_COMPLEX_TYPE_3_2_1 = 1866
XML_SCHEMAV_CVC_COMPLEX_TYPE_3_2_2 = 1867
XML_SCHEMAV_CVC_COMPLEX_TYPE_4 = 1868
XML_SCHEMAV_CVC_COMPLEX_TYPE_5_1 = 1869
XML_SCHEMAV_CVC_COMPLEX_TYPE_5_2 = 1870
XML_SCHEMAV_ELEMENT_CONTENT = 1871
XML_SCHEMAV_DOCUMENT_ELEMENT_MISSING = 1872
XML_SCHEMAV_CVC_COMPLEX_TYPE_1 = 1873
XML_SCHEMAV_CVC_AU = 1874
XML_SCHEMAV_CVC_TYPE_1 = 1875
XML_SCHEMAV_CVC_TYPE_2 = 1876
XML_SCHEMAV_CVC_IDC = 1877
XML_SCHEMAV_CVC_WILDCARD = 1878
XML_SCHEMAV_MISC = 1879
XML_XPTR_UNKNOWN_SCHEME = 1900
XML_XPTR_CHILDSEQ_START = 1901
XML_XPTR_EVAL_FAILED = 1902
XML_XPTR_EXTRA_OBJECTS = 1903
XML_C14N_CREATE_CTXT = 1950
XML_C14N_REQUIRES_UTF8 = 1951
XML_C14N_CREATE_STACK = 1952
XML_C14N_INVALID_NODE = 1953
XML_C14N_UNKNOW_NODE = 1954
XML_C14N_RELATIVE_NAMESPACE = 1955
XML_FTP_PASV_ANSWER = 2000
XML_FTP_EPSV_ANSWER = 2001
XML_FTP_ACCNT = 2002
XML_FTP_URL_SYNTAX = 2003
XML_HTTP_URL_SYNTAX = 2020
XML_HTTP_USE_IP = 2021
XML_HTTP_UNKNOWN_HOST = 2022
XML_SCHEMAP_SRC_SIMPLE_TYPE_1 = 3000
XML_SCHEMAP_SRC_SIMPLE_TYPE_2 = 3001
XML_SCHEMAP_SRC_SIMPLE_TYPE_3 = 3002
XML_SCHEMAP_SRC_SIMPLE_TYPE_4 = 3003
XML_SCHEMAP_SRC_RESOLVE = 3004
XML_SCHEMAP_SRC_RESTRICTION_BASE_OR_SIMPLETYPE = 3005
XML_SCHEMAP_SRC_LIST_ITEMTYPE_OR_SIMPLETYPE = 3006
XML_SCHEMAP_SRC_UNION_MEMBERTYPES_OR_SIMPLETYPES = 3007
XML_SCHEMAP_ST_PROPS_CORRECT_1 = 3008
XML_SCHEMAP_ST_PROPS_CORRECT_2 = 3009
XML_SCHEMAP_ST_PROPS_CORRECT_3 = 3010
XML_SCHEMAP_COS_ST_RESTRICTS_1_1 = 3011
XML_SCHEMAP_COS_ST_RESTRICTS_1_2 = 3012
XML_SCHEMAP_COS_ST_RESTRICTS_1_3_1 = 3013
XML_SCHEMAP_COS_ST_RESTRICTS_1_3_2 = 3014
XML_SCHEMAP_COS_ST_RESTRICTS_2_1 = 3015
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_1_1 = 3016
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_1_2 = 3017
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_1 = 3018
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_2 = 3019
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_3 = 3020
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_4 = 3021
XML_SCHEMAP_COS_ST_RESTRICTS_2_3_2_5 = 3022
XML_SCHEMAP_COS_ST_RESTRICTS_3_1 = 3023
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_1 = 3024
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_1_2 = 3025
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_2 = 3026
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_1 = 3027
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_3 = 3028
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_4 = 3029
XML_SCHEMAP_COS_ST_RESTRICTS_3_3_2_5 = 3030
XML_SCHEMAP_COS_ST_DERIVED_OK_2_1 = 3031
XML_SCHEMAP_COS_ST_DERIVED_OK_2_2 = 3032
XML_SCHEMAP_S4S_ELEM_NOT_ALLOWED = 3033
XML_SCHEMAP_S4S_ELEM_MISSING = 3034
XML_SCHEMAP_S4S_ATTR_NOT_ALLOWED = 3035
XML_SCHEMAP_S4S_ATTR_MISSING = 3036
XML_SCHEMAP_S4S_ATTR_INVALID_VALUE = 3037
XML_SCHEMAP_SRC_ELEMENT_1 = 3038
XML_SCHEMAP_SRC_ELEMENT_2_1 = 3039
XML_SCHEMAP_SRC_ELEMENT_2_2 = 3040
XML_SCHEMAP_SRC_ELEMENT_3 = 3041
XML_SCHEMAP_P_PROPS_CORRECT_1 = 3042
XML_SCHEMAP_P_PROPS_CORRECT_2_1 = 3043
XML_SCHEMAP_P_PROPS_CORRECT_2_2 = 3044
XML_SCHEMAP_E_PROPS_CORRECT_2 = 3045
XML_SCHEMAP_E_PROPS_CORRECT_3 = 3046
XML_SCHEMAP_E_PROPS_CORRECT_4 = 3047
XML_SCHEMAP_E_PROPS_CORRECT_5 = 3048
XML_SCHEMAP_E_PROPS_CORRECT_6 = 3049
XML_SCHEMAP_SRC_INCLUDE = 3050
XML_SCHEMAP_SRC_ATTRIBUTE_1 = 3051
XML_SCHEMAP_SRC_ATTRIBUTE_2 = 3052
XML_SCHEMAP_SRC_ATTRIBUTE_3_1 = 3053
XML_SCHEMAP_SRC_ATTRIBUTE_3_2 = 3054
XML_SCHEMAP_SRC_ATTRIBUTE_4 = 3055
XML_SCHEMAP_NO_XMLNS = 3056
XML_SCHEMAP_NO_XSI = 3057
XML_SCHEMAP_COS_VALID_DEFAULT_1 = 3058
XML_SCHEMAP_COS_VALID_DEFAULT_2_1 = 3059
XML_SCHEMAP_COS_VALID_DEFAULT_2_2_1 = 3060
XML_SCHEMAP_COS_VALID_DEFAULT_2_2_2 = 3061
XML_SCHEMAP_CVC_SIMPLE_TYPE = 3062
XML_SCHEMAP_COS_CT_EXTENDS_1_1 = 3063
XML_SCHEMAP_SRC_IMPORT_1_1 = 3064
XML_SCHEMAP_SRC_IMPORT_1_2 = 3065
XML_SCHEMAP_SRC_IMPORT_2 = 3066
XML_SCHEMAP_SRC_IMPORT_2_1 = 3067
XML_SCHEMAP_SRC_IMPORT_2_2 = 3068
XML_SCHEMAP_INTERNAL = 3069
XML_SCHEMAP_NOT_DETERMINISTIC = 3070
XML_SCHEMAP_SRC_ATTRIBUTE_GROUP_1 = 3071
XML_SCHEMAP_SRC_ATTRIBUTE_GROUP_2 = 3072
XML_SCHEMAP_SRC_ATTRIBUTE_GROUP_3 = 3073
XML_SCHEMAP_MG_PROPS_CORRECT_1 = 3074
XML_SCHEMAP_MG_PROPS_CORRECT_2 = 3075
XML_SCHEMAP_SRC_CT_1 = 3076
XML_SCHEMAP_DERIVATION_OK_RESTRICTION_2_1_3 = 3077
XML_SCHEMAP_AU_PROPS_CORRECT_2 = 3078
XML_SCHEMAP_A_PROPS_CORRECT_2 = 3079
XML_SCHEMAP_C_PROPS_CORRECT = 3080
XML_SCHEMAP_SRC_REDEFINE = 3081
XML_SCHEMAP_SRC_IMPORT = 3082
XML_SCHEMAP_WARN_SKIP_SCHEMA = 3083
XML_SCHEMAP_WARN_UNLOCATED_SCHEMA = 3084
XML_SCHEMAP_WARN_ATTR_REDECL_PROH = 3085
XML_SCHEMAP_WARN_ATTR_POINTLESS_PROH = 3086
XML_SCHEMAP_AG_PROPS_CORRECT = 3087
XML_SCHEMAP_COS_CT_EXTENDS_1_2 = 3088
XML_SCHEMAP_AU_PROPS_CORRECT = 3089
XML_SCHEMAP_A_PROPS_CORRECT_3 = 3090
XML_SCHEMAP_COS_ALL_LIMITED = 3091
XML_SCHEMATRONV_ASSERT = 4000
XML_SCHEMATRONV_REPORT = 4001
XML_MODULE_OPEN = 4900
XML_MODULE_CLOSE = 4901
XML_CHECK_FOUND_ELEMENT = 5000
XML_CHECK_FOUND_ATTRIBUTE = 5001
XML_CHECK_FOUND_TEXT = 5002
XML_CHECK_FOUND_CDATA = 5003
XML_CHECK_FOUND_ENTITYREF = 5004
XML_CHECK_FOUND_ENTITY = 5005
XML_CHECK_FOUND_PI = 5006
XML_CHECK_FOUND_COMMENT = 5007
XML_CHECK_FOUND_DOCTYPE = 5008
XML_CHECK_FOUND_FRAGMENT = 5009
XML_CHECK_FOUND_NOTATION = 5010
XML_CHECK_UNKNOWN_NODE = 5011
XML_CHECK_ENTITY_TYPE = 5012
XML_CHECK_NO_PARENT = 5013
XML_CHECK_NO_DOC = 5014
XML_CHECK_NO_NAME = 5015
XML_CHECK_NO_ELEM = 5016
XML_CHECK_WRONG_DOC = 5017
XML_CHECK_NO_PREV = 5018
XML_CHECK_WRONG_PREV = 5019
XML_CHECK_NO_NEXT = 5020
XML_CHECK_WRONG_NEXT = 5021
XML_CHECK_NOT_DTD = 5022
XML_CHECK_NOT_ATTR = 5023
XML_CHECK_NOT_ATTR_DECL = 5024
XML_CHECK_NOT_ELEM_DECL = 5025
XML_CHECK_NOT_ENTITY_DECL = 5026
XML_CHECK_NOT_NS_DECL = 5027
XML_CHECK_NO_HREF = 5028
XML_CHECK_WRONG_PARENT = 5029
XML_CHECK_NS_SCOPE = 5030
XML_CHECK_NS_ANCESTOR = 5031
XML_CHECK_NOT_UTF8 = 5032
XML_CHECK_NO_DICT = 5033
XML_CHECK_NOT_NCNAME = 5034
XML_CHECK_OUTSIDE_DICT = 5035
XML_CHECK_WRONG_NAME = 5036
XML_CHECK_NAME_NOT_NULL = 5037
XML_I18N_NO_NAME = 6000
XML_I18N_NO_HANDLER = 6001
XML_I18N_EXCESS_HANDLER = 6002
XML_I18N_CONV_FAILED = 6003
XML_I18N_NO_OUTPUT = 6004
XML_BUF_OVERFLOW = 7000
# xmlExpNodeType
XML_EXP_EMPTY = 0
XML_EXP_FORBID = 1
XML_EXP_ATOM = 2
XML_EXP_SEQ = 3
XML_EXP_OR = 4
XML_EXP_COUNT = 5
# xmlElementContentType
XML_ELEMENT_CONTENT_PCDATA = 1
XML_ELEMENT_CONTENT_ELEMENT = 2
XML_ELEMENT_CONTENT_SEQ = 3
XML_ELEMENT_CONTENT_OR = 4
# xmlParserProperties
XML_PARSER_LOADDTD = 1
XML_PARSER_DEFAULTATTRS = 2
XML_PARSER_VALIDATE = 3
XML_PARSER_SUBST_ENTITIES = 4
# xmlReaderTypes
XML_READER_TYPE_NONE = 0
XML_READER_TYPE_ELEMENT = 1
XML_READER_TYPE_ATTRIBUTE = 2
XML_READER_TYPE_TEXT = 3
XML_READER_TYPE_CDATA = 4
XML_READER_TYPE_ENTITY_REFERENCE = 5
XML_READER_TYPE_ENTITY = 6
XML_READER_TYPE_PROCESSING_INSTRUCTION = 7
XML_READER_TYPE_COMMENT = 8
XML_READER_TYPE_DOCUMENT = 9
XML_READER_TYPE_DOCUMENT_TYPE = 10
XML_READER_TYPE_DOCUMENT_FRAGMENT = 11
XML_READER_TYPE_NOTATION = 12
XML_READER_TYPE_WHITESPACE = 13
XML_READER_TYPE_SIGNIFICANT_WHITESPACE = 14
XML_READER_TYPE_END_ELEMENT = 15
XML_READER_TYPE_END_ENTITY = 16
XML_READER_TYPE_XML_DECLARATION = 17
# xmlCatalogPrefer
XML_CATA_PREFER_NONE = 0
XML_CATA_PREFER_PUBLIC = 1
XML_CATA_PREFER_SYSTEM = 2
# xmlElementType
XML_ELEMENT_NODE = 1
XML_ATTRIBUTE_NODE = 2
XML_TEXT_NODE = 3
XML_CDATA_SECTION_NODE = 4
XML_ENTITY_REF_NODE = 5
XML_ENTITY_NODE = 6
XML_PI_NODE = 7
XML_COMMENT_NODE = 8
XML_DOCUMENT_NODE = 9
XML_DOCUMENT_TYPE_NODE = 10
XML_DOCUMENT_FRAG_NODE = 11
XML_NOTATION_NODE = 12
XML_HTML_DOCUMENT_NODE = 13
XML_DTD_NODE = 14
XML_ELEMENT_DECL = 15
XML_ATTRIBUTE_DECL = 16
XML_ENTITY_DECL = 17
XML_NAMESPACE_DECL = 18
XML_XINCLUDE_START = 19
XML_XINCLUDE_END = 20
XML_DOCB_DOCUMENT_NODE = 21
# xlinkActuate
XLINK_ACTUATE_NONE = 0
XLINK_ACTUATE_AUTO = 1
XLINK_ACTUATE_ONREQUEST = 2
# xmlFeature
XML_WITH_THREAD = 1
XML_WITH_TREE = 2
XML_WITH_OUTPUT = 3
XML_WITH_PUSH = 4
XML_WITH_READER = 5
XML_WITH_PATTERN = 6
XML_WITH_WRITER = 7
XML_WITH_SAX1 = 8
XML_WITH_FTP = 9
XML_WITH_HTTP = 10
XML_WITH_VALID = 11
XML_WITH_HTML = 12
XML_WITH_LEGACY = 13
XML_WITH_C14N = 14
XML_WITH_CATALOG = 15
XML_WITH_XPATH = 16
XML_WITH_XPTR = 17
XML_WITH_XINCLUDE = 18
XML_WITH_ICONV = 19
XML_WITH_ISO8859X = 20
XML_WITH_UNICODE = 21
XML_WITH_REGEXP = 22
XML_WITH_AUTOMATA = 23
XML_WITH_EXPR = 24
XML_WITH_SCHEMAS = 25
XML_WITH_SCHEMATRON = 26
XML_WITH_MODULES = 27
XML_WITH_DEBUG = 28
XML_WITH_DEBUG_MEM = 29
XML_WITH_DEBUG_RUN = 30
XML_WITH_ZLIB = 31
XML_WITH_ICU = 32
XML_WITH_LZMA = 33
XML_WITH_NONE = 99999
# xmlElementContentOccur
XML_ELEMENT_CONTENT_ONCE = 1
XML_ELEMENT_CONTENT_OPT = 2
XML_ELEMENT_CONTENT_MULT = 3
XML_ELEMENT_CONTENT_PLUS = 4
# xmlXPathError
XPATH_EXPRESSION_OK = 0
XPATH_NUMBER_ERROR = 1
XPATH_UNFINISHED_LITERAL_ERROR = 2
XPATH_START_LITERAL_ERROR = 3
XPATH_VARIABLE_REF_ERROR = 4
XPATH_UNDEF_VARIABLE_ERROR = 5
XPATH_INVALID_PREDICATE_ERROR = 6
XPATH_EXPR_ERROR = 7
XPATH_UNCLOSED_ERROR = 8
XPATH_UNKNOWN_FUNC_ERROR = 9
XPATH_INVALID_OPERAND = 10
XPATH_INVALID_TYPE = 11
XPATH_INVALID_ARITY = 12
XPATH_INVALID_CTXT_SIZE = 13
XPATH_INVALID_CTXT_POSITION = 14
XPATH_MEMORY_ERROR = 15
XPTR_SYNTAX_ERROR = 16
XPTR_RESOURCE_ERROR = 17
XPTR_SUB_RESOURCE_ERROR = 18
XPATH_UNDEF_PREFIX_ERROR = 19
XPATH_ENCODING_ERROR = 20
XPATH_INVALID_CHAR_ERROR = 21
XPATH_INVALID_CTXT = 22
XPATH_STACK_ERROR = 23
XPATH_FORBID_VARIABLE_ERROR = 24
# xmlTextReaderMode
XML_TEXTREADER_MODE_INITIAL = 0
XML_TEXTREADER_MODE_INTERACTIVE = 1
XML_TEXTREADER_MODE_ERROR = 2
XML_TEXTREADER_MODE_EOF = 3
XML_TEXTREADER_MODE_CLOSED = 4
XML_TEXTREADER_MODE_READING = 5
# xmlErrorLevel
XML_ERR_NONE = 0
XML_ERR_WARNING = 1
XML_ERR_ERROR = 2
XML_ERR_FATAL = 3
# xmlCharEncoding
XML_CHAR_ENCODING_ERROR = -1
XML_CHAR_ENCODING_NONE = 0
XML_CHAR_ENCODING_UTF8 = 1
XML_CHAR_ENCODING_UTF16LE = 2
XML_CHAR_ENCODING_UTF16BE = 3
XML_CHAR_ENCODING_UCS4LE = 4
XML_CHAR_ENCODING_UCS4BE = 5
XML_CHAR_ENCODING_EBCDIC = 6
XML_CHAR_ENCODING_UCS4_2143 = 7
XML_CHAR_ENCODING_UCS4_3412 = 8
XML_CHAR_ENCODING_UCS2 = 9
XML_CHAR_ENCODING_8859_1 = 10
XML_CHAR_ENCODING_8859_2 = 11
XML_CHAR_ENCODING_8859_3 = 12
XML_CHAR_ENCODING_8859_4 = 13
XML_CHAR_ENCODING_8859_5 = 14
XML_CHAR_ENCODING_8859_6 = 15
XML_CHAR_ENCODING_8859_7 = 16
XML_CHAR_ENCODING_8859_8 = 17
XML_CHAR_ENCODING_8859_9 = 18
XML_CHAR_ENCODING_2022_JP = 19
XML_CHAR_ENCODING_SHIFT_JIS = 20
XML_CHAR_ENCODING_EUC_JP = 21
XML_CHAR_ENCODING_ASCII = 22
# xmlErrorDomain
XML_FROM_NONE = 0
XML_FROM_PARSER = 1
XML_FROM_TREE = 2
XML_FROM_NAMESPACE = 3
XML_FROM_DTD = 4
XML_FROM_HTML = 5
XML_FROM_MEMORY = 6
XML_FROM_OUTPUT = 7
XML_FROM_IO = 8
XML_FROM_FTP = 9
XML_FROM_HTTP = 10
XML_FROM_XINCLUDE = 11
XML_FROM_XPATH = 12
XML_FROM_XPOINTER = 13
XML_FROM_REGEXP = 14
XML_FROM_DATATYPE = 15
XML_FROM_SCHEMASP = 16
XML_FROM_SCHEMASV = 17
XML_FROM_RELAXNGP = 18
XML_FROM_RELAXNGV = 19
XML_FROM_CATALOG = 20
XML_FROM_C14N = 21
XML_FROM_XSLT = 22
XML_FROM_VALID = 23
XML_FROM_CHECK = 24
XML_FROM_WRITER = 25
XML_FROM_MODULE = 26
XML_FROM_I18N = 27
XML_FROM_SCHEMATRONV = 28
XML_FROM_BUFFER = 29
XML_FROM_URI = 30
# htmlStatus
HTML_NA = 0
HTML_INVALID = 1
HTML_DEPRECATED = 2
HTML_VALID = 4
HTML_REQUIRED = 12
# xmlSchemaValidOption
XML_SCHEMA_VAL_VC_I_CREATE = 1
# xmlSchemaWhitespaceValueType
XML_SCHEMA_WHITESPACE_UNKNOWN = 0
XML_SCHEMA_WHITESPACE_PRESERVE = 1
XML_SCHEMA_WHITESPACE_REPLACE = 2
XML_SCHEMA_WHITESPACE_COLLAPSE = 3
# htmlParserOption
HTML_PARSE_RECOVER = 1
HTML_PARSE_NODEFDTD = 4
HTML_PARSE_NOERROR = 32
HTML_PARSE_NOWARNING = 64
HTML_PARSE_PEDANTIC = 128
HTML_PARSE_NOBLANKS = 256
HTML_PARSE_NONET = 2048
HTML_PARSE_NOIMPLIED = 8192
HTML_PARSE_COMPACT = 65536
HTML_PARSE_IGNORE_ENC = 2097152
# xmlRelaxNGValidErr
XML_RELAXNG_OK = 0
XML_RELAXNG_ERR_MEMORY = 1
XML_RELAXNG_ERR_TYPE = 2
XML_RELAXNG_ERR_TYPEVAL = 3
XML_RELAXNG_ERR_DUPID = 4
XML_RELAXNG_ERR_TYPECMP = 5
XML_RELAXNG_ERR_NOSTATE = 6
XML_RELAXNG_ERR_NODEFINE = 7
XML_RELAXNG_ERR_LISTEXTRA = 8
XML_RELAXNG_ERR_LISTEMPTY = 9
XML_RELAXNG_ERR_INTERNODATA = 10
XML_RELAXNG_ERR_INTERSEQ = 11
XML_RELAXNG_ERR_INTEREXTRA = 12
XML_RELAXNG_ERR_ELEMNAME = 13
XML_RELAXNG_ERR_ATTRNAME = 14
XML_RELAXNG_ERR_ELEMNONS = 15
XML_RELAXNG_ERR_ATTRNONS = 16
XML_RELAXNG_ERR_ELEMWRONGNS = 17
XML_RELAXNG_ERR_ATTRWRONGNS = 18
XML_RELAXNG_ERR_ELEMEXTRANS = 19
XML_RELAXNG_ERR_ATTREXTRANS = 20
XML_RELAXNG_ERR_ELEMNOTEMPTY = 21
XML_RELAXNG_ERR_NOELEM = 22
XML_RELAXNG_ERR_NOTELEM = 23
XML_RELAXNG_ERR_ATTRVALID = 24
XML_RELAXNG_ERR_CONTENTVALID = 25
XML_RELAXNG_ERR_EXTRACONTENT = 26
XML_RELAXNG_ERR_INVALIDATTR = 27
XML_RELAXNG_ERR_DATAELEM = 28
XML_RELAXNG_ERR_VALELEM = 29
XML_RELAXNG_ERR_LISTELEM = 30
XML_RELAXNG_ERR_DATATYPE = 31
XML_RELAXNG_ERR_VALUE = 32
XML_RELAXNG_ERR_LIST = 33
XML_RELAXNG_ERR_NOGRAMMAR = 34
XML_RELAXNG_ERR_EXTRADATA = 35
XML_RELAXNG_ERR_LACKDATA = 36
XML_RELAXNG_ERR_INTERNAL = 37
XML_RELAXNG_ERR_ELEMWRONG = 38
XML_RELAXNG_ERR_TEXTWRONG = 39
# xmlCatalogAllow
XML_CATA_ALLOW_NONE = 0
XML_CATA_ALLOW_GLOBAL = 1
XML_CATA_ALLOW_DOCUMENT = 2
XML_CATA_ALLOW_ALL = 3
# xmlAttributeType
XML_ATTRIBUTE_CDATA = 1
XML_ATTRIBUTE_ID = 2
XML_ATTRIBUTE_IDREF = 3
XML_ATTRIBUTE_IDREFS = 4
XML_ATTRIBUTE_ENTITY = 5
XML_ATTRIBUTE_ENTITIES = 6
XML_ATTRIBUTE_NMTOKEN = 7
XML_ATTRIBUTE_NMTOKENS = 8
XML_ATTRIBUTE_ENUMERATION = 9
XML_ATTRIBUTE_NOTATION = 10
# xmlSchematronValidOptions
XML_SCHEMATRON_OUT_QUIET = 1
XML_SCHEMATRON_OUT_TEXT = 2
XML_SCHEMATRON_OUT_XML = 4
XML_SCHEMATRON_OUT_ERROR = 8
XML_SCHEMATRON_OUT_FILE = 256
XML_SCHEMATRON_OUT_BUFFER = 512
XML_SCHEMATRON_OUT_IO = 1024
# xmlSchemaContentType
XML_SCHEMA_CONTENT_UNKNOWN = 0
XML_SCHEMA_CONTENT_EMPTY = 1
XML_SCHEMA_CONTENT_ELEMENTS = 2
XML_SCHEMA_CONTENT_MIXED = 3
XML_SCHEMA_CONTENT_SIMPLE = 4
XML_SCHEMA_CONTENT_MIXED_OR_ELEMENTS = 5
XML_SCHEMA_CONTENT_BASIC = 6
XML_SCHEMA_CONTENT_ANY = 7
# xmlSchemaTypeType
XML_SCHEMA_TYPE_BASIC = 1
XML_SCHEMA_TYPE_ANY = 2
XML_SCHEMA_TYPE_FACET = 3
XML_SCHEMA_TYPE_SIMPLE = 4
XML_SCHEMA_TYPE_COMPLEX = 5
XML_SCHEMA_TYPE_SEQUENCE = 6
XML_SCHEMA_TYPE_CHOICE = 7
XML_SCHEMA_TYPE_ALL = 8
XML_SCHEMA_TYPE_SIMPLE_CONTENT = 9
XML_SCHEMA_TYPE_COMPLEX_CONTENT = 10
XML_SCHEMA_TYPE_UR = 11
XML_SCHEMA_TYPE_RESTRICTION = 12
XML_SCHEMA_TYPE_EXTENSION = 13
XML_SCHEMA_TYPE_ELEMENT = 14
XML_SCHEMA_TYPE_ATTRIBUTE = 15
XML_SCHEMA_TYPE_ATTRIBUTEGROUP = 16
XML_SCHEMA_TYPE_GROUP = 17
XML_SCHEMA_TYPE_NOTATION = 18
XML_SCHEMA_TYPE_LIST = 19
XML_SCHEMA_TYPE_UNION = 20
XML_SCHEMA_TYPE_ANY_ATTRIBUTE = 21
XML_SCHEMA_TYPE_IDC_UNIQUE = 22
XML_SCHEMA_TYPE_IDC_KEY = 23
XML_SCHEMA_TYPE_IDC_KEYREF = 24
XML_SCHEMA_TYPE_PARTICLE = 25
XML_SCHEMA_TYPE_ATTRIBUTE_USE = 26
XML_SCHEMA_FACET_MININCLUSIVE = 1000
XML_SCHEMA_FACET_MINEXCLUSIVE = 1001
XML_SCHEMA_FACET_MAXINCLUSIVE = 1002
XML_SCHEMA_FACET_MAXEXCLUSIVE = 1003
XML_SCHEMA_FACET_TOTALDIGITS = 1004
XML_SCHEMA_FACET_FRACTIONDIGITS = 1005
XML_SCHEMA_FACET_PATTERN = 1006
XML_SCHEMA_FACET_ENUMERATION = 1007
XML_SCHEMA_FACET_WHITESPACE = 1008
XML_SCHEMA_FACET_LENGTH = 1009
XML_SCHEMA_FACET_MAXLENGTH = 1010
XML_SCHEMA_FACET_MINLENGTH = 1011
XML_SCHEMA_EXTRA_QNAMEREF = 2000
XML_SCHEMA_EXTRA_ATTR_USE_PROHIB = 2001
# xmlModuleOption
XML_MODULE_LAZY = 1
XML_MODULE_LOCAL = 2
# xmlParserMode
XML_PARSE_UNKNOWN = 0
XML_PARSE_DOM = 1
XML_PARSE_SAX = 2
XML_PARSE_PUSH_DOM = 3
XML_PARSE_PUSH_SAX = 4
XML_PARSE_READER = 5
# xmlC14NMode
XML_C14N_1_0 = 0
XML_C14N_EXCLUSIVE_1_0 = 1
XML_C14N_1_1 = 2
# xmlParserOption
XML_PARSE_RECOVER = 1
XML_PARSE_NOENT = 2
XML_PARSE_DTDLOAD = 4
XML_PARSE_DTDATTR = 8
XML_PARSE_DTDVALID = 16
XML_PARSE_NOERROR = 32
XML_PARSE_NOWARNING = 64
XML_PARSE_PEDANTIC = 128
XML_PARSE_NOBLANKS = 256
XML_PARSE_SAX1 = 512
XML_PARSE_XINCLUDE = 1024
XML_PARSE_NONET = 2048
XML_PARSE_NODICT = 4096
XML_PARSE_NSCLEAN = 8192
XML_PARSE_NOCDATA = 16384
XML_PARSE_NOXINCNODE = 32768
XML_PARSE_COMPACT = 65536
XML_PARSE_OLD10 = 131072
XML_PARSE_NOBASEFIX = 262144
XML_PARSE_HUGE = 524288
XML_PARSE_OLDSAX = 1048576
XML_PARSE_IGNORE_ENC = 2097152
XML_PARSE_BIG_LINES = 4194304
# xmlElementTypeVal
XML_ELEMENT_TYPE_UNDEFINED = 0
XML_ELEMENT_TYPE_EMPTY = 1
XML_ELEMENT_TYPE_ANY = 2
XML_ELEMENT_TYPE_MIXED = 3
XML_ELEMENT_TYPE_ELEMENT = 4
# xmlDocProperties
XML_DOC_WELLFORMED = 1
XML_DOC_NSVALID = 2
XML_DOC_OLD10 = 4
XML_DOC_DTDVALID = 8
XML_DOC_XINCLUDE = 16
XML_DOC_USERBUILT = 32
XML_DOC_INTERNAL = 64
XML_DOC_HTML = 128
# xlinkType
XLINK_TYPE_NONE = 0
XLINK_TYPE_SIMPLE = 1
XLINK_TYPE_EXTENDED = 2
XLINK_TYPE_EXTENDED_SET = 3
# xmlXPathObjectType
XPATH_UNDEFINED = 0
XPATH_NODESET = 1
XPATH_BOOLEAN = 2
XPATH_NUMBER = 3
XPATH_STRING = 4
XPATH_POINT = 5
XPATH_RANGE = 6
XPATH_LOCATIONSET = 7
XPATH_USERS = 8
XPATH_XSLT_TREE = 9
# xmlSchemaValidError
XML_SCHEMAS_ERR_OK = 0
XML_SCHEMAS_ERR_NOROOT = 1
XML_SCHEMAS_ERR_UNDECLAREDELEM = 2
XML_SCHEMAS_ERR_NOTTOPLEVEL = 3
XML_SCHEMAS_ERR_MISSING = 4
XML_SCHEMAS_ERR_WRONGELEM = 5
XML_SCHEMAS_ERR_NOTYPE = 6
XML_SCHEMAS_ERR_NOROLLBACK = 7
XML_SCHEMAS_ERR_ISABSTRACT = 8
XML_SCHEMAS_ERR_NOTEMPTY = 9
XML_SCHEMAS_ERR_ELEMCONT = 10
XML_SCHEMAS_ERR_HAVEDEFAULT = 11
XML_SCHEMAS_ERR_NOTNILLABLE = 12
XML_SCHEMAS_ERR_EXTRACONTENT = 13
XML_SCHEMAS_ERR_INVALIDATTR = 14
XML_SCHEMAS_ERR_INVALIDELEM = 15
XML_SCHEMAS_ERR_NOTDETERMINIST = 16
XML_SCHEMAS_ERR_CONSTRUCT = 17
XML_SCHEMAS_ERR_INTERNAL = 18
XML_SCHEMAS_ERR_NOTSIMPLE = 19
XML_SCHEMAS_ERR_ATTRUNKNOWN = 20
XML_SCHEMAS_ERR_ATTRINVALID = 21
XML_SCHEMAS_ERR_VALUE = 22
XML_SCHEMAS_ERR_FACET = 23
XML_SCHEMAS_ERR_ = 24
XML_SCHEMAS_ERR_XXX = 25
| gpl-2.0 |
mrquim/mrquimrepo | script.video.F4mProxy/lib/flvlib/helpers.py | 95 | 5650 | import os
import time
import datetime
from StringIO import StringIO
from UserDict import DictMixin
class UTC(datetime.tzinfo):
    """Concrete tzinfo for Coordinated Universal Time: zero offset, no DST.

    Modelled on the example in the ``datetime.tzinfo`` documentation.
    """

    ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        """UTC is, by definition, zero offset from UTC."""
        return self.ZERO

    def dst(self, dt):
        """UTC never observes daylight saving time."""
        return self.ZERO

    def tzname(self, dt):
        """Return the fixed name of this zone."""
        return "UTC"


utc = UTC()  # module-level singleton; the class is stateless, so sharing is safe
class OrderedAttrDict(DictMixin):
    """
    A dictionary that preserves insert order and also has an attribute
    interface.

    Values can be transparently accessed and set as keys or as attributes.

    Python 2 only: relies on ``UserDict.DictMixin`` (which derives the rest
    of the mapping API -- ``update``, ``__contains__``, ``iteritems`` etc. --
    from the four primitives defined below), and on ``iteritems``/``.next()``
    in ``__eq__``.
    """
    def __init__(self, dict=None, **kwargs):
        # Write the two private slots through self.__dict__ directly:
        # going through normal attribute assignment would hit our own
        # __setattr__, which stores into the (not yet existing) mapping.
        # NOTE(review): the parameter name ``dict`` shadows the builtin;
        # kept for API compatibility.
        self.__dict__["_order_priv_"] = []
        self.__dict__["_data_priv_"] = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    # Mapping interface
    def __setitem__(self, key, value):
        # Record the key's position only on first insertion; overwriting an
        # existing key keeps its original order slot.
        if key not in self:
            self._order_priv_.append(key)
        self._data_priv_[key] = value

    def __getitem__(self, key):
        return self._data_priv_[key]

    def __delitem__(self, key):
        del self._data_priv_[key]
        self._order_priv_.remove(key)

    def keys(self):
        # Return a copy so callers cannot corrupt the internal order list.
        return list(self._order_priv_)

    # Attribute interface
    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, so real attributes
        # (methods, the private slots) are never shadowed by mapping keys.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Every plain attribute assignment becomes a mapping insertion.
        self[name] = value

    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError(name)

    # Equality
    def __eq__(self, other):
        # Order-sensitive comparison: walk both iterators in lockstep and
        # require identical (key, value) pairs in identical order.  Anything
        # without an iteritems() method compares unequal.
        try:
            my_iter = self.iteritems()
            his_iter = other.iteritems()
        except AttributeError:
            return False
        my_empty = False
        his_empty = False
        while True:
            try:
                my_key, my_val = my_iter.next()
            except StopIteration:
                my_empty = True
            try:
                his_key, his_val = his_iter.next()
            except StopIteration:
                his_empty = True
            # Both exhausted simultaneously: all pairs matched.
            if my_empty and his_empty:
                return True
            # Different lengths: unequal.
            if my_empty or his_empty:
                return False
            if (my_key, my_val) != (his_key, his_val):
                return False

    # String representation
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self)

    def __str__(self):
        # Render like a dict literal, but in insertion order.
        return '{' + ', '.join([('%r: %r' % (key, self[key]))
                                for key in self._order_priv_]) + '}'
class ASPrettyPrinter(object):
    """Pretty printing of AS objects.

    Python 2 only (``print`` statement, ``basestring``/``unicode``/``long``,
    ``StringIO``).  All methods are classmethods that share ``cls.io``, a
    StringIO accumulator created by pformat(); the class is therefore not
    safe for concurrent use.  The ``pprint_*`` helpers return True when their
    output spanned multiple lines, which containers use to propagate
    indentation decisions upward.
    """
    def pformat(cls, val, indent=0):
        # Entry point: render val into a fresh buffer and return the text.
        cls.io = StringIO()
        cls.pprint_lookup(val, indent)
        return cls.io.getvalue()
    pformat = classmethod(pformat)

    def pprint(cls, val):
        # Convenience wrapper: format and print (Python 2 print statement).
        print cls.pformat(val)
    pprint = classmethod(pprint)

    def pprint_lookup(cls, val, ident):
        # Type dispatch.  Duck-typed container checks (iterkeys/append) are
        # used instead of isinstance so AS-specific mapping/sequence wrappers
        # are handled too.
        if isinstance(val, basestring):
            return cls.pprint_string(val)
        if isinstance(val, (int, long, float)):
            return cls.pprint_number(val)
        if isinstance(val, datetime.datetime):
            return cls.pprint_datetime(val)
        if hasattr(val, 'iterkeys'):
            # dict interface
            return cls.pprint_dict(val, ident)
        if hasattr(val, 'append'):
            # list interface
            return cls.pprint_list(val, ident)
        # Unknown type ? -- fall back to repr().
        cls.io.write("%r" % (val, ))
        return False
    pprint_lookup = classmethod(pprint_lookup)

    def pprint_string(cls, val):
        # Unicode strings are written UTF-8 encoded with a u'' prefix.
        if isinstance(val, unicode):
            cls.io.write("u'%s'" % val.encode("UTF8"))
        else:
            cls.io.write("'%s'" % val)
        return False
    pprint_string = classmethod(pprint_string)

    def pprint_number(cls, val):
        cls.io.write(str(val))
        return False
    pprint_number = classmethod(pprint_number)

    def pprint_datetime(cls, val):
        # Drop sub-second precision; 'YYYY-MM-DD HH:MM:SS' form.
        cls.io.write(val.replace(microsecond=0).isoformat(' '))
        return False
    pprint_datetime = classmethod(pprint_datetime)

    def pprint_dict(cls, val, indent):
        def pprint_item(k):
            # Measure how much "key: " adds so nested values can be aligned
            # under the start of the value, not the key.
            last_pos = cls.io.tell()
            cls.io.write(repr(k))
            cls.io.write(": ")
            new_indent = indent + cls.io.tell() - last_pos + 1
            return cls.pprint_lookup(val[k], new_indent)
        cls.io.write('{')
        indented = False
        keys = list(val.iterkeys())
        if keys:
            for k in keys[:-1]:
                indented |= pprint_item(k)
                cls.io.write(",\n%s " % (" "*indent))
            indented |= pprint_item(keys[-1])
        cls.io.write('}')
        # Multi-line if more than one entry or any nested value wrapped.
        return (len(keys) > 1) | indented
    pprint_dict = classmethod(pprint_dict)

    def pprint_list(cls, val, indent):
        last_pos = cls.io.tell()
        cls.io.write('[')
        new_indent = indent + cls.io.tell() - last_pos
        indented = False
        values = list(iter(val))
        if values:
            for v in values[:-1]:
                indented |= cls.pprint_lookup(v, new_indent)
                cls.io.write(",\n%s" % (" "*new_indent))
            indented |= cls.pprint_lookup(values[-1], new_indent)
        cls.io.write(']')
        return (len(values) > 1) | indented
    pprint_list = classmethod(pprint_list)


# Module-level shortcuts mirroring the stdlib pprint API.
pformat = ASPrettyPrinter.pformat
pprint = ASPrettyPrinter.pprint
def force_remove(path):
    """Delete *path*, ignoring OS-level failures.

    Best-effort removal: a missing file -- or any other OSError, such as a
    permission problem -- is deliberately swallowed.
    """
    try:
        os.remove(path)
    except OSError:
        pass
| gpl-2.0 |
andrewyoung1991/abjad | abjad/tools/mathtools/yield_all_partitions_of_integer.py | 2 | 1024 | # -*- encoding: utf-8 -*-
def yield_all_partitions_of_integer(n):
    r'''Yield every partition of positive integer `n` in descending lex
    order, starting from ``(n,)`` and ending with all ones:

    ::

        >>> for partition in mathtools.yield_all_partitions_of_integer(7):
        ...     partition
        ...
        (7,)
        (6, 1)
        (5, 2)
        (5, 1, 1)
        (4, 3)
        (4, 2, 1)
        (4, 1, 1, 1)
        (3, 3, 1)
        (3, 2, 2)
        (3, 2, 1, 1)
        (3, 1, 1, 1, 1)
        (2, 2, 2, 1)
        (2, 2, 1, 1, 1)
        (2, 1, 1, 1, 1, 1)
        (1, 1, 1, 1, 1, 1, 1)

    Returns generator of positive integer tuples of length at least ``1``.
    '''
    from abjad.tools import mathtools
    # Validate eagerly-looking but, as this is a generator, errors surface
    # on first iteration -- same as the original behavior.
    if not isinstance(n, int):
        raise TypeError('must be integer.')
    if n <= 0:
        raise ValueError('must be positive.')
    # Walk the partition chain; next_integer_partition() returns None after
    # the final all-ones partition.
    current = (n,)
    while current is not None:
        yield current
        current = mathtools.next_integer_partition(current)
jessefeinman/FintechHackathon | venv/Lib/site-packages/wheel/metadata.py | 93 | 11676 | """
Tools for converting old- to new-style metadata.
"""
from collections import namedtuple
from .pkginfo import read_pkg_info
from .util import OrderedDefaultDict
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
import re
import os.path
import textwrap
import pkg_resources
import email.parser
from . import __version__ as wheel_version
# Target metadata version emitted by pkginfo_to_dict().
METADATA_VERSION = "2.0"

# Old-style PKG-INFO field name -> plural key used in the Metadata 2.0 dict.
PLURAL_FIELDS = { "classifier" : "classifiers",
                  "provides_dist" : "provides",
                  "provides_extra" : "extras" }

# Fields dropped entirely during conversion (currently none).
SKIP_FIELDS = set()

# ({PKG-INFO field -> contact-dict key}, role) pairs used to fold
# author/maintainer fields into the python.details 'contacts' list.
CONTACT_FIELDS = (({"email":"author_email", "name": "author"},
                   "author"),
                  ({"email":"maintainer_email", "name": "maintainer"},
                   "maintainer"))

# commonly filled out as "UNKNOWN" by distutils:
UNKNOWN_FIELDS = set(("author", "author_email", "platform", "home_page",
                      "license"))

# Wheel itself is probably the only program that uses non-extras markers
# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
# Splits a Keywords header on runs of control chars, space or comma.
KEYWORDS_RE = re.compile("[\0-,]+")

# Grouping key used by handle_requires(): (environment condition, extra name).
MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra'))
def unique(iterable):
    """
    Yield unique values in iterable, preserving first-seen order.

    :param iterable: any iterable of hashable values.
    """
    seen = set()
    for value in iterable:
        # Idiomatic membership test (was ``not value in seen``).
        if value not in seen:
            seen.add(value)
            yield value
def handle_requires(metadata, pkg_info, key):
    """
    Place the runtime requirements from pkg_info into metadata.

    Each value of *key* (normally ``Requires-Dist``) is split by EXTRA_RE into
    a package spec, an optional environment condition and an optional extra
    name.  Specs sharing the same (condition, extra) pair are grouped into one
    PEP 426-style ``run_requires`` entry, and any extras seen are appended to
    ``metadata['extras']``.
    """
    # Group package specs by (condition, extra); OrderedDefaultDict keeps
    # first-seen group order.
    may_requires = OrderedDefaultDict(list)
    for value in sorted(pkg_info.get_all(key)):
        extra_match = EXTRA_RE.search(value)
        if extra_match:
            groupdict = extra_match.groupdict()
            condition = groupdict['condition']
            extra = groupdict['extra']
            package = groupdict['package']
            # When an "extra == '...'" clause followed the condition, the
            # regex leaves a dangling " and " joiner on it; strip it.
            if condition.endswith(' and '):
                condition = condition[:-5]
        else:
            condition, extra = None, None
            package = value
        # NOTE(review): the parameter ``key`` is rebound here as the grouping
        # key -- harmless (no later use of the original value) but confusing.
        key = MayRequiresKey(condition, extra)
        may_requires[key].append(package)

    if may_requires:
        metadata['run_requires'] = []
        def sort_key(item):
            # Both condition and extra could be None, which can't be compared
            # against strings in Python 3.
            key, value = item
            if key.condition is None:
                return ''
            return key.condition
        for key, value in sorted(may_requires.items(), key=sort_key):
            may_requirement = OrderedDict((('requires', value),))
            if key.extra:
                may_requirement['extra'] = key.extra
            if key.condition:
                may_requirement['environment'] = key.condition
            metadata['run_requires'].append(may_requirement)

        if not 'extras' in metadata:
            metadata['extras'] = []
        metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
def pkginfo_to_dict(path, distribution=None):
    """
    Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.

    The description is included under the key ['description'] rather than
    being written to a separate file.

    path: path to PKG-INFO file
    distribution: optional distutils Distribution()
    """
    # Three levels of auto-vivifying ordered dicts so nested keys such as
    # metadata['extensions']['python.details'][...] can be assigned directly.
    metadata = OrderedDefaultDict(lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict)))
    metadata["generator"] = "bdist_wheel (" + wheel_version + ")"
    try:
        # Python 2 (the ``unicode`` builtin exists): use read_pkg_info().
        unicode
        pkg_info = read_pkg_info(path)
    except NameError:
        # Python 3: read bytes and parse as UTF-8 text.
        with open(path, 'rb') as pkg_info_file:
            pkg_info = email.parser.Parser().parsestr(pkg_info_file.read().decode('utf-8'))
    description = None

    if pkg_info['Summary']:
        metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
        del pkg_info['Summary']

    if pkg_info['Description']:
        description = dedent_description(pkg_info)
        del pkg_info['Description']
    else:
        # No Description header: the long description may live in the body.
        payload = pkg_info.get_payload()
        if isinstance(payload, bytes):
            # Avoid a Python 2 Unicode error.
            # We still suffer ? glyphs on Python 3.
            payload = payload.decode('utf-8')
        if payload:
            description = payload

    if description:
        pkg_info['description'] = description

    # Walk all (deduplicated, lowercased) header names and map each to its
    # Metadata 2.0 home.
    for key in sorted(unique(k.lower() for k in pkg_info.keys())):
        low_key = key.replace('-', '_')

        if low_key in SKIP_FIELDS:
            continue

        # distutils writes literal "UNKNOWN" for unset fields; drop those.
        if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
            continue

        if low_key in sorted(PLURAL_FIELDS):
            metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
        elif low_key == "requires_dist":
            handle_requires(metadata, pkg_info, key)
        elif low_key == 'provides_extra':
            if not 'extras' in metadata:
                metadata['extras'] = []
            metadata['extras'].extend(pkg_info.get_all(key))
        elif low_key == 'home_page':
            metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]}
        elif low_key == 'keywords':
            metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
        else:
            metadata[low_key] = pkg_info[key]

    metadata['metadata_version'] = METADATA_VERSION

    if 'extras' in metadata:
        metadata['extras'] = sorted(set(metadata['extras']))

    # include more information if distribution is available
    if distribution:
        for requires, attr in (('test_requires', 'tests_require'),):
            try:
                requirements = getattr(distribution, attr)
                if isinstance(requirements, list):
                    new_requirements = sorted(convert_requirements(requirements))
                    metadata[requires] = [{'requires':new_requirements}]
            except AttributeError:
                pass

    # handle contacts: pop author/maintainer fields out of the flat dict and
    # rebuild them as python.details contact records.
    contacts = []
    for contact_type, role in CONTACT_FIELDS:
        contact = OrderedDict()
        for key in sorted(contact_type):
            if contact_type[key] in metadata:
                contact[key] = metadata.pop(contact_type[key])
        if contact:
            contact['role'] = role
            contacts.append(contact)
    if contacts:
        metadata['extensions']['python.details']['contacts'] = contacts

    # convert entry points to exports; a missing entry_points.txt is fine.
    try:
        with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
            ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
        exports = OrderedDict()
        for group, items in sorted(ep_map.items()):
            exports[group] = OrderedDict()
            for item in sorted(map(str, items.values())):
                name, export = item.split(' = ', 1)
                exports[group][name] = export
        if exports:
            metadata['extensions']['python.exports'] = exports
    except IOError:
        pass

    # copy console_scripts entry points to commands
    if 'python.exports' in metadata['extensions']:
        for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
                                         ('gui_scripts', 'wrap_gui')):
            if ep_script in metadata['extensions']['python.exports']:
                metadata['extensions']['python.commands'][wrap_script] = \
                    metadata['extensions']['python.exports'][ep_script]

    return metadata
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion.

    :param requirement: parsed requirement exposing ``.specs`` as an
        iterable of (operator, version) pairs.
    :returns: `` (op1ver1,op2ver2,...)`` with a leading space, or ``''``
        when there are no version specs.
    """
    # Comprehension replaces the original manual append loop (same order).
    requires_dist = [op + ver for op, ver in requirement.specs]
    if not requires_dist:
        return ''
    return " (%s)" % ','.join(requires_dist)
def convert_requirements(requirements):
    """Yield Requires-Dist: strings for parsed requirements strings.

    Each input string is parsed with pkg_resources and re-emitted as
    ``name[extras] (specs)``.
    """
    for requirement in requirements:
        parsed = pkg_resources.Requirement.parse(requirement)
        spec = requires_to_requires_dist(parsed)
        extras = ",".join(parsed.extras)
        if extras:
            extras = "[%s]" % extras
        yield parsed.project_name + extras + spec
def generate_requirements(extras_require):
    """
    Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
    and ('Provides-Extra', 'extra') tuples.

    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
    using the empty extra {'': [requirements]} to hold install_requires.
    """
    for extra, depends in extras_require.items():
        condition = ''
        if extra and ':' in extra:  # setuptools extra:condition syntax
            extra, condition = extra.split(':', 1)
        extra = pkg_resources.safe_extra(extra)
        if extra:
            yield ('Provides-Extra', extra)
            # Append the "extra == '...'" marker after any explicit condition.
            if condition:
                condition += " and "
            condition += "extra == '%s'" % extra
        if condition:
            condition = '; ' + condition
        for new_req in convert_requirements(depends):
            yield ('Requires-Dist', new_req + condition)
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
    old-draft Metadata 2.0 format.

    egg_info_path: directory that may contain a requires.txt
    pkginfo_path: path to the PKG-INFO file to rewrite
    Returns the updated message object (headers mutated in place).
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.0')
    # Fold requires.txt (install_requires plus [extra] sections) into
    # Requires-Dist / Provides-Extra headers.
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()
        # Sort sections with the unnamed (install_requires) section first.
        for extra, reqs in sorted(pkg_resources.split_sections(requires),
                                  key=lambda x: x[0] or ''):
            for item in generate_requirements({extra: reqs}):
                pkg_info[item[0]] = item[1]

    # Move the long description out of the headers into the message body.
    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
def pkginfo_unicode(pkg_info, field):
    """Hack to coax Unicode out of an email Message() - Python 3.3+

    When the header value is not already ``str``, re-read the raw header and
    undo the email module's surrogate-escaped ASCII so the original bytes can
    be decoded as the UTF-8 they really are.
    """
    text = pkg_info[field]
    field = field.lower()
    if not isinstance(text, str):
        if not hasattr(pkg_info, 'raw_items'):  # Python 3.2
            return str(text)
        for item in pkg_info.raw_items():
            if item[0].lower() == field:
                text = item[1].encode('ascii', 'surrogateescape')\
                    .decode('utf-8')
                break
    return text
def dedent_description(pkg_info):
    """
    Dedent and convert pkg_info['Description'] to Unicode.

    Email headers continue onto following lines with leading whitespace; this
    strips that continuation indent from the long description.
    """
    description = pkg_info['Description']

    # Python 3 Unicode handling, sorta.
    surrogates = False
    if not isinstance(description, str):
        surrogates = True
        description = pkginfo_unicode(pkg_info, 'Description')

    # NOTE(review): assumes a non-empty description -- callers guard with
    # ``if pkg_info['Description']:`` before calling.
    description_lines = description.splitlines()
    description_dedent = '\n'.join(
        # if the first line of long_description is blank,
        # the first line here will be indented.
        (description_lines[0].lstrip(),
         textwrap.dedent('\n'.join(description_lines[1:])),
         '\n'))

    if surrogates:
        # Round-trip back through surrogateescape so the message payload can
        # be written out with the email machinery.
        description_dedent = description_dedent\
            .encode("utf8")\
            .decode("ascii", "surrogateescape")

    return description_dedent
if __name__ == "__main__":
    # Ad-hoc debugging entry point: dump the Metadata 2.0 dict produced from
    # the PKG-INFO path given as the first command-line argument.
    # One import per line (was ``import sys, pprint``).
    import pprint
    import sys

    pprint.pprint(pkginfo_to_dict(sys.argv[1]))
| bsd-2-clause |
MadcowD/libgdx | extensions/gdx-freetype/jni/freetype-2.6.2/src/tools/docmaker/docmaker.py | 165 | 3183 | #!/usr/bin/env python
#
# docmaker.py
#
# Convert source code markup to HTML documentation.
#
# Copyright 2002-2015 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This program is a re-write of the original DocMaker tool used to generate
# the API Reference of the FreeType font rendering engine by converting
# in-source comments into structured HTML.
#
# This new version is capable of outputting XML data as well as accepting
# more liberal formatting options. It also uses regular expression matching
# and substitution to speed up operation significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
    """Print command-line help for the docmaker tool (Python 2 syntax)."""
    print "\nDocMaker Usage information\n"
    print "  docmaker [options] file1 [file2 ...]\n"
    print "using the following options:\n"
    print "  -h : print this page"
    print "  -t : set project title, as in '-t \"My Project\"'"
    print "  -o : set output directory, as in '-o mydir'"
    print "  -p : set documentation prefix, as in '-p ft2'"
    print ""
    print "  --title : same as -t, as in '--title=\"My Project\"'"
    print "  --output : same as -o, as in '--output=mydir'"
    print "  --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
    """Main program loop.

    Parses command-line options, feeds every input file through the source
    and content processors, then emits the HTML documentation (TOC, index
    and all sections).  Exits with status 1/2 on usage errors.
    """
    global output_dir

    try:
        opts, args = getopt.getopt( sys.argv[1:],
                                    "ht:o:p:",
                                    ["help", "title=", "output=", "prefix="] )
    except getopt.GetoptError:
        usage()
        sys.exit( 2 )

    if args == []:
        usage()
        sys.exit( 1 )

    # process options
    project_title = "Project"
    project_prefix = None
    output_dir = None

    for opt in opts:
        if opt[0] in ( "-h", "--help" ):
            usage()
            sys.exit( 0 )

        if opt[0] in ( "-t", "--title" ):
            project_title = opt[1]

        if opt[0] in ( "-o", "--output" ):
            # NOTE(review): -o writes utils.output_dir while the local/global
            # output_dir above stays None -- looks inconsistent; confirm
            # which one check_output() actually reads.
            utils.output_dir = opt[1]

        if opt[0] in ( "-p", "--prefix" ):
            project_prefix = opt[1]

    check_output()

    # create context and processor
    source_processor = SourceProcessor()
    content_processor = ContentProcessor()

    # retrieve the list of files to process
    file_list = make_file_list( args )
    for filename in file_list:
        source_processor.parse_file( filename )
        content_processor.parse_sources( source_processor )

    # process sections
    content_processor.finish()

    formatter = HtmlFormatter( content_processor,
                               project_title,
                               project_prefix )

    formatter.toc_dump()
    formatter.index_dump()
    formatter.section_dump_all()

# if called from the command line
if __name__ == '__main__':
    main( sys.argv )
# eof
| apache-2.0 |
adamklawonn/CityCircles | citycircles_iphone/build_back2/iphoneDistribution-iphonesimulator/CityCircles.app/globalmaptiles.py | 28 | 16529 | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it usefull for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
Polar areas with abs(latitude) bigger then 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yeh?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
the were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually noticable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is degined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPGS:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
    def PixelsToMeters(self, px, py, zoom):
        "Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
        # meters = pixels * (meters per pixel at this zoom), re-centred on the
        # projection origin via originShift.
        res = self.Resolution( zoom )
        mx = px * res - self.originShift
        my = py * res - self.originShift
        return mx, my
def MetersToPixels(self, mx, my, zoom):
    "Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
    # Inverse of PixelsToMeters: shift the origin back to the map corner,
    # then divide by the meters-per-pixel resolution of this zoom level.
    res = self.Resolution( zoom )
    px = (mx + self.originShift) / res
    py = (my + self.originShift) / res
    return px, py
def PixelsToTile(self, px, py):
    """Indices (tx, ty) of the tile that covers the given pixel coordinates."""
    size = float(self.tileSize)
    # ceil(p/size) - 1 maps pixel 0 to tile -1 and pixels 1..size to tile 0.
    tx = int(math.ceil(px / size) - 1)
    ty = int(math.ceil(py / size) - 1)
    return tx, ty
def PixelsToRaster(self, px, py, zoom):
    """Flip the pixel y-axis: TMS counts pixels from the bottom-left,
    raster images from the top-left."""
    world_px = self.tileSize << zoom  # full map width/height in pixels at this zoom
    return px, world_px - py
def MetersToTile(self, mx, my, zoom):
    "Returns tile for given mercator coordinates"
    # Convenience composition: meters -> pixels -> tile indices.
    px, py = self.MetersToPixels( mx, my, zoom)
    return self.PixelsToTile( px, py)
def TileBounds(self, tx, ty, zoom):
    "Returns bounds of the given tile in EPSG:900913 coordinates"
    # Bottom-left and top-right pixel corners of the tile, projected to meters.
    minx, miny = self.PixelsToMeters( tx*self.tileSize, ty*self.tileSize, zoom )
    maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom )
    return ( minx, miny, maxx, maxy )
def TileLatLonBounds(self, tx, ty, zoom ):
    "Returns bounds of the given tile in latitude/longitude using WGS84 datum"
    # Reproject the mercator tile bounds corner-by-corner back to WGS84.
    bounds = self.TileBounds( tx, ty, zoom)
    minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
    maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
    return ( minLat, minLon, maxLat, maxLon )
def Resolution(self, zoom):
    """Ground resolution in meters per pixel for the given zoom level,
    measured at the equator."""
    # return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
    return self.initialResolution / (1 << zoom)
def ZoomForPixelSize(self, pixelSize ):
    "Maximal scaledown zoom of the pyramid closest to the pixelSize."
    for i in range(30):
        if pixelSize > self.Resolution(i):
            return i-1 if i!=0 else 0 # We don't want to scale up
    # NOTE(review): falls through and returns None when pixelSize is smaller
    # than the zoom-29 resolution -- callers should guard against that.
def GoogleTile(self, tx, ty, zoom):
    """Convert TMS tile coordinates (origin bottom-left) to Google tile
    coordinates (origin top-left): x is unchanged, y is flipped."""
    tiles_per_axis = 2 ** zoom
    return tx, tiles_per_axis - 1 - ty
def QuadTree(self, tx, ty, zoom):
    """Encode TMS tile coordinates as a Microsoft/Bing quadtree key.

    One base-4 digit per zoom level, most significant level first; bit 0 of
    each digit comes from tx, bit 1 from the top-left-origin ty.
    """
    ty = (2 ** zoom - 1) - ty  # TMS bottom-left origin -> top-left origin
    digits = []
    for level in range(zoom, 0, -1):
        digit = 0
        bit = 1 << (level - 1)
        if tx & bit:
            digit |= 1
        if ty & bit:
            digit |= 2
        digits.append(str(digit))
    return "".join(digits)
#---------------------
class GlobalGeodetic(object):
    """
    TMS Global Geodetic Profile
    ---------------------------

    Tiles in the "unprojected" EPSG:4326 profile (Plate Carree): latitude and
    longitude are used directly as planar XY coordinates, so only scaling to
    the pixel pyramid and cutting into tiles is needed.  The top pyramid level
    holds two tiles: the area [-180,-90,180,90] is scaled to 512x256 pixels.
    Pixel and tile coordinates follow the TMS convention (origin [0,0] in the
    bottom-left corner).  Such tiles are compatible with Google Earth and can
    be overlaid on OpenLayers base maps.
    """

    def __init__(self, tileSize=256):
        self.tileSize = tileSize

    def LatLonToPixels(self, lat, lon, zoom):
        "Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
        # NOTE(review): the resolution math hardcodes 256px tiles and ignores
        # self.tileSize, as did the original.
        res = 180 / 256.0 / 2**zoom
        # BUGFIX: x pixels must come from longitude and y pixels from latitude;
        # the original had the two axes swapped (px from lat, py from lon).
        px = (180 + lon) / res
        py = (90 + lat) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns coordinates of the tile covering region in pixel coordinates"
        tx = int(math.ceil(px / float(self.tileSize)) - 1)
        ty = int(math.ceil(py / float(self.tileSize)) - 1)
        return tx, ty

    def Resolution(self, zoom):
        "Resolution (arc/pixel) for given zoom level (measured at Equator)"
        return 180 / 256.0 / 2**zoom
        # return 180 / float( 1 << (8+zoom) )

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds (minLon, minLat, maxLon, maxLat) of the given tile"
        # BUGFIX: 'self' was missing from the signature, which made the
        # method unusable on instances.
        res = 180 / 256.0 / 2**zoom
        return (
            tx * 256 * res - 180,
            ty * 256 * res - 90,
            (tx + 1) * 256 * res - 180,
            (ty + 1) * 256 * res - 90
        )
# Command-line driver (Python 2 only: uses print statements).  Prints the
# tiles covering a given lat/lon point or bounding box at a zoom level.
if __name__ == "__main__":
    import sys, os

    def Usage(s = ""):
        # Print usage (optionally with an error message) and exit non-zero.
        print "Usage: globalmaptiles.py [-profile 'mercator'|'geodetic'] zoomlevel lat lon [latmax lonmax]"
        print
        if s:
            print s
            print
        print "This utility prints for given WGS84 lat/lon coordinates (or bounding box) the list of tiles"
        print "covering specified area. Tiles are in the given 'profile' (default is Google Maps 'mercator')"
        print "and in the given pyramid 'zoomlevel'."
        print "For each tile several information is printed including bonding box in EPSG:900913 and WGS84."
        sys.exit(1)

    profile = 'mercator'
    zoomlevel = None
    lat, lon, latmax, lonmax = None, None, None, None
    boundingbox = False

    argv = sys.argv
    i = 1
    while i < len(argv):
        arg = argv[i]

        if arg == '-profile':
            i = i + 1
            profile = argv[i]

        # NOTE(review): after consuming '-profile <name>' the loop falls
        # straight through to the positional-argument chain below with
        # argv[i] still set to the profile name, so int(argv[i]) raises
        # ValueError.  The branch below should be 'elif' (or 'continue'
        # added above) -- flagged only, left byte-identical here.
        if zoomlevel is None:
            zoomlevel = int(argv[i])
        elif lat is None:
            lat = float(argv[i])
        elif lon is None:
            lon = float(argv[i])
        elif latmax is None:
            latmax = float(argv[i])
        elif lonmax is None:
            lonmax = float(argv[i])
        else:
            Usage("ERROR: Too many parameters")

        i = i + 1

    if profile != 'mercator':
        Usage("ERROR: Sorry, given profile is not implemented yet.")

    if zoomlevel == None or lat == None or lon == None:
        Usage("ERROR: Specify at least 'zoomlevel', 'lat' and 'lon'.")
    if latmax is not None and lonmax is None:
        Usage("ERROR: Both 'latmax' and 'lonmax' must be given.")

    if latmax != None and lonmax != None:
        if latmax < lat:
            Usage("ERROR: 'latmax' must be bigger then 'lat'")
        if lonmax < lon:
            Usage("ERROR: 'lonmax' must be bigger then 'lon'")
        boundingbox = (lon, lat, lonmax, latmax)

    tz = zoomlevel
    mercator = GlobalMercator()

    # Project the (first) corner and find its tile.
    mx, my = mercator.LatLonToMeters( lat, lon )
    print "Spherical Mercator (ESPG:900913) coordinates for lat/lon: "
    print (mx, my)
    tminx, tminy = mercator.MetersToTile( mx, my, tz )

    if boundingbox:
        # Second corner of the bounding box, if one was given.
        mx, my = mercator.LatLonToMeters( latmax, lonmax )
        print "Spherical Mercator (ESPG:900913) cooridnate for maxlat/maxlon: "
        print (mx, my)
        tmaxx, tmaxy = mercator.MetersToTile( mx, my, tz )
    else:
        tmaxx, tmaxy = tminx, tminy

    # Enumerate every tile in the (inclusive) tile range and print its
    # TMS/Google/quadkey addresses plus bounds in both CRSs.
    for ty in range(tminy, tmaxy+1):
        for tx in range(tminx, tmaxx+1):
            tilefilename = "%s/%s/%s" % (tz, tx, ty)
            print tilefilename, "( TileMapService: z / x / y )"

            gx, gy = mercator.GoogleTile(tx, ty, tz)
            print "\tGoogle:", gx, gy
            quadkey = mercator.QuadTree(tx, ty, tz)
            print "\tQuadkey:", quadkey, '(',int(quadkey, 4),')'

            bounds = mercator.TileBounds( tx, ty, tz)
            print
            print "\tEPSG:900913 Extent: ", bounds
            wgsbounds = mercator.TileLatLonBounds( tx, ty, tz)
            print "\tWGS84 Extent:", wgsbounds

            # Ready-made gdalwarp command that cuts this tile from a raster.
            print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % (
                bounds[0], bounds[1], bounds[2], bounds[3], "<your-raster-file-in-epsg900913.ext>", tz, tx, ty)
            print
| gpl-3.0 |
jemekite/Dougpool | p2pool/util/forest.py | 281 | 13557 | '''
forest data structure
'''
import itertools
from p2pool.util import skiplist, variable
class TrackerSkipList(skiplist.SkipList):
    """Skip list whose nodes mirror the items of a Tracker.

    Entries are dropped automatically (via a weak-reference watcher) when
    the underlying tracker removes an item.
    """

    def __init__(self, tracker):
        skiplist.SkipList.__init__(self)

        self.tracker = tracker
        # Weakref watcher so this skip list does not keep itself alive.
        self.tracker.removed.watch_weakref(self, lambda self, item: self.forget_item(item.hash))

    def previous(self, element):
        # Hash of the parent (delta tail) of the given element.
        return self.tracker._delta_type.from_element(self.tracker.items[element]).tail
class DistanceSkipList(TrackerSkipList):
    """Answers "which hash is n steps up the chain from X?" in O(log n).

    Deltas are (from_hash, distance, to_hash) triples; composing two
    adjacent deltas sums their distances.  (Python 2 only: the methods use
    tuple parameter unpacking.)
    """

    def get_delta(self, element):
        # A single step from an element to its parent.
        return element, 1, self.previous(element)

    def combine_deltas(self, (from_hash1, dist1, to_hash1), (from_hash2, dist2, to_hash2)):
        # Deltas must be adjacent (first ends where the second begins).
        if to_hash1 != from_hash2:
            raise AssertionError()
        return from_hash1, dist1 + dist2, to_hash2

    def initial_solution(self, start, (n,)):
        # Partial solution: (steps walked so far, current hash).
        return 0, start

    def apply_delta(self, (dist1, to_hash1), (from_hash2, dist2, to_hash2), (n,)):
        if to_hash1 != from_hash2:
            raise AssertionError()
        return dist1 + dist2, to_hash2

    def judge(self, (dist, hash), (n,)):
        # Trichotomy against the target distance n: overshoot / exact / short.
        if dist > n:
            return 1
        elif dist == n:
            return 0
        else:
            return -1

    def finalize(self, (dist, hash), (n,)):
        assert dist == n
        return hash
def get_attributedelta_type(attrs): # attrs: {name: func}
    """Build a delta class that accumulates the given per-item attributes.

    Each attribute maps a name to a function extracting that attribute's
    contribution from a single item; deltas over chain segments add and
    subtract those contributions component-wise.  (Python 2 only: uses
    iteritems and list+keys() concatenation.)
    """
    class ProtoAttributeDelta(object):
        __slots__ = ['head', 'tail'] + attrs.keys()

        @classmethod
        def get_none(cls, element_id):
            # Identity delta: zero-length segment starting and ending at element_id.
            return cls(element_id, element_id, **dict((k, 0) for k in attrs))

        @classmethod
        def from_element(cls, item):
            # Single-item delta: from the item down to its parent.
            return cls(item.hash, item.previous_hash, **dict((k, v(item)) for k, v in attrs.iteritems()))

        @staticmethod
        def get_head(item):
            return item.hash

        @staticmethod
        def get_tail(item):
            return item.previous_hash

        def __init__(self, head, tail, **kwargs):
            self.head, self.tail = head, tail
            for k, v in kwargs.iteritems():
                setattr(self, k, v)

        def __add__(self, other):
            # Concatenate adjacent segments (self's tail must meet other's head).
            assert self.tail == other.head
            return self.__class__(self.head, other.tail, **dict((k, getattr(self, k) + getattr(other, k)) for k in attrs))

        def __sub__(self, other):
            # Remove a shared prefix or suffix segment from this delta.
            if self.head == other.head:
                return self.__class__(other.tail, self.tail, **dict((k, getattr(self, k) - getattr(other, k)) for k in attrs))
            elif self.tail == other.tail:
                return self.__class__(self.head, other.head, **dict((k, getattr(self, k) - getattr(other, k)) for k in attrs))
            else:
                raise AssertionError()

        def __repr__(self):
            return '%s(%r, %r%s)' % (self.__class__, self.head, self.tail, ''.join(', %s=%r' % (k, getattr(self, k)) for k in attrs))
    ProtoAttributeDelta.attrs = attrs
    return ProtoAttributeDelta
# Default delta type: tracks only chain height (each item contributes 1).
AttributeDelta = get_attributedelta_type(dict(
    height=lambda item: 1,
))
class TrackerView(object):
    """Memoized view over a Tracker that answers cumulative-delta queries.

    Caches partial sums ("deltas") from items down toward their chain tails
    so that repeated height/last queries are amortized cheap.  Cached deltas
    are grouped under shared "refs" (one per common tail) so that whole
    groups can be rebased in O(1) when the tracker removes an interior item.
    """

    def __init__(self, tracker, delta_type):
        self._tracker = tracker
        self._delta_type = delta_type

        self._deltas = {} # item_hash -> delta, ref
        self._reverse_deltas = {} # ref -> set of item_hashes

        self._ref_generator = itertools.count()
        self._delta_refs = {} # ref -> delta
        self._reverse_delta_refs = {} # delta.tail -> ref

        # Keep the caches consistent with tracker mutations (weakrefs so the
        # view can be garbage collected independently of the tracker).
        self._tracker.remove_special.watch_weakref(self, lambda self, item: self._handle_remove_special(item))
        self._tracker.remove_special2.watch_weakref(self, lambda self, item: self._handle_remove_special2(item))
        self._tracker.removed.watch_weakref(self, lambda self, item: self._handle_removed(item))

    def _handle_remove_special(self, item):
        # The tracker removed a chain-interior item whose tail had cached
        # refs; rebase the ref that pointed at the removed item's tail so it
        # points at the removed item's head instead.
        delta = self._delta_type.from_element(item)

        if delta.tail not in self._reverse_delta_refs:
            return

        # move delta refs referencing children down to this, so they can be moved up in one step
        for x in list(self._reverse_deltas.get(self._reverse_delta_refs.get(delta.head, object()), set())):
            self.get_last(x)
        assert delta.head not in self._reverse_delta_refs, list(self._reverse_deltas.get(self._reverse_delta_refs.get(delta.head, object()), set()))

        if delta.tail not in self._reverse_delta_refs:
            return

        # move ref pointing to this up
        ref = self._reverse_delta_refs[delta.tail]
        cur_delta = self._delta_refs[ref]
        assert cur_delta.tail == delta.tail
        self._delta_refs[ref] = cur_delta - delta
        assert self._delta_refs[ref].tail == delta.head
        del self._reverse_delta_refs[delta.tail]
        self._reverse_delta_refs[delta.head] = ref

    def _handle_remove_special2(self, item):
        # Rebasing is impossible (removed item had siblings); drop every
        # cached delta hanging off the removed item's tail.
        delta = self._delta_type.from_element(item)

        if delta.tail not in self._reverse_delta_refs:
            return

        ref = self._reverse_delta_refs.pop(delta.tail)
        del self._delta_refs[ref]
        for x in self._reverse_deltas.pop(ref):
            del self._deltas[x]

    def _handle_removed(self, item):
        delta = self._delta_type.from_element(item)

        # delete delta entry and ref if it is empty
        if delta.head in self._deltas:
            delta1, ref = self._deltas.pop(delta.head)
            self._reverse_deltas[ref].remove(delta.head)
            if not self._reverse_deltas[ref]:
                del self._reverse_deltas[ref]
                delta2 = self._delta_refs.pop(ref)
                del self._reverse_delta_refs[delta2.tail]

    def get_height(self, item_hash):
        # Number of items between item_hash and its chain's tail.
        return self.get_delta_to_last(item_hash).height

    def get_work(self, item_hash):
        # Only meaningful when the delta type defines a 'work' attribute.
        return self.get_delta_to_last(item_hash).work

    def get_last(self, item_hash):
        # Hash of the chain tail reachable from item_hash.
        return self.get_delta_to_last(item_hash).tail

    def get_height_and_last(self, item_hash):
        delta = self.get_delta_to_last(item_hash)
        return delta.height, delta.tail

    def _get_delta(self, item_hash):
        # Cached cumulative delta if present, else the single-item delta.
        if item_hash in self._deltas:
            delta1, ref = self._deltas[item_hash]
            delta2 = self._delta_refs[ref]
            res = delta1 + delta2
        else:
            res = self._delta_type.from_element(self._tracker.items[item_hash])
        assert res.head == item_hash
        return res

    def _set_delta(self, item_hash, delta):
        # Cache 'delta' for item_hash, storing it relative to the shared ref
        # delta for its tail (creating that ref if necessary).
        other_item_hash = delta.tail
        if other_item_hash not in self._reverse_delta_refs:
            ref = self._ref_generator.next()
            assert ref not in self._delta_refs
            self._delta_refs[ref] = self._delta_type.get_none(other_item_hash)
            self._reverse_delta_refs[other_item_hash] = ref
            del ref

        ref = self._reverse_delta_refs[other_item_hash]
        ref_delta = self._delta_refs[ref]
        assert ref_delta.tail == other_item_hash

        if item_hash in self._deltas:
            # Detach from the previous ref group; garbage-collect it if empty.
            prev_ref = self._deltas[item_hash][1]
            self._reverse_deltas[prev_ref].remove(item_hash)
            if not self._reverse_deltas[prev_ref] and prev_ref != ref:
                self._reverse_deltas.pop(prev_ref)
                x = self._delta_refs.pop(prev_ref)
                self._reverse_delta_refs.pop(x.tail)
        self._deltas[item_hash] = delta - ref_delta, ref
        self._reverse_deltas.setdefault(ref, set()).add(item_hash)

    def get_delta_to_last(self, item_hash):
        """Cumulative delta from item_hash down to its chain tail,
        memoizing every intermediate hop along the way (path compression)."""
        assert isinstance(item_hash, (int, long, type(None)))
        delta = self._delta_type.get_none(item_hash)
        updates = []
        while delta.tail in self._tracker.items:
            updates.append((delta.tail, delta))
            this_delta = self._get_delta(delta.tail)
            delta += this_delta
        for update_hash, delta_then in updates:
            self._set_delta(update_hash, delta - delta_then)
        return delta

    def get_delta(self, item, ancestor):
        # Delta over the segment (ancestor, item]; requires ancestry.
        assert self._tracker.is_child_of(ancestor, item)
        return self.get_delta_to_last(item) - self.get_delta_to_last(ancestor)
class Tracker(object):
    """Forest of hash-linked chains (e.g. p2pool shares).

    Each item exposes 'hash' and 'previous_hash' (via the delta type); the
    tracker maintains the forest topology (heads, tails, reverse links) and
    fires events on mutation so views and skip lists stay in sync.
    """

    def __init__(self, items=[], delta_type=AttributeDelta):
        # NOTE: the mutable default is safe here -- 'items' is only iterated.
        self.items = {} # hash -> item
        self.reverse = {} # delta.tail -> set of item_hashes

        self.heads = {} # head hash -> tail_hash
        self.tails = {} # tail hash -> set of head hashes

        self.added = variable.Event()
        self.remove_special = variable.Event()
        self.remove_special2 = variable.Event()
        self.removed = variable.Event()

        self.get_nth_parent_hash = DistanceSkipList(self)

        self._delta_type = delta_type
        self._default_view = TrackerView(self, delta_type)

        for item in items:
            self.add(item)

    def __getattr__(self, name):
        # Delegate unknown attributes (get_height, get_last, ...) to the
        # default view, caching the bound method on the instance so the
        # __getattr__ hook only fires once per name.
        attr = getattr(self._default_view, name)
        setattr(self, name, attr)
        return attr

    def add(self, item):
        """Insert an item, splicing the head/tail maps around it."""
        assert not isinstance(item, (int, long, type(None)))
        delta = self._delta_type.from_element(item)

        if delta.head in self.items:
            raise ValueError('item already present')

        # Heads that the new item now leads toward (it may join existing chains).
        if delta.head in self.tails:
            heads = self.tails.pop(delta.head)
        else:
            heads = set([delta.head])

        # Tail of the chain the new item attaches to.
        if delta.tail in self.heads:
            tail = self.heads.pop(delta.tail)
        else:
            tail = self.get_last(delta.tail)

        self.items[delta.head] = item
        self.reverse.setdefault(delta.tail, set()).add(delta.head)

        self.tails.setdefault(tail, set()).update(heads)
        if delta.tail in self.tails[tail]:
            self.tails[tail].remove(delta.tail)

        for head in heads:
            self.heads[head] = tail

        self.added.happened(item)

    def remove(self, item_hash):
        """Remove an item; only head or tail-adjacent removals are supported.

        Fires remove_special/remove_special2 for the tail-adjacent cases so
        views can rebase or drop their caches, then fires removed.
        """
        assert isinstance(item_hash, (int, long, type(None)))
        if item_hash not in self.items:
            raise KeyError()

        item = self.items[item_hash]
        del item_hash

        delta = self._delta_type.from_element(item)

        # NOTE(review): 'children' is computed but never used below.
        children = self.reverse.get(delta.head, set())

        if delta.head in self.heads and delta.tail in self.tails:
            # Item is simultaneously a head and sits directly above a tail.
            tail = self.heads.pop(delta.head)
            self.tails[tail].remove(delta.head)
            if not self.tails[delta.tail]:
                self.tails.pop(delta.tail)
        elif delta.head in self.heads:
            # Plain head removal: its parent becomes a head unless a sibling
            # chain still descends from it.
            tail = self.heads.pop(delta.head)
            self.tails[tail].remove(delta.head)
            if self.reverse[delta.tail] != set([delta.head]):
                pass # has sibling
            else:
                self.tails[tail].add(delta.tail)
                self.heads[delta.tail] = tail
        elif delta.tail in self.tails and len(self.reverse[delta.tail]) <= 1:
            # Item sits just above a tail with no siblings: the item's head
            # becomes the new tail for every chain that ended there.
            heads = self.tails.pop(delta.tail)
            for head in heads:
                self.heads[head] = delta.head
            self.tails[delta.head] = set(heads)
            self.remove_special.happened(item)
        elif delta.tail in self.tails and len(self.reverse[delta.tail]) > 1:
            # Same as above but with sibling subtrees: only move the heads
            # that actually descend through this item.
            heads = [x for x in self.tails[delta.tail] if self.is_child_of(delta.head, x)]
            self.tails[delta.tail] -= set(heads)
            if not self.tails[delta.tail]:
                self.tails.pop(delta.tail)
            for head in heads:
                self.heads[head] = delta.head
            assert delta.head not in self.tails
            self.tails[delta.head] = set(heads)
            self.remove_special2.happened(item)
        else:
            # Interior removal is not supported.
            raise NotImplementedError()

        self.items.pop(delta.head)
        self.reverse[delta.tail].remove(delta.head)
        if not self.reverse[delta.tail]:
            self.reverse.pop(delta.tail)

        self.removed.happened(item)

    def get_chain(self, start_hash, length):
        # Yield 'length' items walking from start_hash toward the tail.
        assert length <= self.get_height(start_hash)
        for i in xrange(length):
            item = self.items[start_hash]
            yield item
            start_hash = self._delta_type.get_tail(item)

    def is_child_of(self, item_hash, possible_child_hash):
        """True/False when ancestry is decidable, None when the two hashes
        are not on connected chains."""
        height, last = self.get_height_and_last(item_hash)
        child_height, child_last = self.get_height_and_last(possible_child_hash)
        if child_last != last:
            return None # not connected, so can't be determined
        height_up = child_height - height
        return height_up >= 0 and self.get_nth_parent_hash(possible_child_hash, height_up) == item_hash
class SubsetTracker(Tracker):
    """Tracker whose items must all be present in another tracker.

    Shares the parent tracker's nth-parent skip list so ancestry queries
    remain consistent between the two trackers.
    """

    def __init__(self, subset_of, **kwargs):
        Tracker.__init__(self, **kwargs)
        self.get_nth_parent_hash = subset_of.get_nth_parent_hash # overwrites Tracker.__init__'s
        self._subset_of = subset_of

    def add(self, item):
        # Enforce the subset invariant before delegating.
        if self._subset_of is not None:
            assert self._delta_type.get_head(item) in self._subset_of.items
        Tracker.add(self, item)

    def remove(self, item_hash):
        # The superset must still contain the item while we remove it here.
        if self._subset_of is not None:
            assert item_hash in self._subset_of.items
        Tracker.remove(self, item_hash)
| gpl-3.0 |
LaoZhongGu/kbengine | kbe/src/lib/python/Doc/includes/sqlite3/converter_point.py | 55 | 1192 | import sqlite3
class Point:
    """A 2-D point, round-trippable through SQLite as the text "x;y"."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Six-decimal fixed-point, matching the adapter's wire format.
        return "(%f;%f)" % (self.x, self.y)
def adapt_point(point):
    """Serialize *point* as the ASCII byte string b"x;y" stored in SQLite."""
    text = "%f;%f" % (point.x, point.y)
    return text.encode('ascii')
def convert_point(s):
    """Inverse of adapt_point: parse the stored b"x;y" bytes into a Point."""
    x_raw, y_raw = s.split(b";")
    return Point(float(x_raw), float(y_raw))
# Register the adapter
sqlite3.register_adapter(Point, adapt_point)

# Register the converter
sqlite3.register_converter("point", convert_point)

p = Point(4.0, -3.2)

#########################
# 1) Using declared types
con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
cur.execute("create table test(p point)")

cur.execute("insert into test(p) values (?)", (p,))
cur.execute("select p from test")
print("with declared types:", cur.fetchone()[0])
cur.close()
con.close()

#######################
# 2) Using column names
con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_COLNAMES)
cur = con.cursor()
cur.execute("create table test(p)")

cur.execute("insert into test(p) values (?)", (p,))
cur.execute('select p as "p [point]" from test')
print("with column names:", cur.fetchone()[0])
cur.close()
con.close()
| lgpl-3.0 |
shsingh/ansible | lib/ansible/modules/network/fortios/fortios_wanopt_settings.py | 7 | 9324 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Maturity/support flags consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wanopt_settings
short_description: Configure WAN optimization settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wanopt feature and settings category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
wanopt_settings:
description:
- Configure WAN optimization settings.
default: null
type: dict
suboptions:
auto_detect_algorithm:
description:
- Auto detection algorithms used in tunnel negotiations.
type: str
choices:
- simple
- diff-req-resp
host_id:
description:
- Local host ID (must also be entered in the remote FortiGate's peer list).
type: str
tunnel_ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted during tunnel negotiation.
type: str
choices:
- low
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure WAN optimization settings.
fortios_wanopt_settings:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wanopt_settings:
auto_detect_algorithm: "simple"
host_id: "myhostname"
tunnel_ssl_algorithm: "low"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a legacy fortiosapi session against the device described in *data*.

    Side effects only: enables debug output, selects HTTP vs HTTPS, and logs
    in (fortiosapi raises on failure).  Used only in legacy (non-HTTPAPI) mode.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # 'https' defaults to on unless explicitly disabled in the task args.
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)
def filter_wanopt_settings_data(json):
    """Keep only the known wanopt-settings keys of *json* whose values are
    not None (unset module options arrive as None and must not be sent)."""
    option_list = ['auto_detect_algorithm', 'host_id', 'tunnel_ssl_algorithm']

    return dict((option, json[option])
                for option in option_list
                if option in json and json[option] is not None)
def underscore_to_hyphen(data):
    """Recursively rename dict keys from snake_case to hyphen-case.

    Lists are converted in place (and returned), dicts are rebuilt, and
    every other value -- including string values -- is returned untouched.
    """
    if isinstance(data, list):
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
        return data
    if isinstance(data, dict):
        return dict((key.replace('_', '-'), underscore_to_hyphen(value))
                    for key, value in data.items())
    return data
def wanopt_settings(data, fos):
    """Push the wanopt settings payload to the device (network I/O).

    Filters out unset options, converts key style to the API's hyphen-case,
    and issues the 'set' call; returns the raw API response dict.
    """
    vdom = data['vdom']
    wanopt_settings_data = data['wanopt_settings']
    filtered_data = underscore_to_hyphen(filter_wanopt_settings_data(wanopt_settings_data))

    return fos.set('wanopt',
                   'settings',
                   data=filtered_data,
                   vdom=vdom)
def is_successful_status(status):
    """True when the API reply indicates success.

    A DELETE answered with HTTP 404 also counts: the object to delete was
    already absent, which is the desired end state.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wanopt(data, fos):
    """Dispatch the wanopt configuration call for this module run.

    Returns a (is_error, changed, response) triple derived from the raw
    API response.

    Raises ValueError when *data* carries no 'wanopt_settings' payload.
    """
    if data['wanopt_settings']:
        resp = wanopt_settings(data, fos)
    else:
        # BUGFIX: the original fell through with 'resp' never assigned and
        # crashed with a confusing NameError; fail explicitly instead.
        raise ValueError('wanopt_settings payload is required')

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: declare the argument spec, pick a transport
    (HTTPAPI connection vs legacy fortiosapi), run the update, and exit
    through AnsibleModule with the result."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "wanopt_settings": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "auto_detect_algorithm": {"required": False, "type": "str",
                                          "choices": ["simple", "diff-req-resp"]},
                "host_id": {"required": False, "type": "str"},
                "tunnel_ssl_algorithm": {"required": False, "type": "str",
                                         "choices": ["low"]}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_wanopt(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: direct fortiosapi session using host credentials.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_wanopt(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
| gpl-3.0 |
jylaxp/django | tests/app_loading/tests.py | 288 | 3113 | from __future__ import unicode_literals
import os
from django.apps import apps
from django.test import SimpleTestCase
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils._os import upath
class EggLoadingTest(SimpleTestCase):
    """App loading from zipped .egg distributions (fixtures in ./eggs)."""

    def setUp(self):
        self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))

    def tearDown(self):
        # Undo any app-registry mutations made by the test.
        apps.clear_cache()

    def test_egg1(self):
        """Models module can be loaded from an app in an egg"""
        egg_name = '%s/modelapp.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            with self.settings(INSTALLED_APPS=['app_with_models']):
                models_module = apps.get_app_config('app_with_models').models_module
                self.assertIsNotNone(models_module)
        # Registered models survive clear_cache(); drop them explicitly.
        del apps.all_models['app_with_models']

    def test_egg2(self):
        """Loading an app from an egg that has no models returns no models (and no error)"""
        egg_name = '%s/nomodelapp.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            with self.settings(INSTALLED_APPS=['app_no_models']):
                models_module = apps.get_app_config('app_no_models').models_module
                self.assertIsNone(models_module)
        del apps.all_models['app_no_models']

    def test_egg3(self):
        """Models module can be loaded from an app located under an egg's top-level package"""
        egg_name = '%s/omelet.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            with self.settings(INSTALLED_APPS=['omelet.app_with_models']):
                models_module = apps.get_app_config('app_with_models').models_module
                self.assertIsNotNone(models_module)
        del apps.all_models['app_with_models']

    def test_egg4(self):
        """Loading an app with no models from under the top-level egg package generates no error"""
        egg_name = '%s/omelet.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            with self.settings(INSTALLED_APPS=['omelet.app_no_models']):
                models_module = apps.get_app_config('app_no_models').models_module
                self.assertIsNone(models_module)
        del apps.all_models['app_no_models']

    def test_egg5(self):
        """Loading an app from an egg that has an import error in its models module raises that error"""
        egg_name = '%s/brokenapp.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            # The broken egg imports a misspelled 'modelz' module.
            with six.assertRaisesRegex(self, ImportError, 'modelz'):
                with self.settings(INSTALLED_APPS=['broken_app']):
                    pass
class GetModelsTest(SimpleTestCase):
    # The app registry must never expose models from apps that are not in
    # INSTALLED_APPS, even if their module has been imported.

    def setUp(self):
        from .not_installed import models
        self.not_installed_module = models

    def test_get_model_only_returns_installed_models(self):
        with self.assertRaises(LookupError):
            apps.get_model("not_installed", "NotInstalledModel")

    def test_get_models_only_returns_installed_models(self):
        installed_names = [model.__name__ for model in apps.get_models()]
        self.assertNotIn("NotInstalledModel", installed_names)
| bsd-3-clause |
jamesbeebop/CouchPotatoServer | couchpotato/core/media/__init__.py | 22 | 3818 | import os
import traceback
from couchpotato import CPLog, md5
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.plugins.base import Plugin
import six
log = CPLog(__name__)
class MediaBase(Plugin):
    """Base class for media types (movie, show, ...).

    Provides the shared helpers for type registration, searcher/frontend
    completion callbacks, default-title selection and poster caching.
    (Python 2 codebase: uses `unicode` and six.)
    """

    _type = None

    def initType(self):
        # Register this media type so fireEvent('media.types') can list it.
        addEvent('media.types', self.getType)

    def getType(self):
        return self._type

    def createOnComplete(self, media_id):
        """Return a callback that kicks off a manual search for *media_id*
        and chains a frontend notification when that search finishes."""

        def onComplete():
            try:
                media = fireEvent('media.get', media_id, single = True)
                if media:
                    event_name = '%s.searcher.single' % media.get('type')
                    fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
            except:
                log.error('Failed creating onComplete: %s', traceback.format_exc())

        return onComplete

    def createNotifyFront(self, media_id):
        """Return a callback that pushes the media's current state to the
        frontend via a '<type>.update' notification."""

        def notifyFront():
            try:
                media = fireEvent('media.get', media_id, single = True)
                if media:
                    event_name = '%s.update' % media.get('type')
                    fireEvent('notify.frontend', type = event_name, data = media)
            except:
                log.error('Failed creating notifyFront: %s', traceback.format_exc())

        return notifyFront

    def getDefaultTitle(self, info, default_title = None):
        """Pick the title to display: prefer *default_title* (or info['title'])
        when it appears in info['titles'], else fall back to the first title,
        else 'UNKNOWN'."""
        default_title = default_title if default_title else toUnicode(info.get('title'))
        titles = info.get('titles', [])
        counter = 0
        def_title = None
        for title in titles:
            # Accept the first title when no default is known, the only title,
            # a case-insensitive match with the default, or (empty default)
            # the leading title.
            if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
                def_title = toUnicode(title)
                break
            counter += 1

        if not def_title and titles and len(titles) > 0:
            def_title = toUnicode(titles[0])

        return def_title or 'UNKNOWN'

    def getPoster(self, media, image_urls):
        """Ensure *media* has a cached poster file, downloading the first
        usable URL from *image_urls* when the cache is stale or empty."""
        if 'files' not in media:
            media['files'] = {}

        existing_files = media['files']

        image_type = 'poster'
        file_type = 'image_%s' % image_type

        # Deduplicate and keep only files that still exist on disk.
        # BUGFIX: the original removed entries from the list while iterating
        # it, which silently skipped elements; rebuild the list instead.
        unique_files = [ef for ef in set(existing_files.get(file_type, [])) if os.path.isfile(ef)]

        # Replace new files list
        existing_files[file_type] = unique_files
        if len(existing_files[file_type]) == 0:
            # BUGFIX: the original tested len(existing_files) (the dict, which
            # is never empty at this point), so the stale empty entry was
            # never dropped -- and had the branch ever fired it would have
            # raised KeyError on the bare dict.
            del existing_files[file_type]

        images = image_urls.get(image_type, [])
        for y in ['SX300', 'tmdb']:
            initially_try = [x for x in images if y in x]
            # NOTE(review): this overwrites all but the last element with the
            # preferred URLs -- presumably a prioritization heuristic; the
            # slice target looks suspicious ([:-1] vs [:0]) but is kept as-is.
            images[:-1] = initially_try

        # Download the first string URL whose cached file is missing/stale.
        for image in images:
            if not isinstance(image, (str, unicode)):
                continue

            # Is the freshest candidate already cached (filename is an md5 of
            # the URL, so any URL change invalidates the cache)?
            filename = '%s.%s' % (md5(image), getExt(image))
            existing = existing_files.get(file_type, [])
            has_latest = False
            for x in existing:
                if filename in x:
                    has_latest = True

            if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
                file_path = fireEvent('file.download', url = image, single = True)
                if file_path:
                    existing_files[file_type] = [toUnicode(file_path)]
                    break
            else:
                break
shastah/spacewalk | client/debian/packages-already-in-debian/rhn-client-tools/src/firstboot/rhn_choose_server_gui.py | 17 | 3410 | # Copyright 2006--2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors:
# Jan Pazdziora jpazdziora at redhat dot com
# Daniel Benamy <dbenamy@redhat.com>
import sys
sys.path.append("/usr/share/rhn")
from up2date_client import rhnreg
from up2date_client import rhnregGui
from up2date_client import up2dateErrors
import gtk
from gtk import glade
import gettext
_ = lambda x: gettext.ldgettext("rhn-client-tools", x)
gtk.glade.bindtextdomain("rhn-client-tools")
from firstboot.module import Module
from firstboot.constants import *
class moduleClass(Module):
    """Firstboot screen for choosing the registration server
    (RHN Hosted, a Satellite, or -- when installed -- RHSM)."""

    def __init__(self):
        Module.__init__(self)
        # 106.5 slots this screen just before the certificate page
        # (priority 107); see the comment at the end of apply().
        self.priority = 106.5
        self.sidebarTitle = _("Choose Server")
        self.title = _("Choose Server")
        # True when the rhsm firstboot modules are available (see createScreen).
        self.support_sm = False
        self.rhsmActive = True

    def needsNetwork(self):
        # Picking/validating a server requires network access.
        return True

    def apply(self, interface, testing=False):
        """Validate the chosen server and decide which screen comes next."""
        if testing:
            return RESULT_SUCCESS

        # Neither "hosted" nor "satellite" selected while rhsm support is
        # present: jump to the first rhsm_* module instead.
        if self.support_sm \
            and not self.chooseServerPage.chooseServerXml.get_widget("hostedButton").get_active() \
            and not self.chooseServerPage.chooseServerXml.get_widget("satelliteButton").get_active():
            i = 0
            while not interface.moduleList[i].__module__.startswith('rhsm_'):
                i += 1
            interface.moveToPage(pageNum=i)
            self.rhsmActive = True
            return RESULT_JUMP

        try:
            self.rhsmActive = False
            # NOTE(review): a False return jumps to "Red Hat Login" while any
            # other value is reported as failure -- confirm this inversion is
            # the intended contract of chooseServerPageApply().
            if self.chooseServerPage.chooseServerPageApply() is False:
                interface.moveToPage(moduleTitle=_("Red Hat Login"))
                return RESULT_JUMP
            else:
                return RESULT_FAILURE
        except up2dateErrors.SSLCertificateVerifyFailedError:
            interface.moveToPage(moduleTitle=_("Provide Certificate"))
            return RESULT_JUMP
        # return RESULT_SUCCESS should work just as well since the
        # certificate page with priority 107 is the next one anyway

    def createScreen(self):
        """Build the GTK widgets for this screen."""
        self.chooseServerPage = rhnregGui.ChooseServerPage()
        self.vbox = gtk.VBox(spacing=5)
        self.vbox.pack_start(self.chooseServerPage.chooseServerPageVbox(), True, True)

        if sys.modules.has_key('rhsm_login'):
            # rhsm firstboot modules are installed: reveal the extra button.
            self.support_sm = True
            self.rhsmButton = self.chooseServerPage.chooseServerXml.get_widget("rhsmButton")
            self.rhsmButton.set_no_show_all(False)
            self.rhsmButton.show_all()

    def initializeUI(self):
        self.chooseServerPage.chooseServerPagePrepare()
        if self.support_sm and self.rhsmActive:
            self.rhsmButton.set_active(True)

    def shouldAppear(self):
        # Skip this screen on systems that are already registered.
        if rhnreg.registered():
            return False
        return True
| gpl-2.0 |
c-a/jhbuild | jhbuild/utils/sxml.py | 8 | 2925 | # jhbuild - a tool to ease building collections of source packages
# Copyright (C) 2008 Andy Wingo <wingo@pobox.com>
#
# sxml.py: xml as s-expressions
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
An s-expression syntax for XML documents, together with a serializer to
UTF-8.
Use like this:
>>> x = [sxml.h1, "text"]
>>> sxml_to_string (x)
"<h1>text</h1>"
>>> x = [sxml.a(href="about:blank", title="foo"), [sxml.i, "italics & stuff"]]
>>> sxml_to_string (x)
"<a href="about:blank" title="foo"><i>italics & stuff</i></a>"
"""
__all__ = ['sxml', 'sxml_to_string']
# from Django, originally. used to make sure xml is utf-8.
def smart_str(s, encoding='utf-8', errors='strict'):
    # Returns a bytestring version of 's', encoded as specified in 'encoding'.
    # Python 2 only: relies on the `basestring`/`unicode` builtins.
    if not isinstance(s, basestring):
        try:
            return str(s)
        except UnicodeEncodeError:
            # str() choked on non-ASCII data; go through the object's
            # unicode form and encode explicitly.
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        # Non-empty bytestring: transcode from utf-8 to the target encoding.
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
def quote(s):
    """Escape the XML-special characters (&, <, >, \") in *s*."""
    escapes = {'"': '&quot;',
               '&': '&amp;',
               '<': '&lt;',
               '>': '&gt;'}
    pieces = []
    for ch in s:
        pieces.append(escapes.get(ch, ch))
    return ''.join(pieces)
def sxml_to_string(expr):
    """Serialize an s-expression XML tree to a UTF-8 bytestring."""
    # Leaves are plain text: escape and encode them directly.
    if not isinstance(expr, list):
        return smart_str(quote(expr))
    # Interior nodes look like [tag_callable, child, child, ...].
    tag = expr[0]
    rendered_children = [sxml_to_string(child) for child in expr[1:]]
    return smart_str(tag(rendered_children))
class sxml:
    """Attribute-access factory for XML tag builders.

    `sxml.h1` returns a callable `tag`; `sxml.a(href=...)` binds
    attributes first and returns a renderer.  Both are consumed by
    sxml_to_string(), which calls them with the rendered children.
    """
    def __getattr__(self, attr):
        def _trans(k):
            # Map Python-keyword-safe names to real XML attribute names.
            table = {'klass': 'class'}
            return table.get(k, k)
        def tag(*targs, **kw):
            def render(args):
                # <tag attr="v" ...>children...</tag>, attribute values escaped.
                return ('<%s%s>%s</%s>'
                        % (attr,
                           ''.join([' %s="%s"' % (_trans(k), quote(v))
                                    for k, v in kw.items()]),
                           '\n'.join(args),
                           attr))
            render.__name__ = attr
            if targs:
                # Called with positional children already rendered.
                return render(targs[0])
            else:
                # Called with only keyword attrs: return the renderer.
                return render
        # this only works with python2.4
        tag.__name__ = attr
        return tag
# Replace the class with a singleton instance; the module exports the object.
sxml = sxml()
| gpl-2.0 |
ahu-odoo/odoo | addons/account_payment/account_move_line.py | 66 | 2705 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_move_line(osv.osv):
    _inherit = "account.move.line"

    def line2bank(self, cr, uid, ids, payment_type=None, context=None):
        """
        Try to return for each Ledger Posting line a corresponding bank
        account according to the payment type. This work using one of
        the bank of the partner defined on the invoice eventually
        associated to the line.
        Return the first suitable bank for the corresponding partner.

        :return: dict mapping line id -> bank id (or False when none found)
        :raises: osv.except_osv when a line has no partner
        """
        payment_mode_obj = self.pool.get('payment.mode')
        line2bank = {}
        if not ids:
            return {}
        bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
                                                         context=context)
        for line in self.browse(cr, uid, ids, context=context):
            line2bank[line.id] = False
            if line.invoice and line.invoice.partner_bank_id:
                # Bank explicitly set on the invoice wins.
                line2bank[line.id] = line.invoice.partner_bank_id.id
            elif line.partner_id:
                # First bank of the partner whose type suits the payment mode.
                for bank in line.partner_id.bank_ids:
                    if bank.state in bank_type:
                        line2bank[line.id] = bank.id
                        break
                if not line2bank[line.id] and line.partner_id.bank_ids:
                    # Fallback: no suitable-typed bank, take the first one.
                    # (Was `line.id not in line2bank`, which was always False
                    # since the key is initialised above, so this never ran.)
                    line2bank[line.id] = line.partner_id.bank_ids[0].id
            else:
                raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
        return line2bank
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
psdh/servo | tests/wpt/css-tests/tools/webdriver/webdriver/keys.py | 263 | 1481 | """Constants for special keys."""
class Keys:
    """WebDriver special-key codes.

    Each constant is a single code point from the Unicode Private Use
    Area, as assigned by the WebDriver protocol.  The LEFT_*/ARROW_*/
    COMMAND names are aliases of their canonical counterparts.
    """

    # Editing / control keys.
    NULL = '\uE000'
    CANCEL = '\uE001'
    HELP = '\uE002'
    BACK_SPACE = '\uE003'
    TAB = '\uE004'
    CLEAR = '\uE005'
    RETURN = '\uE006'
    ENTER = '\uE007'

    # Modifiers (the LEFT_* names alias the plain ones).
    SHIFT = '\uE008'
    LEFT_SHIFT = SHIFT
    CONTROL = '\uE009'
    LEFT_CONTROL = CONTROL
    ALT = '\uE00A'
    LEFT_ALT = ALT

    PAUSE = '\uE00B'
    ESCAPE = '\uE00C'
    SPACE = '\uE00D'

    # Navigation.
    PAGE_UP = '\uE00E'
    PAGE_DOWN = '\uE00F'
    END = '\uE010'
    HOME = '\uE011'
    LEFT = '\uE012'
    ARROW_LEFT = LEFT
    UP = '\uE013'
    ARROW_UP = UP
    RIGHT = '\uE014'
    ARROW_RIGHT = RIGHT
    DOWN = '\uE015'
    ARROW_DOWN = DOWN
    INSERT = '\uE016'
    DELETE = '\uE017'

    SEMICOLON = '\uE018'
    EQUALS = '\uE019'

    # Numeric keypad.
    NUMPAD0 = '\uE01A'
    NUMPAD1 = '\uE01B'
    NUMPAD2 = '\uE01C'
    NUMPAD3 = '\uE01D'
    NUMPAD4 = '\uE01E'
    NUMPAD5 = '\uE01F'
    NUMPAD6 = '\uE020'
    NUMPAD7 = '\uE021'
    NUMPAD8 = '\uE022'
    NUMPAD9 = '\uE023'
    MULTIPLY = '\uE024'
    ADD = '\uE025'
    SEPARATOR = '\uE026'
    SUBTRACT = '\uE027'
    DECIMAL = '\uE028'
    DIVIDE = '\uE029'

    # Function keys.
    F1 = '\uE031'
    F2 = '\uE032'
    F3 = '\uE033'
    F4 = '\uE034'
    F5 = '\uE035'
    F6 = '\uE036'
    F7 = '\uE037'
    F8 = '\uE038'
    F9 = '\uE039'
    F10 = '\uE03A'
    F11 = '\uE03B'
    F12 = '\uE03C'

    META = '\uE03D'
    COMMAND = META

    ZENKAKU_HANKAKU = '\uE040'
| mpl-2.0 |
dfalt974/SickRage | sickbeard/notifiers/emby.py | 2 | 4219 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
from six.moves import urllib
import sickbeard
from sickbeard import logger
from sickchill.helper.exceptions import ex
try:
import json
except ImportError:
import simplejson as json
class Notifier(object):
    """Notifier for an Emby Media Server."""

    def _notify_emby(self, message, host=None, emby_apikey=None):
        """Handles notifying Emby host via HTTP API

        Returns:
            Returns True for no issue or False if there was an error
        """

        # fill in omitted parameters
        if not host:
            host = sickbeard.EMBY_HOST
        if not emby_apikey:
            emby_apikey = sickbeard.EMBY_APIKEY

        url = 'http://{0}/emby/Notifications/Admin'.format(host)
        values = {'Name': 'SickChill', 'Description': message, 'ImageUrl': sickbeard.LOGO_URL}
        data = json.dumps(values)
        try:
            req = urllib.request.Request(url, data)
            # Emby authenticates via its API key header.
            req.add_header('X-MediaBrowser-Token', emby_apikey)
            req.add_header('Content-Type', 'application/json')

            response = urllib.request.urlopen(req)
            result = response.read()
            response.close()

            logger.log('EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
            return True

        except (urllib.error.URLError, IOError) as e:
            logger.log('EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
            return False


##############################################################################
# Public functions
##############################################################################

    def test_notify(self, host, emby_apikey):
        # Sends a fixed test message using explicit credentials.
        return self._notify_emby('This is a test notification from SickChill', host, emby_apikey)

    def update_library(self, show=None):
        """Handles updating the Emby Media Server host via HTTP API

        Returns:
            Returns True for no issue or False if there was an error
        """

        if sickbeard.USE_EMBY:
            if not sickbeard.EMBY_HOST:
                logger.log('EMBY: No host specified, check your settings', logger.DEBUG)
                return False

            if show:
                # Only thetvdb (indexer == 1) is accepted by Emby's
                # /Library/Series/Updated endpoint.
                if show.indexer == 1:
                    provider = 'tvdb'
                elif show.indexer == 2:
                    logger.log('EMBY: TVRage Provider no longer valid', logger.WARNING)
                    return False
                else:
                    logger.log('EMBY: Provider unknown', logger.WARNING)
                    return False
                query = '?{0}id={1}'.format(provider, show.indexerid)
            else:
                # No specific show: request a full series-library rescan.
                query = ''

            url = 'http://{0}/emby/Library/Series/Updated{1}'.format(sickbeard.EMBY_HOST, query)
            values = {}
            data = urllib.parse.urlencode(values)
            try:
                req = urllib.request.Request(url, data)
                req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY)

                response = urllib.request.urlopen(req)
                result = response.read()
                response.close()

                logger.log('EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
                return True

            except (urllib.error.URLError, IOError) as e:
                logger.log('EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
                return False
| gpl-3.0 |
pymedusa/Medusa | medusa/clients/torrent/mlnet.py | 2 | 1427 | # coding=utf-8
"""MLDonkey Client."""
from __future__ import unicode_literals
from medusa.clients.torrent.generic import GenericClient
class MLNetAPI(GenericClient):
    """Torrent client handler for MLDonkey's HTTP interface."""

    def __init__(self, host=None, username=None, password=None):
        """Set up the client.

        :param host: base URL of the MLDonkey web interface
        :type host: string
        :param username:
        :type username: string
        :param password:
        :type password: string
        """
        super(MLNetAPI, self).__init__('mlnet', host, username, password)
        self.url = self.host
        # self.session.auth = HTTPDigestAuth(self.username, self.password);

    def _get_auth(self):
        """Probe the web UI; cache the response body as the auth token."""
        try:
            self.response = self.session.get(self.host, verify=False)
            self.auth = self.response.content
        except Exception:
            return None

        if self.response.status_code == 404:
            return None
        return self.auth

    def _submit_dllink(self, result):
        # MLDonkey accepts both magnet/torrent URLs through the same
        # 'dllink' console command on its /submit endpoint.
        self.url = '{host}submit'.format(host=self.host)
        params = {
            'q': 'dllink {url}'.format(url=result.url),
        }
        return self._request(method='get', params=params)

    def _add_torrent_uri(self, result):
        return self._submit_dllink(result)

    def _add_torrent_file(self, result):
        # Note: like the URI variant, this sends the URL -- not the file
        # contents -- which is all MLDonkey's dllink command supports.
        return self._submit_dllink(result)


api = MLNetAPI
| gpl-3.0 |
leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/win32/lib/win32pdhutil.py | 21 | 7360 | """Utilities for the win32 Performance Data Helper module
Example:
To get a single bit of data:
>>> import win32pdhutil
>>> win32pdhutil.GetPerformanceAttributes("Memory", "Available Bytes")
6053888
>>> win32pdhutil.FindPerformanceAttributesByName("python", counter="Virtual Bytes")
[22278144]
First example returns data which is not associated with any specific instance.
The second example reads data for a specific instance - hence the list return -
it would return one result for each instance of Python running.
In general, it can be tricky finding exactly the "name" of the data you wish to query.
Although you can use <om win32pdh.EnumObjectItems>(None,None,(eg)"Memory", -1) to do this,
the easiest way is often to simply use PerfMon to find out the names.
"""
import win32pdh, string, time
error = win32pdh.error
# Handle some localization issues.
# see http://support.microsoft.com/default.aspx?scid=http://support.microsoft.com:80/support/kb/articles/Q287/1/59.asp&NoWebContent=1
# Build a map of english_counter_name: counter_id
counter_english_map = {}
def find_pdh_counter_localized_name(english_name, machine_name = None):
    """Map an English PDH counter name to its localized name.

    Lazily builds a map of english name -> counter id from the
    "Counter 009" registry value (009 is the English language id), then
    asks PDH for the localized name of that id.  See MS KB Q287159.
    """
    if not counter_english_map:
        import win32api, win32con
        counter_reg_value = win32api.RegQueryValueEx(win32con.HKEY_PERFORMANCE_DATA,
                                                     "Counter 009")
        counter_list = counter_reg_value[0]
        # The registry value is a flat list alternating id, name, id, name...
        for i in range(0, len(counter_list) - 1, 2):
            try:
                counter_id = int(counter_list[i])
            except ValueError:
                continue
            counter_english_map[counter_list[i+1].lower()] = counter_id
    return win32pdh.LookupPerfNameByIndex(machine_name, counter_english_map[english_name.lower()])
def GetPerformanceAttributes(object, counter, instance = None, inum=-1,
                             format = win32pdh.PDH_FMT_LONG, machine=None):
    """Open a PDH query, take one sample of `counter` and return its value."""
    # NOTE: Many counters require 2 samples to give accurate results,
    # including "% Processor Time" (as by definition, at any instant, a
    # thread's CPU usage is either 0 or 100). To read counters like this,
    # you should copy this function, but keep the counter open, and call
    # CollectQueryData() each time you need to know.
    # See http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262938
    # and http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
    # My older explanation for this was that the "AddCounter" process forced
    # the CPU to 100%, but the above makes more sense :)
    path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) )
    hq = win32pdh.OpenQuery()
    try:
        hc = win32pdh.AddCounter(hq, path)
        try:
            win32pdh.CollectQueryData(hq)
            type, val = win32pdh.GetFormattedCounterValue(hc, format)
            return val
        finally:
            # Always release the counter and the query handle.
            win32pdh.RemoveCounter(hc)
    finally:
        win32pdh.CloseQuery(hq)
def FindPerformanceAttributesByName(instanceName, object = None,
                                    counter = None,
                                    format = win32pdh.PDH_FMT_LONG,
                                    machine = None, bRefresh=0):
    """Find peformance attributes by (case insensitive) instance name.

    Given a process name, return a list with the requested attributes.
    Most useful for returning a tuple of PIDs given a process name.
    (Python 2 only: uses string.lower/xrange.)
    """
    if object is None: object = find_pdh_counter_localized_name("Process", machine)
    if counter is None: counter = find_pdh_counter_localized_name("ID Process", machine)
    if bRefresh: # PDH docs say this is how you do a refresh.
        win32pdh.EnumObjects(None, machine, 0, 1)
    instanceName = string.lower(instanceName)
    items, instances = win32pdh.EnumObjectItems(None,None,object, -1)
    # Track multiple instances of the same name (e.g. several python.exe);
    # each instance is addressed by (name, index).
    instance_dict = {}
    for instance in instances:
        try:
            instance_dict[instance] = instance_dict[instance] + 1
        except KeyError:
            instance_dict[instance] = 0
    ret = []
    for instance, max_instances in instance_dict.items():
        for inum in xrange(max_instances+1):
            if string.lower(instance) == instanceName:
                ret.append(GetPerformanceAttributes(object, counter,
                                                    instance, inum, format,
                                                    machine))
    return ret
def ShowAllProcesses():
    """Demo: print a table of a few counters for every process instance.

    Python 2 only (uses print statements).
    """
    object = find_pdh_counter_localized_name("Process")
    items, instances = win32pdh.EnumObjectItems(None,None,object,
                                                win32pdh.PERF_DETAIL_WIZARD)
    # Need to track multiple instances of the same name.
    instance_dict = {}
    for instance in instances:
        try:
            instance_dict[instance] = instance_dict[instance] + 1
        except KeyError:
            instance_dict[instance] = 0

    # Bit of a hack to get useful info.
    items = [find_pdh_counter_localized_name("ID Process")] + items[:5]
    print "Process Name", string.join(items,",")
    for instance, max_instances in instance_dict.items():
        for inum in xrange(max_instances+1):
            hq = win32pdh.OpenQuery()
            hcs = []
            for item in items:
                path = win32pdh.MakeCounterPath( (None,object,instance,
                                                  None, inum, item) )
                hcs.append(win32pdh.AddCounter(hq, path))
            win32pdh.CollectQueryData(hq)
            # as per http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262938, some "%" based
            # counters need two collections
            time.sleep(0.01)
            win32pdh.CollectQueryData(hq)
            print "%-15s\t" % (instance[:15]),
            for hc in hcs:
                type, val = win32pdh.GetFormattedCounterValue(hc, win32pdh.PDH_FMT_LONG)
                print "%5d" % (val),
                win32pdh.RemoveCounter(hc)
            print
            win32pdh.CloseQuery(hq)
def BrowseCallBackDemo(counter):
    """Demo browse() callback: print the value and parsed parts of `counter`."""
    machine, object, instance, parentInstance, index, counterName = \
        win32pdh.ParseCounterPath(counter)

    result = GetPerformanceAttributes(object, counterName, instance, index,
                                      win32pdh.PDH_FMT_DOUBLE, machine)
    print "Value of '%s' is" % counter, result
    print "Added '%s' on object '%s' (machine %s), instance %s(%d)-parent of %s" \
          % (counterName, object, machine, instance, index, parentInstance)
def browse(callback = BrowseCallBackDemo, title="Python Browser",
           level=win32pdh.PERF_DETAIL_WIZARD):
    """Open the PDH counter-browser dialog; `callback` receives each
    selected counter path."""
    win32pdh.BrowseCounters(None,0, callback, level, title)
if __name__=='__main__':
    # Self-test: dump per-process counters, read a couple of attributes
    # by (localized) name, then open the interactive counter browser.
    ShowAllProcesses()
    # Show how to get a couple of attributes by name.
    counter = find_pdh_counter_localized_name("Virtual Bytes")
    print "Virtual Bytes = ", FindPerformanceAttributesByName("python",
                                                              counter=counter)
    print "Available Bytes = ", GetPerformanceAttributes(
                                    find_pdh_counter_localized_name("Memory"),
                                    find_pdh_counter_localized_name("Available Bytes"))
    # And a browser.
    print "Browsing for counters..."
    browse()
| bsd-3-clause |
tencentyun/Cloud-Image-Migration-Tool | usr/lib/oss2/api.py | 4 | 40750 | # -*- coding: utf-8 -*-
"""
文件上传方法中的data参数
------------------------
诸如 :func:`put_object <Bucket.put_object>` 这样的上传接口都会有 `data` 参数用于接收用户数据。`data` 可以是下述类型
- unicode类型(对于Python3则是str类型):内部会自动转换为UTF-8的bytes
- bytes类型:不做任何转换
- file-like object:对于可以seek和tell的file object,从当前位置读取直到结束。其他类型,请确保当前位置是文件开始。
- 可迭代类型:对于无法探知长度的数据,要求一定是可迭代的。此时会通过Chunked Encoding传输。
Bucket配置修改方法中的input参数
-----------------------------
诸如 :func:`put_bucket_cors <Bucket.put_bucket_cors>` 这样的Bucket配置修改接口都会有 `input` 参数接收用户提供的配置数据。
`input` 可以是下述类型
- Bucket配置信息相关的类,如 `BucketCors`
- unicode类型(对于Python3则是str类型)
- 经过utf-8编码的bytes类型
- file-like object
- 可迭代类型,会通过Chunked Encoding传输
也就是说 `input` 参数可以比 `data` 参数多接受第一种类型的输入。
返回值
------
:class:`Service` 和 :class:`Bucket` 类的大多数方法都是返回 :class:`RequestResult <oss2.models.RequestResult>`
及其子类。`RequestResult` 包含了HTTP响应的状态码、头部以及OSS Request ID,而它的子类则包含用户真正想要的结果。例如,
`ListBucketsResult.buckets` 就是返回的Bucket信息列表;`GetObjectResult` 则是一个file-like object,可以调用 `read()` 来获取响应的
HTTP包体。
异常
----
一般来说Python SDK可能会抛出三种类型的异常,这些异常都继承于 :class:`OssError <oss2.exceptions.OssError>` :
- :class:`ClientError <oss2.exceptions.ClientError>` :由于用户参数错误而引发的异常;
- :class:`ServerError <oss2.exceptions.ServerError>` 及其子类:OSS服务器返回非成功的状态码,如4xx或5xx;
- :class:`RequestError <oss2.exceptions.RequestError>` :底层requests库抛出的异常,如DNS解析错误,超时等;
当然,`Bucket.put_object_from_file` 和 `Bucket.get_object_to_file` 这类函数还会抛出文件相关的异常。
.. _byte_range:
指定下载范围
------------
诸如 :func:`get_object <Bucket.get_object>` 以及 :func:`upload_part_copy <Bucket.upload_part_copy>` 这样的函数,可以接受
`byte_range` 参数,表明读取数据的范围。该参数是一个二元tuple:(start, last)。这些接口会把它转换为Range头部的值,如:
- byte_range 为 (0, 99) 转换为 'bytes=0-99',表示读取前100个字节
- byte_range 为 (None, 99) 转换为 'bytes=-99',表示读取最后99个字节
- byte_range 为 (100, None) 转换为 'bytes=100-',表示读取第101个字节到文件结尾的部分(包含第101个字节)
分页罗列
-------
罗列各种资源的接口,如 :func:`list_buckets <Service.list_buckets>` 、 :func:`list_objects <Bucket.list_objects>` 都支持
分页查询。通过设定分页标记(如:`marker` 、 `key_marker` )的方式可以指定查询某一页。首次调用将分页标记设为空(缺省值,可以不设),
后续的调用使用返回值中的 `next_marker` 、 `next_key_marker` 等。每次调用后检查返回值中的 `is_truncated` ,其值为 `False` 说明
已经到了最后一页。
.. _progress_callback:
上传下载进度
-----------
上传下载接口,诸如 `get_object` 、 `put_object` 、`resumable_upload`,都支持进度回调函数,可以用它实现进度条等功能。
`progress_callback` 的函数原型如下 ::
def progress_callback(bytes_consumed, total_bytes):
'''进度回调函数。
:param int bytes_consumed: 已经消费的字节数。对于上传,就是已经上传的量;对于下载,就是已经下载的量。
:param int total_bytes: 总长度。
'''
其中 `total_bytes` 对于上传和下载有不同的含义:
- 上传:当输入是bytes或可以seek/tell的文件对象,那么它的值就是总的字节数;否则,其值为None
- 下载:当返回的HTTP相应中有Content-Length头部,那么它的值就是Content-Length的值;否则,其值为None
.. _unix_time:
Unix Time
---------
OSS Python SDK会把从服务器获得时间戳都转换为自1970年1月1日UTC零点以来的秒数,即Unix Time。
参见 `Unix Time <https://en.wikipedia.org/wiki/Unix_time>`_
OSS中常用的时间格式有
- HTTP Date格式,形如 `Sat, 05 Dec 2015 11:04:39 GMT` 这样的GMT时间。
用在If-Modified-Since、Last-Modified这些HTTP请求、响应头里。
- ISO8601格式,形如 `2015-12-05T00:00:00.000Z`。
用在生命周期管理配置、列举Bucket结果中的创建时间、列举文件结果中的最后修改时间等处。
`http_date` 函数把Unix Time转换为HTTP Date;而 `http_to_unixtime` 则做相反的转换。如 ::
>>> import oss2, time
>>> unix_time = int(time.time()) # 当前UNIX Time,设其职为 1449313829
>>> date_str = oss2.http_date(unix_time) # 得到 'Sat, 05 Dec 2015 11:10:29 GMT'
>>> oss2.http_to_unixtime(date_str) # 得到 1449313829
.. note::
生成HTTP协议所需的日期(即HTTP Date)时,请使用 `http_date` , 不要使用 `strftime` 这样的函数。因为后者是和locale相关的。
比如,`strftime` 结果中可能会出现中文,而这样的格式,OSS服务器是不能识别的。
`iso8601_to_unixtime` 把ISO8601格式转换为Unix Time;`date_to_iso8601` 和 `iso8601_to_date` 则在ISO8601格式的字符串和
datetime.date之间相互转换。如 ::
>>> import oss2
>>> d = oss2.iso8601_to_date('2015-12-05T00:00:00.000Z') # 得到 datetime.date(2015, 12, 5)
>>> date_str = oss2.date_to_iso8601(d) # 得到 '2015-12-05T00:00:00.000Z'
>>> oss2.iso8601_to_unixtime(date_str) # 得到 1449273600
"""
from . import xml_utils
from . import http
from . import utils
from . import exceptions
from . import defaults
from .models import *
from .compat import urlquote, urlparse, to_unicode, to_string
import time
import shutil
import oss2.utils
class _Base(object):
    """Shared plumbing for Service and Bucket: endpoint normalization,
    request signing and dispatch."""
    def __init__(self, auth, endpoint, is_cname, session, connect_timeout,
                 app_name=''):
        self.auth = auth
        self.endpoint = _normalize_endpoint(endpoint.strip())
        self.session = session or http.Session()
        self.timeout = defaults.get(connect_timeout, defaults.connect_timeout)
        self.app_name = app_name

        self._make_url = _UrlMaker(self.endpoint, is_cname)

    def _do(self, method, bucket_name, key, **kwargs):
        # Sign and send one HTTP request; raise on any non-2xx response.
        key = to_string(key)
        req = http.Request(method, self._make_url(bucket_name, key),
                           app_name=self.app_name,
                           **kwargs)
        self.auth._sign_request(req, bucket_name, key)

        resp = self.session.do_request(req, timeout=self.timeout)
        if resp.status // 100 != 2:
            # Translate HTTP errors into typed oss2 exceptions.
            raise exceptions.make_exception(resp)

        return resp

    def _parse_result(self, resp, parse_func, klass):
        # Wrap the raw response in `klass` and let `parse_func` populate
        # it from the XML body.
        result = klass(resp)
        parse_func(result, resp.read())
        return result
class Service(_Base):
    """Service-level operations, such as listing all of the user's buckets.

    Usage ::

        >>> import oss2
        >>> auth = oss2.Auth('your-access-key-id', 'your-access-key-secret')
        >>> service = oss2.Service(auth, 'oss-cn-hangzhou.aliyuncs.com')
        >>> service.list_buckets()
        <oss2.models.ListBucketsResult object at 0x0299FAB0>

    :param auth: Auth object carrying the user's credentials
    :type auth: oss2.Auth

    :param str endpoint: access domain, e.g. oss-cn-hangzhou.aliyuncs.com
        for the Hangzhou region
    :param session: session to reuse; None opens a new one
    :type session: oss2.Session

    :param float connect_timeout: connection timeout in seconds

    :param str app_name: application name; if non-empty it is added to the
        User-Agent header, so it must be a valid HTTP header value
    """
    def __init__(self, auth, endpoint,
                 session=None,
                 connect_timeout=None,
                 app_name=''):
        super(Service, self).__init__(auth, endpoint, False, session, connect_timeout,
                                      app_name=app_name)

    def list_buckets(self, prefix='', marker='', max_keys=100):
        """List the user's buckets, optionally filtered by name prefix.

        :param str prefix: only list buckets whose name starts with this
            prefix; the empty string lists all buckets
        :param str marker: paging marker; pass '' on the first call, then
            the `next_marker` of the previous result
        :param int max_keys: maximum number of buckets returned per call

        :return: the listing result
        :rtype: oss2.models.ListBucketsResult
        """
        resp = self._do('GET', '', '',
                        params={'prefix': prefix,
                                'marker': marker,
                                'max-keys': str(max_keys)})
        return self._parse_result(resp, xml_utils.parse_list_buckets, ListBucketsResult)
class Bucket(_Base):
    """Bucket and object operations: creating/deleting buckets, uploading
    and downloading objects, etc.

    Usage (bucket in the Hangzhou region) ::

        >>> import oss2
        >>> auth = oss2.Auth('your-access-key-id', 'your-access-key-secret')
        >>> bucket = oss2.Bucket(auth, 'http://oss-cn-hangzhou.aliyuncs.com', 'your-bucket')
        >>> bucket.put_object('readme.txt', 'content of the object')
        <oss2.models.PutObjectResult object at 0x029B9930>

    :param auth: Auth object carrying the user's credentials
    :type auth: oss2.Auth

    :param str endpoint: access domain or CNAME
    :param str bucket_name: bucket name
    :param bool is_cname: True when `endpoint` is a CNAME, False otherwise

    :param session: session to reuse; None opens a new one
    :type session: oss2.Session

    :param float connect_timeout: connection timeout in seconds

    :param str app_name: application name; if non-empty it is added to the
        User-Agent header, so it must be a valid HTTP header value
    """

    # Sub-resource names used as query parameters of bucket-level requests.
    ACL = 'acl'
    CORS = 'cors'
    LIFECYCLE = 'lifecycle'
    LOCATION = 'location'
    LOGGING = 'logging'
    REFERER = 'referer'
    WEBSITE = 'website'

    def __init__(self, auth, endpoint, bucket_name,
                 is_cname=False,
                 session=None,
                 connect_timeout=None,
                 app_name=''):
        super(Bucket, self).__init__(auth, endpoint, is_cname, session, connect_timeout,
                                     app_name=app_name)
        self.bucket_name = bucket_name.strip()
    def sign_url(self, method, key, expires, headers=None, params=None):
        """Generate a pre-signed URL.

        Typical use is producing a signed download link for a trusted user,
        e.g. a link for log.jpg that expires in five minutes::

            >>> bucket.sign_url('GET', 'log.jpg', 5 * 60)
            'http://your-bucket.oss-cn-hangzhou.aliyuncs.com/logo.jpg?OSSAccessKeyId=YourAccessKeyId\&Expires=1447178011&Signature=UJfeJgvcypWq6Q%2Bm3IJcSHbvSak%3D'

        :param method: HTTP method, e.g. 'GET', 'PUT', 'DELETE'
        :type method: str
        :param key: object key
        :param expires: lifetime in seconds; the link expires this many
            seconds from now
        :param headers: HTTP headers that take part in the signature, such
            as x-oss-meta-* user metadata or Content-Type.  Not needed for
            downloads.
        :type headers: dict, preferably oss2.CaseInsensitiveDict
        :param params: HTTP query parameters that take part in the signature

        :return: the signed URL
        """
        key = to_string(key)
        req = http.Request(method, self._make_url(self.bucket_name, key),
                           headers=headers,
                           params=params)
        return self.auth._sign_url(req, self.bucket_name, key, expires)
    def list_objects(self, prefix='', delimiter='', marker='', max_keys=100):
        """List objects in the bucket by prefix.

        :param str prefix: only list objects whose key starts with this prefix
        :param str delimiter: delimiter used to simulate directories
        :param str marker: paging marker; pass '' on the first call, then
            the `next_marker` of the previous result
        :param int max_keys: maximum number of entries returned; files plus
            directories cannot exceed this value

        :return: :class:`ListObjectsResult <oss2.models.ListObjectsResult>`
        """
        resp = self.__do_object('GET', '',
                                params={'prefix': prefix,
                                        'delimiter': delimiter,
                                        'marker': marker,
                                        'max-keys': str(max_keys),
                                        'encoding-type': 'url'})
        return self._parse_result(resp, xml_utils.parse_list_objects, ListObjectsResult)
    def put_object(self, key, data,
                   headers=None,
                   progress_callback=None):
        """Upload a regular object.

        Usage ::

            >>> bucket.put_object('readme.txt', 'content of readme.txt')
            >>> with open(u'local_file.txt', 'rb') as f:
            >>>     bucket.put_object('remote_file.txt', f)

        :param key: object key on OSS
        :param data: content to upload
        :type data: bytes, str or file-like object
        :param headers: user HTTP headers such as Content-Type, Content-MD5
            or x-oss-meta-* metadata
        :type headers: dict, preferably oss2.CaseInsensitiveDict
        :param progress_callback: user progress callback, e.g. for progress
            bars; see the module docs on progress callbacks

        :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
        """
        # Derive Content-Type from the key when not supplied by the caller.
        headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)

        if progress_callback:
            data = utils.make_progress_adapter(data, progress_callback)

        resp = self.__do_object('PUT', key, data=data, headers=headers)
        return PutObjectResult(resp)
    def put_object_from_file(self, key, filename,
                             headers=None,
                             progress_callback=None):
        """Upload a local file as a regular OSS object.

        :param str key: object key on OSS
        :param str filename: local file name; must be readable
        :param headers: user HTTP headers such as Content-Type, Content-MD5
            or x-oss-meta-* metadata
        :type headers: dict, preferably oss2.CaseInsensitiveDict
        :param progress_callback: user progress callback

        :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
        """
        # Content-Type is derived from the local file name here, not the key.
        headers = utils.set_content_type(http.CaseInsensitiveDict(headers), filename)
        with open(to_unicode(filename), 'rb') as f:
            return self.put_object(key, f, headers=headers, progress_callback=progress_callback)
    def append_object(self, key, position, data,
                      headers=None,
                      progress_callback=None):
        """Upload by appending.

        :param str key: a new key, or the key of an existing appendable object
        :param int position: 0 to start a new appendable object; for an
            existing one, its current length.  Obtainable from the previous
            append's `AppendObjectResult.next_position`.
        :param data: user data
        :type data: str, bytes, file-like object or iterable
        :param headers: user HTTP headers such as Content-Type, Content-MD5
            or x-oss-* headers
        :type headers: dict, preferably oss2.CaseInsensitiveDict
        :param progress_callback: user progress callback

        :return: :class:`AppendObjectResult <oss2.models.AppendObjectResult>`

        :raises: :class:`PositionNotEqualToLength <oss2.exceptions.PositionNotEqualToLength>`
            when `position` differs from the object's current length;
            :class:`ObjectNotAppendable <oss2.exceptions.ObjectNotAppendable>`
            when the existing object is not appendable; and possibly others
        """
        headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)

        if progress_callback:
            data = utils.make_progress_adapter(data, progress_callback)

        resp = self.__do_object('POST', key,
                                data=data,
                                headers=headers,
                                params={'append': '', 'position': str(position)})
        return AppendObjectResult(resp)
def get_object(self, key,
               byte_range=None,
               headers=None,
               progress_callback=None):
    """Download an object.

    Usage::

        >>> result = bucket.get_object('readme.txt')
        >>> print(result.read())
        'hello world'

    :param key: object name
    :param byte_range: download range, see :ref:`byte_range`
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :param progress_callback: progress callback, see :ref:`progress_callback`
    :return: file-like object (:class:`GetObjectResult <oss2.models.GetObjectResult>`)
    :raises: :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` if the object
        does not exist; other exceptions may also be raised.
    """
    headers = http.CaseInsensitiveDict(headers)
    range_string = _make_range_string(byte_range)
    if range_string:
        headers['range'] = range_string
    resp = self.__do_object('GET', key, headers=headers)
    return GetObjectResult(resp, progress_callback=progress_callback)
def get_object_to_file(self, key, filename,
                       byte_range=None,
                       headers=None,
                       progress_callback=None):
    """Download an object into a local file.

    :param key: object name
    :param filename: local file path; the parent directory must already
        exist and be writable
    :param byte_range: download range, see :ref:`byte_range`
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :param progress_callback: progress callback, see :ref:`progress_callback`
    :return: :class:`GetObjectResult <oss2.models.GetObjectResult>`
    :raises: :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` if the object
        does not exist; other exceptions may also be raised.
    """
    # Issue the request before creating/truncating the local file, so a
    # failed request (e.g. NoSuchKey) does not leave an empty file behind.
    result = self.get_object(key, byte_range=byte_range, headers=headers,
                             progress_callback=progress_callback)
    with open(to_unicode(filename), 'wb') as f:
        shutil.copyfileobj(result, f)
    return result
def head_object(self, key, headers=None):
    """Fetch object metadata via an HTTP HEAD request.

    The metadata is carried in the response headers and is available through
    the `headers` member of the returned result.

    Usage::

        >>> result = bucket.head_object('readme.txt')
        >>> print(result.content_type)
        text/plain

    :param key: object name
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :return: :class:`HeadObjectResult <oss2.models.HeadObjectResult>`
    :raises: :class:`NotFound <oss2.exceptions.NotFound>` if the bucket or
        the object does not exist.
    """
    resp = self.__do_object('HEAD', key, headers=headers)
    return HeadObjectResult(resp)
def object_exists(self, key):
    """Return True if the object exists, False otherwise.

    Raises if the bucket does not exist or another error occurs.
    """
    # head_object cannot be used here: a HEAD response carries no body, so a
    # 404 could not be told apart between NoSuchBucket and NoSuchKey.
    #
    # Instead, send a GET with an if-modified-since date set 24 hours into
    # the future: an existing object yields 304 (NotModified), while a
    # missing one yields NoSuchKey.
    date = oss2.utils.http_date(int(time.time()) + 24 * 60 * 60)
    try:
        self.get_object(key, headers={'if-modified-since': date})
    except exceptions.NotModified:
        return True
    except exceptions.NoSuchKey:
        return False
    else:
        # A 200 response here means the server did not honor the future
        # if-modified-since date, i.e. clocks disagree badly.
        raise exceptions.ClientError('Client time varies too much from server?')  # pragma: no cover
def copy_object(self, source_bucket_name, source_key, target_key, headers=None):
    """Copy an object into this bucket (server-side copy).

    :param str source_bucket_name: source bucket name
    :param str source_key: source object name
    :param str target_key: target object name in this bucket
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    headers = http.CaseInsensitiveDict(headers)
    # The copy source is conveyed as "/<bucket>/<key>" in this header.
    headers['x-oss-copy-source'] = '/' + source_bucket_name + '/' + source_key
    resp = self.__do_object('PUT', target_key, headers=headers)
    return PutObjectResult(resp)
def update_object_meta(self, key, headers):
    """Update object metadata: standard HTTP headers such as Content-Type,
    plus custom x-oss-meta-* headers.

    Metadata can be read back via :func:`head_object`.

    :param str key: object name
    :param headers: HTTP headers carrying the metadata;
        dict or oss2.CaseInsensitiveDict
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    # Implemented as a same-bucket, same-key copy with replacement headers.
    return self.copy_object(self.bucket_name, key, key, headers=headers)
def delete_object(self, key):
    """Delete an object.

    :param str key: object name
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    resp = self.__do_object('DELETE', key)
    return RequestResult(resp)
def put_object_acl(self, key, permission):
    """Set the ACL of an object.

    :param str key: object name
    :param str permission: one of oss2.OBJECT_ACL_DEFAULT,
        oss2.OBJECT_ACL_PRIVATE, oss2.OBJECT_ACL_PUBLIC_READ or
        oss2.OBJECT_ACL_PUBLIC_READ_WRITE.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    resp = self.__do_object('PUT', key, params={'acl': ''}, headers={'x-oss-object-acl': permission})
    return RequestResult(resp)
def get_object_acl(self, key):
    """Get the ACL of an object.

    :param str key: object name
    :return: :class:`GetObjectAclResult <oss2.models.GetObjectAclResult>`
    """
    resp = self.__do_object('GET', key, params={'acl': ''})
    return self._parse_result(resp, xml_utils.parse_get_object_acl, GetObjectAclResult)
def batch_delete_objects(self, key_list):
    """Delete multiple objects in a single request.

    :param key_list: object names to delete; must not be empty
    :type key_list: list of str
    :return: :class:`BatchDeleteObjectsResult <oss2.models.BatchDeleteObjectsResult>`
    :raises ClientError: if `key_list` is empty
    """
    if not key_list:
        raise ClientError('key_list should not be empty')
    data = xml_utils.to_batch_delete_objects_request(key_list, False)
    # Content-MD5 is mandatory for the DeleteMultipleObjects API.
    resp = self.__do_object('POST', '',
                            data=data,
                            params={'delete': '', 'encoding-type': 'url'},
                            headers={'Content-MD5': utils.content_md5(data)})
    return self._parse_result(resp, xml_utils.parse_batch_delete_objects, BatchDeleteObjectsResult)
def init_multipart_upload(self, key, headers=None):
    """Initiate a multipart upload.

    The returned `upload_id`, together with the bucket and object names,
    uniquely identifies this multipart upload session.

    :param str key: name of the object to upload
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :return: :class:`InitMultipartUploadResult <oss2.models.InitMultipartUploadResult>`
    """
    headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)
    resp = self.__do_object('POST', key, params={'uploads': ''}, headers=headers)
    return self._parse_result(resp, xml_utils.parse_init_multipart_upload, InitMultipartUploadResult)
def upload_part(self, key, upload_id, part_number, data, progress_callback=None):
    """Upload one part of a multipart upload.

    :param str key: object name; must match the one given to
        :func:`init_multipart_upload`.
    :param str upload_id: multipart upload ID
    :param int part_number: part number, starting at 1
    :param data: part payload
    :param progress_callback: progress callback, see :ref:`progress_callback`
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    if progress_callback:
        data = utils.make_progress_adapter(data, progress_callback)
    resp = self.__do_object('PUT', key,
                            params={'uploadId': upload_id, 'partNumber': str(part_number)},
                            data=data)
    return PutObjectResult(resp)
def complete_multipart_upload(self, key, upload_id, parts, headers=None):
    """Complete a multipart upload, creating the final object.

    :param str key: object name; must match the one given to
        :func:`init_multipart_upload`.
    :param str upload_id: multipart upload ID
    :param parts: list of PartInfo; `part_number` and `etag` are required,
        the `etag` coming from the result of :func:`upload_part`.
    :type parts: list of `PartInfo <oss2.models.PartInfo>`
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    # The API requires parts listed in ascending part-number order.
    data = xml_utils.to_complete_upload_request(sorted(parts, key=lambda p: p.part_number))
    resp = self.__do_object('POST', key,
                            params={'uploadId': upload_id},
                            data=data,
                            headers=headers)
    return PutObjectResult(resp)
def abort_multipart_upload(self, key, upload_id):
    """Abort a multipart upload, discarding its uploaded parts.

    :param str key: object name; must match the one given to
        :func:`init_multipart_upload`.
    :param str upload_id: multipart upload ID
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    resp = self.__do_object('DELETE', key,
                            params={'uploadId': upload_id})
    return RequestResult(resp)
def list_multipart_uploads(self,
                           prefix='',
                           delimiter='',
                           key_marker='',
                           upload_id_marker='',
                           max_uploads=1000):
    """List in-progress multipart uploads. Supports paging.

    :param str prefix: only list uploads whose object name has this prefix
    :param str delimiter: directory delimiter
    :param str key_marker: paging marker on object name; omit on the first
        call, then pass `next_key_marker` from the previous result.
    :param str upload_id_marker: paging marker on upload ID; omit on the
        first call, then pass `next_upload_id_marker` from the previous result.
    :param int max_uploads: maximum number of entries per call
    :return: :class:`ListMultipartUploadsResult <oss2.models.ListMultipartUploadsResult>`
    """
    resp = self.__do_object('GET', '',
                            params={'uploads': '',
                                    'prefix': prefix,
                                    'delimiter': delimiter,
                                    'key-marker': key_marker,
                                    'upload-id-marker': upload_id_marker,
                                    'max-uploads': str(max_uploads),
                                    'encoding-type': 'url'})
    return self._parse_result(resp, xml_utils.parse_list_multipart_uploads, ListMultipartUploadsResult)
def upload_part_copy(self, source_bucket_name, source_key, byte_range,
                     target_key, target_upload_id, target_part_number,
                     headers=None):
    """Server-side part copy: copy part or all of an existing object as one
    part of a multipart upload of the target object.

    :param str source_bucket_name: source bucket name
    :param str source_key: source object name
    :param byte_range: range within the source object, see :ref:`byte_range`
    :param str target_key: target object name
    :param str target_upload_id: multipart upload ID of the target
    :param int target_part_number: target part number
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    headers = http.CaseInsensitiveDict(headers)
    headers['x-oss-copy-source'] = '/' + source_bucket_name + '/' + source_key
    range_string = _make_range_string(byte_range)
    if range_string:
        headers['x-oss-copy-source-range'] = range_string
    resp = self.__do_object('PUT', target_key,
                            params={'uploadId': target_upload_id,
                                    'partNumber': str(target_part_number)},
                            headers=headers)
    return PutObjectResult(resp)
def list_parts(self, key, upload_id,
               marker='', max_parts=1000):
    """List the parts already uploaded for a multipart upload. Supports paging.

    :param str key: object name
    :param str upload_id: multipart upload ID
    :param str marker: paging marker
    :param int max_parts: maximum number of parts per call
    :return: :class:`ListPartsResult <oss2.models.ListPartsResult>`
    """
    resp = self.__do_object('GET', key,
                            params={'uploadId': upload_id,
                                    'part-number-marker': marker,
                                    'max-parts': str(max_parts)})
    return self._parse_result(resp, xml_utils.parse_list_parts, ListPartsResult)
def create_bucket(self, permission=None):
    """Create this bucket.

    :param str permission: bucket ACL; oss2.BUCKET_ACL_PRIVATE (recommended,
        server default), oss2.BUCKET_ACL_PUBLIC_READ or
        oss2.BUCKET_ACL_PUBLIC_READ_WRITE.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    if permission:
        headers = {'x-oss-acl': permission}
    else:
        # No header: let the service apply its default ACL.
        headers = None
    resp = self.__do_bucket('PUT', headers=headers)
    return RequestResult(resp)
def delete_bucket(self):
    """Delete this bucket. Only a bucket with no objects and no pending
    multipart uploads can be deleted.

    :return: :class:`RequestResult <oss2.models.RequestResult>`
    :raises: :class:`BucketNotEmpty <oss2.exceptions.BucketNotEmpty>` when
        attempting to delete a non-empty bucket.
    """
    resp = self.__do_bucket('DELETE')
    return RequestResult(resp)
def put_bucket_acl(self, permission):
    """Set the bucket ACL.

    :param str permission: oss2.BUCKET_ACL_PRIVATE,
        oss2.BUCKET_ACL_PUBLIC_READ or oss2.BUCKET_ACL_PUBLIC_READ_WRITE
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    resp = self.__do_bucket('PUT', headers={'x-oss-acl': permission}, params={Bucket.ACL: ''})
    return RequestResult(resp)
def get_bucket_acl(self):
    """Get the bucket ACL.

    :return: :class:`GetBucketAclResult <oss2.models.GetBucketAclResult>`
    """
    resp = self.__do_bucket('GET', params={Bucket.ACL: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_acl, GetBucketAclResult)
def put_bucket_cors(self, input):
    """Set the bucket CORS configuration.

    :param input: :class:`BucketCors <oss2.models.BucketCors>` object, or
        raw request data passed through unchanged.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    data = self.__convert_data(BucketCors, xml_utils.to_put_bucket_cors, input)
    resp = self.__do_bucket('PUT', data=data, params={Bucket.CORS: ''})
    return RequestResult(resp)
def get_bucket_cors(self):
    """Get the bucket CORS configuration.

    :return: :class:`GetBucketCorsResult <oss2.models.GetBucketCorsResult>`
    """
    resp = self.__do_bucket('GET', params={Bucket.CORS: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_cors, GetBucketCorsResult)
def delete_bucket_cors(self):
    """Delete the bucket CORS configuration."""
    resp = self.__do_bucket('DELETE', params={Bucket.CORS: ''})
    return RequestResult(resp)
def put_bucket_lifecycle(self, input):
    """Set the bucket lifecycle-management configuration.

    :param input: :class:`BucketLifecycle <oss2.models.BucketLifecycle>`
        object, or raw request data passed through unchanged.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    data = self.__convert_data(BucketLifecycle, xml_utils.to_put_bucket_lifecycle, input)
    resp = self.__do_bucket('PUT', data=data, params={Bucket.LIFECYCLE: ''})
    return RequestResult(resp)
def get_bucket_lifecycle(self):
    """Get the bucket lifecycle-management configuration.

    :return: :class:`GetBucketLifecycleResult <oss2.models.GetBucketLifecycleResult>`
    :raises: :class:`NoSuchLifecycle <oss2.exceptions.NoSuchLifecycle>` if
        no lifecycle configuration is set.
    """
    resp = self.__do_bucket('GET', params={Bucket.LIFECYCLE: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_lifecycle, GetBucketLifecycleResult)
def delete_bucket_lifecycle(self):
    """Delete the lifecycle configuration. Succeeds even when none is set."""
    resp = self.__do_bucket('DELETE', params={Bucket.LIFECYCLE: ''})
    return RequestResult(resp)
def get_bucket_location(self):
    """Get the data-center (region) of this bucket.

    :return: :class:`GetBucketLocationResult <oss2.models.GetBucketLocationResult>`
    """
    resp = self.__do_bucket('GET', params={Bucket.LOCATION: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_location, GetBucketLocationResult)
def put_bucket_logging(self, input):
    """Enable/configure access logging for this bucket.

    :param input: :class:`BucketLogging <oss2.models.BucketLogging>` object,
        or raw request data passed through unchanged.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    data = self.__convert_data(BucketLogging, xml_utils.to_put_bucket_logging, input)
    resp = self.__do_bucket('PUT', data=data, params={Bucket.LOGGING: ''})
    return RequestResult(resp)
def get_bucket_logging(self):
    """Get the access-logging configuration of this bucket.

    :return: :class:`GetBucketLoggingResult <oss2.models.GetBucketLoggingResult>`
    """
    resp = self.__do_bucket('GET', params={Bucket.LOGGING: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_logging, GetBucketLoggingResult)
def delete_bucket_logging(self):
    """Disable access logging for this bucket."""
    resp = self.__do_bucket('DELETE', params={Bucket.LOGGING: ''})
    return RequestResult(resp)
def put_bucket_referer(self, input):
    """Set hotlink protection (referer whitelist) for this bucket.

    :param input: :class:`BucketReferer <oss2.models.BucketReferer>` object,
        or raw request data passed through unchanged.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    data = self.__convert_data(BucketReferer, xml_utils.to_put_bucket_referer, input)
    resp = self.__do_bucket('PUT', data=data, params={Bucket.REFERER: ''})
    return RequestResult(resp)
def get_bucket_referer(self):
    """Get the hotlink-protection (referer) configuration of this bucket.

    :return: :class:`GetBucketRefererResult <oss2.models.GetBucketRefererResult>`
    """
    resp = self.__do_bucket('GET', params={Bucket.REFERER: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_referer, GetBucketRefererResult)
def put_bucket_website(self, input):
    """Configure static website hosting for this bucket.

    :param input: :class:`BucketWebsite <oss2.models.BucketWebsite>` object,
        or raw request data passed through unchanged.
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    data = self.__convert_data(BucketWebsite, xml_utils.to_put_bucket_website, input)
    resp = self.__do_bucket('PUT', data=data, params={Bucket.WEBSITE: ''})
    return RequestResult(resp)
def get_bucket_website(self):
    """Get the static-website-hosting configuration of this bucket.

    :return: :class:`GetBucketWebsiteResult <oss2.models.GetBucketWebsiteResult>`
    :raises: :class:`NoSuchWebsite <oss2.exceptions.NoSuchWebsite>` if
        static website hosting is not configured.
    """
    resp = self.__do_bucket('GET', params={Bucket.WEBSITE: ''})
    # NOTE: "websiste" is the (misspelled) name of the parser in xml_utils.
    return self._parse_result(resp, xml_utils.parse_get_bucket_websiste, GetBucketWebsiteResult)
def delete_bucket_website(self):
    """Disable static website hosting for this bucket."""
    resp = self.__do_bucket('DELETE', params={Bucket.WEBSITE: ''})
    return RequestResult(resp)
def _get_bucket_config(self, config):
    """Fetch a raw bucket configuration selected by `config`.

    Returns the bare `RequestResult`; the XML payload is available through
    its read() method. Not recommended for general use — prefer the typed
    get_bucket_* accessors.

    :param str config: e.g. `Bucket.ACL`, `Bucket.LOGGING`
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    return self.__do_bucket('GET', params={config: ''})
def __do_object(self, method, key, **kwargs):
    # Issue an object-level request against this bucket.
    return self._do(method, self.bucket_name, key, **kwargs)
def __do_bucket(self, method, **kwargs):
    # Issue a bucket-level request (empty object key).
    return self._do(method, self.bucket_name, '', **kwargs)
def __convert_data(self, klass, converter, data):
    # Serialize `data` with `converter` when it is a model instance of
    # `klass`; otherwise assume it is already raw request data.
    if isinstance(data, klass):
        return converter(data)
    else:
        return data
def _normalize_endpoint(endpoint):
if not endpoint.startswith('http://') and not endpoint.startswith('https://'):
return 'http://' + endpoint
else:
return endpoint
# Endpoint classification used by _UrlMaker to decide the URL layout.
_ENDPOINT_TYPE_ALIYUN = 0   # standard endpoint: bucket name becomes a sub-domain
_ENDPOINT_TYPE_CNAME = 1    # user CNAME domain: bucket implied by the domain
_ENDPOINT_TYPE_IP = 2       # IP/localhost endpoint: bucket name goes in the path
def _make_range_string(range):
    """Build the value of an HTTP Range header from a (start, last) pair.

    Returns '' when `range` is None or both endpoints are None.
    """
    if range is None:
        return ''
    start, last = range[0], range[1]
    if start is None and last is None:
        return ''
    return 'bytes=' + _range(start, last)
def _range(start, last):
def to_str(pos):
if pos is None:
return ''
else:
return str(pos)
return to_str(start) + '-' + to_str(last)
def _determine_endpoint_type(netloc, is_cname, bucket_name):
    # Classify the endpoint so _UrlMaker knows where the bucket name goes.
    if utils.is_ip_or_localhost(netloc):
        return _ENDPOINT_TYPE_IP
    if is_cname:
        return _ENDPOINT_TYPE_CNAME
    if utils.is_valid_bucket_name(bucket_name):
        return _ENDPOINT_TYPE_ALIYUN
    else:
        # An invalid bucket name cannot be used as a sub-domain, so fall
        # back to path-style (IP-like) addressing.
        return _ENDPOINT_TYPE_IP
class _UrlMaker(object):
    """Builds request URLs for a fixed endpoint from (bucket_name, key)."""

    def __init__(self, endpoint, is_cname):
        p = urlparse(endpoint)
        self.scheme = p.scheme
        self.netloc = p.netloc
        self.is_cname = is_cname

    def __call__(self, bucket_name, key):
        # NOTE(review): caching the per-call endpoint type on self makes the
        # instance stateful across calls — confirm callers never share one
        # instance concurrently.
        self.type = _determine_endpoint_type(self.netloc, self.is_cname, bucket_name)
        # Quote every character of the key, including '/'.
        key = urlquote(key, '')
        if self.type == _ENDPOINT_TYPE_CNAME:
            # Bucket is implied by the CNAME domain itself.
            return '{0}://{1}/{2}'.format(self.scheme, self.netloc, key)
        if self.type == _ENDPOINT_TYPE_IP:
            # Path-style addressing: bucket name goes in the path.
            if bucket_name:
                return '{0}://{1}/{2}/{3}'.format(self.scheme, self.netloc, bucket_name, key)
            else:
                return '{0}://{1}/{2}'.format(self.scheme, self.netloc, key)
        if not bucket_name:
            # Service-level request (e.g. list buckets): a key is meaningless.
            assert not key
            return '{0}://{1}'.format(self.scheme, self.netloc)
        # Virtual-host style: bucket name becomes a sub-domain.
        return '{0}://{1}.{2}/{3}'.format(self.scheme, bucket_name, self.netloc, key)
| mit |
ehashman/oh-mainline | mysite/customs/migrations/0013_fix_fieldname_in_bugzillaurl.py | 16 | 4442 | # This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.customs.models import *
class Migration:
    """South migration: rename BugzillaUrl.bugzilla_tracker to
    BugzillaUrl.tracker (drop old FK column, add new one)."""

    def forwards(self, orm):
        """Apply: add the new 'tracker' FK column, drop the old one."""
        # Adding field 'BugzillaUrl.tracker'
        db.add_column('customs_bugzillaurl', 'tracker', orm['customs.bugzillaurl:tracker'])
        # Deleting field 'BugzillaUrl.bugzilla_tracker'
        db.delete_column('customs_bugzillaurl', 'bugzilla_tracker_id')

    def backwards(self, orm):
        """Revert: drop 'tracker', restore 'bugzilla_tracker'."""
        # Deleting field 'BugzillaUrl.tracker'
        db.delete_column('customs_bugzillaurl', 'tracker_id')
        # Adding field 'BugzillaUrl.bugzilla_tracker'
        db.add_column('customs_bugzillaurl', 'bugzilla_tracker', orm['customs.bugzillaurl:bugzilla_tracker'])

    # Frozen ORM snapshot used by South to build the `orm` object above.
    models = {
        'customs.bugzillatracker': {
            'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'base_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
            'bitesized_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'bitesized_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10'}),
            'bug_project_name_format': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'documentation_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'documentation_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'query_url_type': ('django.db.models.fields.CharField', [], {'default': "'xml'", 'max_length': '20'})
        },
        'customs.bugzillaurl': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.BugzillaTracker']", 'null': True}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
        },
        'customs.recentmessagefromcia': {
            'branch': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'committer_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'module': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'time_received': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'customs.webresponse': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response_headers': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['customs']
| agpl-3.0 |
defionscode/ansible | lib/ansible/plugins/callback/splunk.py | 54 | 8017 | # -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: splunk
type: aggregate
short_description: Sends task result events to Splunk HTTP Event Collector
author: "Stuart Hirst <support@convergingdata.com>"
description:
- This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
- The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
- Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
version_added: "2.7"
requirements:
- Whitelisting this callback plugin
- 'Create a HTTP Event Collector in Splunk'
- 'Define the url and token in ansible.cfg'
options:
url:
description: URL to the Splunk HTTP collector source
env:
- name: SPLUNK_URL
ini:
- section: callback_splunk
key: url
authtoken:
description: Token to authenticate the connection to the Splunk HTTP collector
env:
- name: SPLUNK_AUTHTOKEN
ini:
- section: callback_splunk
key: authtoken
'''
EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = splunk
Set the environment variable
export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
Set the ansible.cfg variable in the callback_splunk block
[callback_splunk]
url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
'''
import json
import uuid
import socket
import getpass
from datetime import datetime
from os.path import basename
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class SplunkHTTPCollectorSource(object):
    """Collects run metadata and posts task-result events to a Splunk
    HTTP Event Collector (HEC) endpoint."""

    def __init__(self):
        self.ansible_check_mode = False
        self.ansible_playbook = ""
        self.ansible_version = ""
        # One UUID per playbook run, so events can be correlated in Splunk.
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        self.ip_address = socket.gethostbyname(socket.gethostname())
        self.user = getpass.getuser()

    def send_event(self, url, authtoken, state, result, runtime):
        """POST one task result to the HEC endpoint.

        :param url: HEC collector URL
        :param authtoken: HEC token, sent as 'Authorization: Splunk <token>'
        :param state: outcome label, e.g. 'OK', 'FAILED', 'SKIPPED'
        :param result: ansible TaskResult for the finished task
        :param runtime: task duration in seconds
        """
        if result._task_fields['args'].get('_ansible_check_mode') is True:
            self.ansible_check_mode = True

        if result._task_fields['args'].get('_ansible_version'):
            self.ansible_version = \
                result._task_fields['args'].get('_ansible_version')

        if result._task._role:
            ansible_role = str(result._task._role)
        else:
            ansible_role = None

        data = {}
        data['uuid'] = result._task._uuid
        data['session'] = self.session
        data['status'] = state
        data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
                                                       '+0000')
        data['host'] = self.host
        data['ip_address'] = self.ip_address
        data['user'] = self.user
        data['runtime'] = runtime
        data['ansible_version'] = self.ansible_version
        data['ansible_check_mode'] = self.ansible_check_mode
        data['ansible_host'] = result._host.name
        data['ansible_playbook'] = self.ansible_playbook
        data['ansible_role'] = ansible_role
        data['ansible_task'] = result._task_fields
        data['ansible_result'] = result._result

        # This wraps the json payload in and outer json event needed by Splunk
        jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
        jsondata = '{"event":' + jsondata + "}"

        open_url(
            url,
            jsondata,
            headers={
                'Content-type': 'application/json',
                'Authorization': 'Splunk ' + authtoken
            },
            method='POST'
        )
class CallbackModule(CallbackBase):
    """Ansible aggregate callback that forwards every task result
    (ok/skipped/failed/unreachable) to a Splunk HTTP Event Collector."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'splunk'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.start_datetimes = {}  # Collect task start times, keyed by task uuid
        self.url = None
        self.authtoken = None
        self.splunk = SplunkHTTPCollectorSource()

    def _runtime(self, result):
        # Task duration in seconds, from the recorded start time.
        return (
            datetime.utcnow() -
            self.start_datetimes[result._task._uuid]
        ).total_seconds()

    def set_options(self, task_keys=None, var_options=None, direct=None):
        """Read HEC url/token from config; disable the plugin if either is missing."""
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

        self.url = self.get_option('url')

        if self.url is None:
            self.disabled = True
            self._display.warning('Splunk HTTP collector source URL was '
                                  'not provided. The Splunk HTTP collector '
                                  'source URL can be provided using the '
                                  '`SPLUNK_URL` environment variable or '
                                  'in the ansible.cfg file.')

        self.authtoken = self.get_option('authtoken')

        if self.authtoken is None:
            self.disabled = True
            self._display.warning('Splunk HTTP collector requires an authentication'
                                  'token. The Splunk HTTP collector '
                                  'authentication token can be provided using the '
                                  '`SPLUNK_AUTHTOKEN` environment variable or '
                                  'in the ansible.cfg file.')

    def v2_playbook_on_start(self, playbook):
        # Remember the playbook file name for inclusion in every event.
        self.splunk.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_playbook_on_handler_task_start(self, task):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_runner_on_ok(self, result, **kwargs):
        self.splunk.send_event(
            self.url,
            self.authtoken,
            'OK',
            result,
            self._runtime(result)
        )

    def v2_runner_on_skipped(self, result, **kwargs):
        self.splunk.send_event(
            self.url,
            self.authtoken,
            'SKIPPED',
            result,
            self._runtime(result)
        )

    def v2_runner_on_failed(self, result, **kwargs):
        self.splunk.send_event(
            self.url,
            self.authtoken,
            'FAILED',
            result,
            self._runtime(result)
        )

    def runner_on_async_failed(self, result, **kwargs):
        self.splunk.send_event(
            self.url,
            self.authtoken,
            'FAILED',
            result,
            self._runtime(result)
        )

    def v2_runner_on_unreachable(self, result, **kwargs):
        self.splunk.send_event(
            self.url,
            self.authtoken,
            'UNREACHABLE',
            result,
            self._runtime(result)
        )
| gpl-3.0 |
tusharjain95/Babblefire | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Shared virtual root node; initialised in main().
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        # Walk up the ancestry until the caller is found or ROOT is reached.
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Recursively draw this node and its children as an ASCII tree;
        # `branch` is the prefix accumulated from the ancestors.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            # Last sibling: stop drawing the vertical connector below us.
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                "  |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                "  |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
    we want to stop the processing and ignore this line.
    """
    pass
class CommentLineException(Exception):
    """ If the line is a comment (as in the beginning of the trace file),
    just ignore it.
    """
    pass
def parseLine(line):
    """Parse one ftrace line into a (calltime, callee, caller) tuple.

    Raises CommentLineException for comment lines and BrokenLineException
    for lines that do not match the expected format (e.g. a final line
    truncated when the pipe broke).
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.group(1), match.group(2), match.group(3)
def main():
    # Build the call tree from the trace on stdin, then print it.
    # NOTE: this script is Python 2 (print statement below).
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated final line (broken pipe): stop processing.
            break
        except CommentLineException:
            continue
        # Attach the call under its caller, then descend into the callee.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print CallTree.ROOT

if __name__ == "__main__":
    main()
| gpl-2.0 |
ConanChou/hyde | hydeengine/templatetags/hydetags.py | 35 | 13384 | from datetime import datetime
import operator
import os
import re
import string
from django import template
from django.conf import settings
from django.template import Template
from django.template.loader import render_to_string
from django.template.defaultfilters import truncatewords_html, stringfilter
from django.template.loader_tags import do_include
from django.template import Library
from django.utils.safestring import mark_safe
from hydeengine.file_system import File, Folder
# HTML comment markers that delimit excerpt/article sections in rendered
# pages, e.g. "<!-- Hyde::Excerpt::Begin -->".
marker_start = "<!-- Hyde::%s::Begin -->"
marker_end = "<!-- Hyde::%s::End -->"
# Context key under which {% refer %} accumulates values captured by
# {% reference %} blocks of the referred page.
current_referrer = 'current_referrer'
register = Library()
class HydeContextNode(template.Node):
    """No-op node backing the {% hyde %} tag; renders to an empty string."""
    def __init__(self):
        pass

    def render(self, context):
        return ""
@register.tag(name="hyde")
def hyde_context(parser, token):
    """{% hyde %}: placeholder tag that renders nothing."""
    return HydeContextNode()
@register.tag(name="excerpt")
def excerpt(parser, token):
    """{% excerpt %}...{% endexcerpt %}: wrap the content in Hyde
    'Excerpt' begin/end marker comments."""
    nodelist = parser.parse(('endexcerpt',))
    parser.delete_first_token()
    return BracketNode("Excerpt", nodelist)
@register.tag(name="article")
def article(parser, token):
    """{% article %}...{% endarticle %}: wrap the content in Hyde
    'Article' begin/end marker comments."""
    nodelist = parser.parse(('endarticle',))
    parser.delete_first_token()
    return BracketNode("Article", nodelist)
@register.tag(name="refer")
def refer_page(parser, token):
    """{% refer to <page_path> as <namespace> %}: render another page and
    expose its {% reference %} captures under `namespace` in the context."""
    bits = token.contents.split()
    if (len(bits) < 5 or
        bits[1] != 'to' or
        bits[3] != 'as'):
        # Qualify through the `template` module: TemplateSyntaxError is not
        # imported as a bare name anywhere in this module, so the original
        # bare reference raised NameError instead of the intended error.
        raise template.TemplateSyntaxError(
            "Syntax: 'refer to _page_path_ as _namespace_'")
    return ReferNode(bits[2], bits[4])
@register.tag(name="reference")
def reference(parser, token):
    """{% reference <variable> %}...{% endreference %}: render the body and,
    when rendered on behalf of {% refer %}, capture it under `variable`."""
    bits = token.contents.split()
    if len(bits) < 2:
        # Qualify through the `template` module: TemplateSyntaxError is not
        # imported as a bare name anywhere in this module, so the original
        # bare reference raised NameError instead of the intended error.
        raise template.TemplateSyntaxError("Syntax: 'reference _variable_'")
    nodelist = parser.parse(('endreference',))
    parser.delete_first_token()
    return ReferenceNode(bits[1], nodelist)
class ReferNode(template.Node):
    """
    Implements `{% refer to _page_path_ as _namespace_ %}`: renders the
    referred page inside a pushed context so that its {% reference %} blocks
    can publish values, then exposes the collected dict under `namespace`.
    """
    def __init__(self, path, namespace):
        self.path = path
        self.namespace = namespace
    def render(self, context):
        original_page = context['page']
        # Resolve the (quoted) path relative to the current page's folder.
        self.path = original_page.node.folder.child(self.path.strip('"'))
        context.push()
        # Collector dict filled in by {% reference %} tags while the
        # referred page renders.
        context[current_referrer] = {'namespace': self.namespace}
        context['page'] = original_page.node.find_resource(File(self.path))
        # Rendered purely for its side effects on `current_referrer`;
        # the output itself is discarded.
        render_to_string(str(self.path), context)
        var = context[current_referrer]
        context.pop()
        # Publish the collected values in the outer context.
        context[self.namespace] = var
        return ''
class ReferenceNode(template.Node):
    """
    Renders its body and, when rendering happens inside a {% refer %} block,
    also publishes the result under `variable` in the referrer dict.
    """

    def __init__(self, variable, nodelist):
        self.variable = variable
        self.nodelist = nodelist

    def render(self, context):
        body = self.nodelist.render(context)
        if current_referrer in context and context[current_referrer]:
            context[current_referrer][self.variable] = body
        return body
class BracketNode(template.Node):
    """Renders a child nodelist wrapped in begin/end HTML comment markers."""

    def __init__(self, marker, nodelist):
        self.marker = marker
        self.nodelist = nodelist

    def render(self, context):
        body = self.nodelist.render(context)
        return "%s%s%s" % (marker_start % self.marker,
                           body,
                           marker_end % self.marker)
class LatestExcerptNode(template.Node):
    """
    Renders the Excerpt region of the most recently created page under
    `path`, truncated to `words` words. Falls back to the whole site when
    the path does not resolve to a node. Also sets `latest_excerpt_url`
    and `latest_excerpt_title` in the context.
    """
    def __init__(self, path, words=50):
        self.path = path
        self.words = words
    def render(self, context):
        sitemap_node = None
        # `path` is itself a tiny template; render it, then strip quotes.
        self.path = self.path.render(context).strip('"')
        sitemap_node = context["site"].find_node(Folder(self.path))
        if not sitemap_node:
            sitemap_node = context["site"]
        def later(page1, page2):
            # Pick whichever of the two pages was created more recently.
            return (page1, page2)[page2.created > page1.created]
        # NOTE(review): reduce() raises TypeError if walk_pages() yields
        # nothing — assumes the node always contains at least one page.
        page = reduce(later, sitemap_node.walk_pages())
        rendered = None
        rendered = render_to_string(str(page), context)
        excerpt_start = marker_start % "Excerpt"
        excerpt_end = marker_end % "Excerpt"
        start = rendered.find(excerpt_start)
        if not start == -1:
            context["latest_excerpt_url"] = page.url
            context["latest_excerpt_title"] = page.title
            start = start + len(excerpt_start)
            end = rendered.find(excerpt_end, start)
            # Return only the text between the markers, word-truncated.
            return truncatewords_html(rendered[start:end], self.words)
        else:
            return ""
class RecentPostsNode(template.Node):
    """
    Stores the newest pages (those with display_in_list set) of a site node
    in the context variable named by `var`, limited to `count` entries and
    optionally filtered by a `categories` regular expression.
    """
    def __init__(self, var='recent_posts', count=5, node=None, categories=None):
        self.var = var
        self.count = count
        self.node = node
        self.categories = categories
    def render(self, context):
        if not self.node:
            self.node = context['site']
        else:
            self.node = self.node.resolve(context)
        # `count` and `var` arrive as small templates when they were given
        # explicitly in the tag; render them to plain values.
        if not self.count == 5:
            self.count = self.count.render(context)
        if not self.var == 'recent_posts':
            self.var = self.var.render(context)
        category_filter = None
        if not self.categories is None:
            category_filter = re.compile(self.categories)
        # Cache the full, newest-first, listable page list on the node so
        # repeated renders do not walk the whole site again.
        if (not hasattr(self.node, 'complete_page_list') or
            not self.node.complete_page_list):
            complete_page_list = sorted(
                            self.node.walk_pages(),
                            key=operator.attrgetter("created"), reverse=True)
            complete_page_list = filter(lambda page: page.display_in_list, complete_page_list)
            self.node.complete_page_list = complete_page_list
        if category_filter is None:
            context[self.var] = self.node.complete_page_list[:int(self.count)]
        else:
            # Keep pages with at least one category matching the filter.
            posts = filter(lambda page: page.display_in_list and \
                    reduce(lambda c1,c2: c1 or category_filter.match(c2) is not None, \
                            hasattr(page, 'categories') and page.categories or [], False), self.node.complete_page_list)
            context[self.var] = posts[:int(self.count)]
        return ''
@register.tag(name="recent_posts")
def recent_posts(parser, token):
    """Parse `{% recent_posts [var] [count] [node] [categories] %}`."""
    bits = token.split_contents()
    var = Template(bits[1]) if len(bits) > 1 else 'recent_posts'
    count = Template(bits[2]) if len(bits) > 2 else 5
    node = parser.compile_filter(bits[3]) if len(bits) > 3 else None
    categories = bits[4] if len(bits) > 4 else None
    return RecentPostsNode(var, count, node, categories)
@register.tag(name="latest_excerpt")
def latest_excerpt(parser, token):
    """Parse `{% latest_excerpt [path] [words] %}`."""
    bits = token.split_contents()
    path = Template(bits[1]) if len(bits) > 1 else None
    words = int(bits[2]) if len(bits) > 2 else 50
    return LatestExcerptNode(path, words)
@register.tag(name="render_excerpt")
def render_excerpt(parser, token):
    """Parse `{% render_excerpt page [words] %}`."""
    bits = token.split_contents()
    page = parser.compile_filter(bits[1]) if len(bits) > 1 else None
    words = int(bits[2]) if len(bits) > 2 else 50
    return RenderExcerptNode(page, words)
@register.tag(name="render_article")
def render_article(parser, token):
    """Parse `{% render_article page %}`."""
    bits = token.split_contents()
    page = parser.compile_filter(bits[1]) if len(bits) > 1 else None
    return RenderArticleNode(page)
class RenderExcerptNode(template.Node):
    """Renders the Excerpt region of a page, truncated to `words` words.

    Side effects: sets `excerpt_url` and `excerpt_title` in the context.
    """

    def __init__(self, page, words=50):
        self.page = page
        self.words = words

    def render(self, context):
        target = self.page.resolve(context)
        context["excerpt_url"] = target.url
        context["excerpt_title"] = target.title
        body = get_bracketed_content(context, target, "Excerpt")
        return truncatewords_html(body, self.words)
class RenderArticleNode(template.Node):
    """Renders the Article region of a page."""

    def __init__(self, page):
        self.page = page

    def render(self, context):
        target = self.page.resolve(context)
        return get_bracketed_content(context, target, "Article")
def get_bracketed_content(context, page, marker):
    """Render `page` and return the text between its `marker` comments.

    Temporarily swaps `page` into the context for rendering, then restores
    the original. Returns "" when the begin marker is absent.
    """
    saved_page = context['page']
    context['page'] = page
    rendered = render_to_string(str(page), context)
    context['page'] = saved_page
    begin = marker_start % marker
    finish = marker_end % marker
    start = rendered.find(begin)
    if start == -1:
        return ""
    start += len(begin)
    end = rendered.find(finish, start)
    return rendered[start:end]
def hyde_thumbnail(url):
    """Insert the configured thumbnail postfix before the file extension.

    e.g. "img/a.png" -> "img/a-thumb.png" (postfix taken from
    settings.THUMBNAIL_FILENAME_POSTFIX, defaulting to "-thumb").
    """
    suffix = getattr(settings, 'THUMBNAIL_FILENAME_POSTFIX', '-thumb')
    base, ext = url.rsplit('.', 1)
    return "%s%s.%s" % (base, suffix, ext)
register.filter(stringfilter(hyde_thumbnail))
@register.filter
def value_for_key(dictionary, key):
    """Return dictionary[key], or "" when the dict is falsy or key missing.

    Replaces the deprecated dict.has_key() (removed in Python 3) with the
    `in` operator; behavior is unchanged.
    """
    if not dictionary or key not in dictionary:
        return ""
    return dictionary[key]
@register.filter
def xmldatetime(dt):
    """Format a datetime as ISO-8601 with a timezone suffix.

    Naive datetimes (empty %z) get a literal "Z" suffix; aware ones get
    "+HH:MM". A falsy `dt` defaults to datetime.now().
    """
    if not dt:
        dt = datetime.now()
    offset = dt.strftime("%z")
    suffix = (offset[:3] + ":" + offset[3:]) if offset else "Z"
    return dt.strftime("%Y-%m-%dT%H:%M:%S") + suffix
@register.filter
def remove_date_prefix(slug, sep="-"):
    """Strip a leading date prefix (three 2-4 digit groups joined by `sep`)
    from `slug`, e.g. "2010-01-02-my-post" -> "my-post".

    Returns the slug unchanged when no date prefix is present.
    """
    expr = sep.join([r"\d{2,4}"] * 3 + ["(.*)"])
    match = re.match(expr, slug)
    if not match:
        return slug
    # Fix: group(1) is the remainder after the date prefix. The original
    # returned group(0) — the whole match — so the prefix was never removed.
    return match.group(1)
@register.filter
def unslugify(slug):
    """Turn a slug into a title: replace "_" and "-" with spaces, drop ".",
    and capitalize each resulting word.
    """
    words = slug.replace("_", " ").replace("-", " ").replace(".", "").split()
    # Generator expression instead of map(lambda str: ...), whose parameter
    # shadowed the builtin `str`. Output is identical.
    return ' '.join(word.capitalize() for word in words)
@register.tag(name="hyde_listing_page_rewrite_rules")
def hyde_listing_page_rewrite_rules(parser, token):
    """Emit Apache mod_rewrite RewriteRules for clean urls for the pages in
    LISTING_PAGE_NAMES.

    Designed for a .htaccess file; untested inside httpd.conf. Only
    generates RewriteRules — it neither enables url rewriting nor sets
    RewriteBase.
    """
    return RenderHydeListingPageRewriteRulesNode()
# mod_rewrite rule template: serve /foo/ from /foo/<name>.html when that
# file exists. ${name} is filled in via string.Template.safe_substitute.
LPN_REWRITE_RULE = string.Template(\
r"""
RewriteCond %{REQUEST_FILENAME}/${name}.html -f
RewriteRule ^(.*) $1/${name}.html
"""
)
class RenderHydeListingPageRewriteRulesNode(template.Node):
    """Renders one LPN_REWRITE_RULE per name in settings.LISTING_PAGE_NAMES,
    wrapped in BEGIN/END banner comments. Renders '' when the list is empty.
    """

    def render(self, context):
        names = settings.LISTING_PAGE_NAMES
        if not names:
            return ''
        body = ''.join(LPN_REWRITE_RULE.safe_substitute({'name': name})
                       for name in names)
        return ("### BEGIN GENERATED REWRITE RULES ####\n"
                + body
                + "\n#### END GENERATED REWRITE RULES ####")
class IncludeTextNode(template.Node):
    """
    Wraps a standard {% include %} node and post-processes its rendered
    output through Markdown and then Typogrify.
    """
    def __init__(self, include_node):
        self.include_node = include_node
    def render(self, context):
        try:
            # Imported lazily so these packages are only required when the
            # tag is actually rendered.
            import markdown
            import typogrify
        except ImportError:
            print u"`includetext` requires Markdown and Typogrify."
            raise
        output = self.include_node.render(context)
        output = markdown.markdown(output)
        output = typogrify.typogrify(output)
        return output
@register.tag(name="includetext")
def includetext(parser, token):
    """Like {% include %}, but pipes the output through Markdown/Typogrify."""
    return IncludeTextNode(do_include(parser, token))
class RecentResourcesNode(template.Node):
    """Stores the media list of a page's node (optionally truncated to
    `count` items) in the context variable named by `var_name`.
    """

    def __init__(self, tag_name, count=0, page='page', var_name='resources'):
        self.tag_name = tag_name
        self.count = int(count)
        self.page = template.Variable(page)
        self.var_name = var_name

    def render(self, context):
        target = self.page.resolve(context)
        resources = (target is not None and target.node.media) or []
        if self.count:
            resources = resources[:self.count]
        context[self.var_name] = resources
        return ''
@register.tag(name='recent_resources')
def recent_resources(parser, token):
    """Parse `{% recent_resources [count [page]] [as var] %}`."""
    bits = list(token.split_contents())
    extra = {}
    if len(bits) >= 3 and bits[-2] == 'as':
        extra['var_name'] = bits.pop()
        bits.pop()
    return RecentResourcesNode(*bits, **extra)
class RenderNode(template.Node):
    """
    Renders `template_path` with a `data` context variable taken either from
    an inline YAML body ({% render t %}YAML{% endrender %}) or from a
    template variable ({% render t with var %}).
    """
    def __init__(self, template_path, node_list=None, data=None):
        self.template_path = template_path
        self.node_list = node_list
        self.data = data
    def render(self, context):
        if self.node_list:
            text = self.node_list.render(context)
            import yaml
            # SECURITY NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary Python objects; prefer yaml.safe_load if
            # the YAML body can ever contain untrusted content.
            self.data = yaml.load(text)
        else:
            self.data = self.data.resolve(context)
        context.push()
        context['data'] = self.data
        out = render_to_string(self.template_path, context)
        context.pop()
        return out
@register.tag(name='render')
def render(parser, token):
    """Parse `{% render template %}YAML{% endrender %}` or
    `{% render template with var %}`.

    Raises TemplateSyntaxError on a malformed tag.
    """
    bits = token.contents.split()
    if len(bits) < 2:
        # Fix: bare `TemplateSyntaxError` is not imported in this module and
        # would raise NameError instead of the intended syntax error.
        raise template.TemplateSyntaxError(
            "Syntax: {% render _template_ %}YAML{% endrender %}'")
    if ((len(bits) > 2 and len(bits) < 4) or (len(bits) == 4 and bits[2] != "with")):
        raise template.TemplateSyntaxError(
            "Syntax: {% render _template_ with var_data %}'")
    template_path = bits[1]
    nodelist = None
    data = None
    if (len(bits) == 2):
        nodelist = parser.parse(('endrender',))
        parser.delete_first_token()
    else:
        data = template.Variable(bits[3])
    return RenderNode(template_path, node_list=nodelist, data=data)
| mit |
mgr0dzicki/python-neo | neo/core/baseneo.py | 6 | 14948 | # -*- coding: utf-8 -*-
"""
This module defines :class:`BaseNeo`, the abstract base class
used by all :module:`neo.core` classes.
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
from datetime import datetime, date, time, timedelta
from decimal import Decimal
import logging
from numbers import Number
import numpy as np
ALLOWED_ANNOTATION_TYPES = (int, float, complex,
str, bytes,
type(None),
datetime, date, time, timedelta,
Number, Decimal,
np.number, np.bool_)
# handle both Python 2 and Python 3
try:
ALLOWED_ANNOTATION_TYPES += (long, unicode)
except NameError:
pass
try:
basestring
except NameError:
basestring = str
logger = logging.getLogger("Neo")
class MergeError(Exception):
    """Exception raised for errors that occur while merging Neo objects."""
    pass
def _check_annotations(value):
    """
    Recursively check that value is either of a "simple" type (number, string,
    date/time) or is a (possibly nested) dict, list or numpy array containing
    only simple types.

    Raises ValueError for any disallowed type.
    """
    if isinstance(value, np.ndarray):
        if not issubclass(value.dtype.type, ALLOWED_ANNOTATION_TYPES):
            # Fix: the original implicit string concatenation lacked a
            # separating space, producing "...dtype %sare not allowed".
            raise ValueError("Invalid annotation. NumPy arrays with dtype %s "
                             "are not allowed" % value.dtype.type)
    elif isinstance(value, dict):
        for element in value.values():
            _check_annotations(element)
    elif isinstance(value, (list, tuple)):
        for element in value:
            _check_annotations(element)
    elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):
        # Fix: same missing-space defect ("...are notallowed").
        raise ValueError("Invalid annotation. Annotations of type %s are not "
                         "allowed" % type(value))
def merge_annotation(a, b):
    """
    Merge a single pair of annotation values (both must have the same type).

    Policy (intended for use with parallel computations using MPI; still
    open for discussion):
        arrays / lists: concatenate
        dicts: merge recursively
        strings: keep one if equal, else join with ';'
        anything else: assert equality and keep the value
    """
    assert type(a) == type(b), 'type(%s) %s != type(%s) %s' % (a, type(a),
                                                               b, type(b))
    if isinstance(a, dict):
        return merge_annotations(a, b)
    if isinstance(a, np.ndarray):
        return np.append(a, b)
    if isinstance(a, list):
        return a + b
    if isinstance(a, basestring):
        return a if a == b else a + ";" + b
    assert a == b, '%s != %s' % (a, b)
    return a
def merge_annotations(A, B):
    """
    Merge two sets of annotations.
    Merging follows these rules:
    All keys that are in A or B, but not both, are kept.
    For keys that are present in both:
        For arrays or lists: concatenate
        For dicts: merge recursively
        For strings: concatenate with ';'
        Otherwise: warn if the annotations are not equal
    """
    merged = {}
    for name in A:
        if name in B:
            try:
                merged[name] = merge_annotation(A[name], B[name])
            except BaseException as exc:
                # Deliberate best-effort: an unmergeable pair is recorded as
                # a conflict instead of aborting the whole merge.
                #exc.args += ('key %s' % name,)
                #raise
                merged[name] = "MERGE CONFLICT" # temporary hack
        else:
            merged[name] = A[name]
    # Keys present only in B.
    for name in B:
        if name not in merged:
            merged[name] = B[name]
    logger.debug("Merging annotations: A=%s B=%s merged=%s", A, B, merged)
    return merged
def _reference_name(class_name):
"""
Given the name of a class, return an attribute name to be used for
references to instances of that class.
For example, a Segment object has a parent Block object, referenced by
`segment.block`. The attribute name `block` is obtained by calling
`_container_name("Block")`.
"""
name_map = {
"ChannelIndex": "channel_index"
}
return name_map.get(class_name, class_name.lower())
def _container_name(class_name):
    """
    Map a class name to the attribute name used for containers of instances
    of that class.

    For example, a Block holds its Segment children in `block.segments` —
    the name `segments` comes from `_container_name("Segment")`.
    "ChannelIndex" is special-cased to "channel_indexes".
    """
    special = {"ChannelIndex": "channel_indexes"}
    if class_name in special:
        return special[class_name]
    return _reference_name(class_name) + 's'
class BaseNeo(object):
    """
    This is the base class from which all Neo objects inherit.
    This class implements support for universally recommended arguments,
    and also sets up the :attr:`annotations` dict for additional arguments.
    Each class can define one or more of the following class attributes:
        :_single_parent_objects: Neo objects that can be parents of this
                                 object. This attribute is used in cases where
                                 only one parent of this class is allowed.
                                 An instance attribute named
                                 class.__name__.lower() will be automatically
                                 defined to hold this parent and will be
                                 initialized to None.
        :_multi_parent_objects: Neo objects that can be parents of this
                                object. This attribute is used in cases where
                                multiple parents of this class is allowed.
                                An instance attribute named
                                class.__name__.lower()+'s' will be
                                automatically defined to hold this parent and
                                will be initialized to an empty list.
        :_necessary_attrs: A list of tuples containing the attributes that the
                           class must have. The tuple can have 2-4 elements.
                           The first element is the attribute name.
                           The second element is the attribute type.
                           The third element is the number of dimensions
                           (only for numpy arrays and quantities).
                           The fourth element is the dtype of array
                           (only for numpy arrays and quantities).
                           This does NOT include the attributes holding the
                           parents or children of the object.
        :_recommended_attrs: A list of tuples containing the attributes that
                             the class may optionally have. It uses the same
                             structure as :_necessary_attrs:
        :_repr_pretty_attrs_keys_: The names of attributes printed when
                                   pretty-printing using iPython.
    The following helper properties are available:
        :_parent_objects: All parent objects.
                          :_single_parent_objects: + :_multi_parent_objects:
        :_single_parent_containers: The names of the container attributes used
                                    to store :_single_parent_objects:
        :_multi_parent_containers: The names of the container attributes used
                                   to store :_multi_parent_objects:
        :_parent_containers: All parent container attributes.
                             :_single_parent_containers: +
                             :_multi_parent_containers:
        :parents: All objects that are parents of the current object.
        :_all_attrs: All required and optional attributes.
                     :_necessary_attrs: + :_recommended_attrs:
    The following "universal" methods are available:
        :__init__: Grabs the universally recommended arguments :attr:`name`,
            :attr:`file_origin`, and :attr:`description` and stores them as
            attributes.
            Also takes every additional argument (that is, every argument
            that is not handled by :class:`BaseNeo` or the child class), and
            puts in the dict :attr:`annotations`.
        :annotate(**args): Updates :attr:`annotations` with keyword/value
                           pairs.
        :merge(**args): Merge the contents of another object into this one.
                        The merge method implemented here only merges
                        annotations (see :merge_annotations:).
                        Subclasses should implement their own merge rules.
        :merge_annotations(**args): Merge the :attr:`annotations` of another
                                    object into this one.
    Each child class should:
        0) describe its parents (if any) and attributes in the relevant
           class attributes. :_recommended_attrs: should append
           BaseNeo._recommended_attrs to the end.
        1) call BaseNeo.__init__(self, name=name, description=description,
                                 file_origin=file_origin, **annotations)
           with the universal recommended arguments, plus optional annotations
        2) process its required arguments in its __new__ or __init__ method
        3) process its non-universal recommended arguments (in its __new__ or
           __init__ method
    Non-keyword arguments should only be used for required arguments.
    The required and recommended arguments for each child class (Neo object)
    are specified in the _necessary_attrs and _recommended_attrs attributes and
    documentation for the child object.
    """
    # these attributes control relationships, they need to be
    # specified in each child class
    # Parent objects whose children can have a single parent
    _single_parent_objects = ()
    # Parent objects whose children can have multiple parents
    _multi_parent_objects = ()
    # Attributes that an instance is required to have defined
    _necessary_attrs = ()
    # Attributes that an instance may or may not have defined
    _recommended_attrs = (('name', str),
                          ('description', str),
                          ('file_origin', str))
    # Attributes that are used for pretty-printing
    _repr_pretty_attrs_keys_ = ("name", "description", "annotations")
    def __init__(self, name=None, description=None, file_origin=None,
                 **annotations):
        """
        This is the base constructor for all Neo objects.
        Stores universally recommended attributes and creates
        :attr:`annotations` from additional arguments not processed by
        :class:`BaseNeo` or the child class.
        """
        # create `annotations` for additional arguments
        _check_annotations(annotations)
        self.annotations = annotations
        # these attributes are recommended for all objects.
        self.name = name
        self.description = description
        self.file_origin = file_origin
        # initialize parent containers
        for parent in self._single_parent_containers:
            setattr(self, parent, None)
        for parent in self._multi_parent_containers:
            setattr(self, parent, [])
    def annotate(self, **annotations):
        """
        Add annotations (non-standardized metadata) to a Neo object.
        Example:
        >>> obj.annotate(key1=value0, key2=value1)
        >>> obj.annotations['key2']
        value1
        """
        _check_annotations(annotations)
        self.annotations.update(annotations)
    def _has_repr_pretty_attrs_(self):
        # True when any pretty-printed attribute has a truthy value.
        return any(getattr(self, k) for k in self._repr_pretty_attrs_keys_)
    def _repr_pretty_attrs_(self, pp, cycle):
        # Print each non-empty pretty attribute as "name: value".
        first = True
        for key in self._repr_pretty_attrs_keys_:
            value = getattr(self, key)
            if value:
                if first:
                    first = False
                else:
                    pp.breakable()
                with pp.group(indent=1):
                    pp.text("{0}: ".format(key))
                    pp.pretty(value)
    def _repr_pretty_(self, pp, cycle):
        """
        Handle pretty-printing the :class:`BaseNeo`.
        """
        pp.text(self.__class__.__name__)
        if self._has_repr_pretty_attrs_():
            pp.breakable()
            self._repr_pretty_attrs_(pp, cycle)
    @property
    def _single_parent_containers(self):
        """
        Containers for parent objects whose children can have a single parent.
        """
        return tuple([_reference_name(parent) for parent in
                      self._single_parent_objects])
    @property
    def _multi_parent_containers(self):
        """
        Containers for parent objects whose children can have multiple parents.
        """
        return tuple([_container_name(parent) for parent in
                      self._multi_parent_objects])
    @property
    def _parent_objects(self):
        """
        All types for parent objects.
        """
        return self._single_parent_objects + self._multi_parent_objects
    @property
    def _parent_containers(self):
        """
        All containers for parent objects.
        """
        return self._single_parent_containers + self._multi_parent_containers
    @property
    def parents(self):
        """
        All parent objects storing the current object.
        """
        single = [getattr(self, attr) for attr in
                  self._single_parent_containers]
        multi = [list(getattr(self, attr)) for attr in
                 self._multi_parent_containers]
        return tuple(single + sum(multi, []))
    @property
    def _all_attrs(self):
        """
        Returns a combination of all required and recommended
        attributes.
        """
        return self._necessary_attrs + self._recommended_attrs
    def merge_annotations(self, other):
        """
        Merge annotations from the other object into this one.
        Merging follows these rules:
        All keys that are in the either object, but not both, are kept.
        For keys that are present in both objects:
            For arrays or lists: concatenate the two arrays
            For dicts: merge recursively
            For strings: concatenate with ';'
            Otherwise: fail if the annotations are not equal
        """
        merged_annotations = merge_annotations(self.annotations,
                                               other.annotations)
        self.annotations.update(merged_annotations)
    def merge(self, other):
        """
        Merge the contents of another object into this one.
        See :meth:`merge_annotations` for details of the merge operation.
        """
        self.merge_annotations(other)
| bsd-3-clause |
cosmo-ethz/hope | hope/config.py | 1 | 1137 | # Copyright (c) 2013 ETH Zurich, Institute of Astronomy, Lukas Gamper <lukas.gamper@usystems.ch>
from __future__ import print_function, division, absolute_import, unicode_literals
from hope.options import get_cxxflags
# Additional compiler flags, formatted as an array of strings
cxxflags = get_cxxflags()
"""
List of c++ compiler flags. Normally hope determines the right flags itself.
"""
#TODO implement
prefix = ".hope"
"""
Prefix of the folder hope saves all data in.
"""
verbose = False
"""
Print an intermediate representation of each function during compilation.
"""
optimize = False
"""
Use ``sympy`` to simplify expressions and extract common subexpressions.
"""
keeptemp = False
"""
Keep the intermediate c++ source and compiler output generated during compilation.
"""
rangecheck = False
"""
Check if indices are out of bounds.
"""
hopeless = False
"""
Disable hope. If hope.config.hopeless is True, hope.jit returns the original function.
Use this flag for debugging purposes.
"""
# make readable cpp file, but typecasting is not exactly the same as in numpy - this flag is private
_readablecxx = False
albertjan/pypyjs-presentation | assets/js/pypy.js-0.3.1/lib/modules/distutils/dist.py | 175 | 50049 | """distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
__revision__ = "$Id$"
import sys, os, re
from email import message_from_file
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import (DistutilsOptionError, DistutilsArgError,
DistutilsModuleError, DistutilsClassError)
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
# (The capturing group is unused; the pattern only validates the name.)
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
"""The core of the Distutils. Most of the work hiding behind 'setup'
is really done within a Distribution instance, which farms the work out
to the Distutils commands specified on the command line.
Setup scripts will almost never instantiate Distribution directly,
unless the 'setup()' function is totally inadequate to their needs.
However, it is conceivable that a setup script might wish to subclass
Distribution for some specialized purpose, and then pass the subclass
to 'setup()' as the 'distclass' keyword argument. If so, it is
necessary to respect the expectations that 'setup' has of Distribution.
See the code for 'setup()', in core.py, for details.
"""
# 'global_options' describes the command-line options that may be
# supplied to the setup script prior to any actual commands.
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
# these global options. This list should be kept to a bare minimum,
# since every global option is also valid as a command option -- and we
# don't want to pollute the commands with too many options that they
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
('no-user-cfg', None,
'ignore pydistutils.cfg in your home directory'),
]
# 'common_usage' is a short (2-3 line) string describing the common
# usage of the setup script.
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
# options that are not propagated to the commands
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = map(lambda x: translate_longopt(x[0]),
display_options)
# negative options are options that exclude other options
negative_opt = {'quiet': 'verbose'}
# -- Creation/initialization methods -------------------------------
def __init__ (self, attrs=None):
"""Construct a new Distribution instance: initialize all the
attributes of a Distribution, and then use 'attrs' (a dictionary
mapping attribute names to values) to assign some of those
attributes their "real" values. (Any attributes not mentioned in
'attrs' will be assigned to some null value: 0, None, an empty list
or dictionary, etc.) Most importantly, initialize the
'command_obj' attribute to the empty dictionary; this will be
filled in with real command objects by 'parse_command_line()'.
"""
# Default values for our command-line options
self.verbose = 1
self.dry_run = 0
self.help = 0
for attr in self.display_option_names:
setattr(self, attr, 0)
# Store the distribution meta-data (name, version, author, and so
# forth) in a separate object -- we're getting to have enough
# information here (and enough command-line options) that it's
# worth it. Also delegate 'get_XXX()' methods to the 'metadata'
# object in a sneaky and underhanded (but efficient!) way.
self.metadata = DistributionMetadata()
for basename in self.metadata._METHOD_BASENAMES:
method_name = "get_" + basename
setattr(self, method_name, getattr(self.metadata, method_name))
# 'cmdclass' maps command names to class objects, so we
# can 1) quickly figure out which class to instantiate when
# we need to create a new command object, and 2) have a way
# for the setup script to override command classes
self.cmdclass = {}
# 'command_packages' is a list of packages in which commands
# are searched for. The factory for command 'foo' is expected
# to be named 'foo' in the module 'foo' in one of the packages
# named here. This list is searched from the left; an error
# is raised if no named package provides the command being
# searched for. (Always access using get_command_packages().)
self.command_packages = None
# 'script_name' and 'script_args' are usually set to sys.argv[0]
# and sys.argv[1:], but they can be overridden when the caller is
# not necessarily a setup script run from the command-line.
self.script_name = None
self.script_args = None
# 'command_options' is where we store command options between
# parsing them (from config files, the command-line, etc.) and when
# they are actually needed -- ie. when the command in question is
# instantiated. It is a dictionary of dictionaries of 2-tuples:
# command_options = { command_name : { option : (source, value) } }
self.command_options = {}
# 'dist_files' is the list of (command, pyversion, file) that
# have been created by any dist commands run so far. This is
# filled regardless of whether the run is dry or not. pyversion
# gives sysconfig.get_python_version() if the dist file is
# specific to a Python version, 'any' if it is good for all
# Python versions on the target platform, and '' for a source
# file. pyversion should not be used to specify minimum or
# maximum required Python versions; use the metainfo for that
# instead.
self.dist_files = []
# These options are really the business of various commands, rather
# than of the Distribution itself. We provide aliases for them in
# Distribution as a convenience to the developer.
self.packages = None
self.package_data = {}
self.package_dir = None
self.py_modules = None
self.libraries = None
self.headers = None
self.ext_modules = None
self.ext_package = None
self.include_dirs = None
self.extra_path = None
self.scripts = None
self.data_files = None
self.password = ''
# And now initialize bookkeeping stuff that can't be supplied by
# the caller at all. 'command_obj' maps command names to
# Command instances -- that's how we enforce that every command
# class is a singleton.
self.command_obj = {}
# 'have_run' maps command names to boolean values; it keeps track
# of whether we have actually run a particular command, to make it
# cheap to "run" a command whenever we think we might need to -- if
# it's already been done, no need for expensive filesystem
# operations, we just check the 'have_run' dictionary and carry on.
# It's only safe to query 'have_run' for a command class that has
# been instantiated -- a false value will be inserted when the
# command object is created, and replaced with a true value when
# the command is successfully run. Thus it's probably best to use
# '.get()' rather than a straight lookup.
self.have_run = {}
# Now we'll use the attrs dictionary (ultimately, keyword args from
# the setup script) to possibly override any or all of these
# distribution options.
if attrs:
# Pull out the set of command options and work on them
# specifically. Note that this order guarantees that aliased
# command options will override any supplied redundantly
# through the general options dictionary.
options = attrs.get('options')
if options is not None:
del attrs['options']
for (command, cmd_options) in options.items():
opt_dict = self.get_option_dict(command)
for (opt, val) in cmd_options.items():
opt_dict[opt] = ("setup script", val)
if 'licence' in attrs:
attrs['license'] = attrs['licence']
del attrs['licence']
msg = "'licence' distribution option is deprecated; use 'license'"
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
# Now work on the rest of the attributes. Any attribute that's
# not already defined is invalid!
for (key, val) in attrs.items():
if hasattr(self.metadata, "set_" + key):
getattr(self.metadata, "set_" + key)(val)
elif hasattr(self.metadata, key):
setattr(self.metadata, key, val)
elif hasattr(self, key):
setattr(self, key, val)
else:
msg = "Unknown distribution option: %s" % repr(key)
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
# no-user-cfg is handled before other command line args
# because other args override the config files, and this
# one is needed before we can load the config files.
# If attrs['script_args'] wasn't passed, assume false.
#
# This also make sure we just look at the global options
self.want_user_cfg = True
if self.script_args is not None:
for arg in self.script_args:
if not arg.startswith('-'):
break
if arg == '--no-user-cfg':
self.want_user_cfg = False
break
self.finalize_options()
def get_option_dict(self, command):
"""Get the option dictionary for a given command. If that
command's option dictionary hasn't been created yet, then create it
and return the new dictionary; otherwise, return the existing
option dictionary.
"""
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = self.command_options.keys()
commands.sort()
if header is not None:
self.announce(indent + header)
indent = indent + " "
if not commands:
self.announce(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
self.announce(indent +
"no option dict for '%s' command" % cmd_name)
else:
self.announce(indent +
"option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split('\n'):
self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
if DEBUG:
self.announce("using config files: %s" % ', '.join(files))
return files
    def parse_config_files(self, filenames=None):
        """Parse each of 'filenames' (defaulting to the result of
        'find_config_files()') and record the options found in
        'self.command_options'.  A "[global]" section, if present, also
        sets options on the Distribution instance itself.
        """
        # Python 2 spelling; renamed to 'configparser' in Python 3.
        from ConfigParser import ConfigParser
        if filenames is None:
            filenames = self.find_config_files()
        if DEBUG:
            self.announce("Distribution.parse_config_files():")
        parser = ConfigParser()
        for filename in filenames:
            if DEBUG:
                self.announce("  reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)
                for opt in options:
                    if opt != '__name__':
                        val = parser.get(section,opt)
                        # Config files use dashes, attributes use underscores.
                        opt = opt.replace('-', '_')
                        # Remember which file the option came from, for
                        # later error reporting.
                        opt_dict[opt] = (filename, val)
            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()
        # If there was a "global" section in the config file, use it
        # to set Distribution options.
        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        # Negative alias: store the inverted boolean.
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError, msg:
                    raise DistutilsOptionError, msg
# -- Command-line parsing methods ----------------------------------
    def parse_command_line(self):
        """Parse the setup script's command line, taken from the
        'script_args' instance attribute (which defaults to 'sys.argv[1:]'
        -- see 'setup()' in core.py).  This list is first processed for
        "global options" -- options that set attributes of the Distribution
        instance.  Then, it is alternately scanned for Distutils commands
        and options for that command.  Each new command terminates the
        options for the previous command.  The allowed options for a
        command are determined by the 'user_options' attribute of the
        command class -- thus, we have to be able to load command classes
        in order to parse the command line.  Any error in that 'options'
        attribute raises DistutilsGetoptError; any error on the
        command-line raises DistutilsArgError.  If no Distutils commands
        were found on the command line, raises DistutilsArgError.  Return
        true if command-line was successfully parsed and we should carry
        on with executing commands; false if no errors but we shouldn't
        execute commands (currently, this only happens if user asks for
        help).
        """
        #
        # We now have enough information to show the Macintosh dialog
        # that allows the user to interactively specify the "command line".
        #
        toplevel_options = self._get_toplevel_options()
        # We have to parse the command line a bit at a time -- global
        # options, then the first command, then its options, and so on --
        # because each command will be handled by a different class, and
        # the options that are valid for a particular class aren't known
        # until we have loaded the command class, which doesn't happen
        # until we know what the command is.
        self.commands = []
        parser = FancyGetopt(toplevel_options + self.display_options)
        parser.set_negative_aliases(self.negative_opt)
        # 'licence' is accepted as a (deprecated) alias for 'license'.
        parser.set_aliases({'licence': 'license'})
        args = parser.getopt(args=self.script_args, object=self)
        option_order = parser.get_option_order()
        log.set_verbosity(self.verbose)
        # for display options we return immediately
        if self.handle_display_options(option_order):
            return
        # Consume one command (plus its options) per iteration.
        while args:
            args = self._parse_command_opts(parser, args)
            if args is None:  # user asked for help (and got it)
                return
        # Handle the cases of --help as a "global" option, ie.
        # "setup.py --help" and "setup.py --help command ...".  For the
        # former, we show global options (--verbose, --dry-run, etc.)
        # and display-only options (--name, --version, etc.); for the
        # latter, we omit the display-only options and show help for
        # each command listed on the command line.
        if self.help:
            self._show_help(parser,
                            display_options=len(self.commands) == 0,
                            commands=self.commands)
            return
        # Oops, no commands found -- an end-user error
        if not self.commands:
            raise DistutilsArgError, "no commands supplied"
        # All is well: return true
        return 1
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
    def _parse_command_opts(self, parser, args):
        """Parse the command-line options for a single command.
        'parser' must be a FancyGetopt instance; 'args' must be the list
        of arguments, starting with the current command (whose options
        we are about to parse).  Returns a new version of 'args' with
        the next command at the front of the list; will be the empty
        list if there are no more commands on the command line.  Returns
        None if the user asked for help on this command.
        """
        # late import because of mutual dependence between these modules
        from distutils.cmd import Command
        # Pull the current command from the head of the command line
        command = args[0]
        if not command_re.match(command):
            raise SystemExit, "invalid command name '%s'" % command
        self.commands.append(command)
        # Dig up the command class that implements this command, so we
        # 1) know that it's a valid command, and 2) know which options
        # it takes.
        try:
            cmd_class = self.get_command_class(command)
        except DistutilsModuleError, msg:
            raise DistutilsArgError, msg
        # Require that the command class be derived from Command -- want
        # to be sure that the basic "command" interface is implemented.
        if not issubclass(cmd_class, Command):
            raise DistutilsClassError, \
                  "command class %s must subclass Command" % cmd_class
        # Also make sure that the command object provides a list of its
        # known options.
        if not (hasattr(cmd_class, 'user_options') and
                isinstance(cmd_class.user_options, list)):
            raise DistutilsClassError, \
                  ("command class %s must provide " +
                   "'user_options' attribute (a list of tuples)") % \
                  cmd_class
        # If the command class has a list of negative alias options,
        # merge it in with the global negative aliases.
        negative_opt = self.negative_opt
        if hasattr(cmd_class, 'negative_opt'):
            # Copy before updating so the shared dict isn't mutated.
            negative_opt = negative_opt.copy()
            negative_opt.update(cmd_class.negative_opt)
        # Check for help_options in command class.  They have a different
        # format (tuple of four) so we need to preprocess them here.
        if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
            help_options = fix_help_options(cmd_class.help_options)
        else:
            help_options = []
        # All commands support the global options too, just by adding
        # in 'global_options'.
        parser.set_option_table(self.global_options +
                                cmd_class.user_options +
                                help_options)
        parser.set_negative_aliases(negative_opt)
        (args, opts) = parser.getopt(args[1:])
        if hasattr(opts, 'help') and opts.help:
            # --help for this specific command: show help and stop.
            self._show_help(parser, display_options=0, commands=[cmd_class])
            return
        if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
            help_option_found=0
            for (help_option, short, desc, func) in cmd_class.help_options:
                if hasattr(opts, parser.get_attr_name(help_option)):
                    help_option_found=1
                    # The fourth element of a help_options tuple is a
                    # callback invoked when the option is seen.
                    if hasattr(func, '__call__'):
                        func()
                    else:
                        raise DistutilsClassError(
                            "invalid help function %r for help option '%s': "
                            "must be a callable object (function, etc.)"
                            % (func, help_option))
            if help_option_found:
                return
        # Put the options from the command-line into their official
        # holding pen, the 'command_options' dictionary.
        opt_dict = self.get_option_dict(command)
        for (name, value) in vars(opts).items():
            opt_dict[name] = ("command line", value)
        return args
def finalize_options(self):
"""Set final values for all the options on the Distribution
instance, analogous to the .finalize_options() method of Command
objects.
"""
for attr in ('keywords', 'platforms'):
value = getattr(self.metadata, attr)
if value is None:
continue
if isinstance(value, str):
value = [elm.strip() for elm in value.split(',')]
setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
commands=[]):
"""Show help for the setup script command-line in the form of
several lists of command-line options. 'parser' should be a
FancyGetopt instance; do not expect it to be returned in the
same state, as its option table will be reset to make it
generate the correct help text.
If 'global_options' is true, lists the global options:
--verbose, --dry-run, etc. If 'display_options' is true, lists
the "display-only" options: --name, --version, etc. Finally,
lists per-command help for every command name or command class
in 'commands'.
"""
# late import because of mutual dependence between these modules
from distutils.core import gen_usage
from distutils.cmd import Command
if global_options:
if display_options:
options = self._get_toplevel_options()
else:
options = self.global_options
parser.set_option_table(options)
parser.print_help(self.common_usage + "\nGlobal options:")
print('')
if display_options:
parser.set_option_table(self.display_options)
parser.print_help(
"Information display options (just display " +
"information, ignore any commands)")
print('')
for command in self.commands:
if isinstance(command, type) and issubclass(command, Command):
klass = command
else:
klass = self.get_command_class(command)
if (hasattr(klass, 'help_options') and
isinstance(klass.help_options, list)):
parser.set_option_table(klass.user_options +
fix_help_options(klass.help_options))
else:
parser.set_option_table(klass.user_options)
parser.print_help("Options for '%s' command:" % klass.__name__)
print('')
print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
from distutils.core import gen_usage
# User just wants a list of commands -- we'll print it out and stop
# processing now (ie. if they ran "setup --help-commands foo bar",
# we ignore "foo bar").
if self.help_commands:
self.print_commands()
print('')
print(gen_usage(self.script_name))
return 1
# If user supplied any of the "display metadata" options, then
# display that metadata in the order in which the user supplied the
# metadata options.
any_display_options = 0
is_display_option = {}
for option in self.display_options:
is_display_option[option[0]] = 1
for (opt, val) in option_order:
if val and is_display_option.get(opt):
opt = translate_longopt(opt)
value = getattr(self.metadata, "get_"+opt)()
if opt in ['keywords', 'platforms']:
print(','.join(value))
elif opt in ('classifiers', 'provides', 'requires',
'obsoletes'):
print('\n'.join(value))
else:
print(value)
any_display_options = 1
return any_display_options
def print_command_list(self, commands, header, max_length):
"""Print a subset of the list of all commands -- used by
'print_commands()'.
"""
print(header + ":")
for cmd in commands:
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
"""Print out a help message listing all available commands with a
description of each. The list is divided into "standard commands"
(listed in distutils.command.__all__) and "extra commands"
(mentioned in self.cmdclass, but not a standard command). The
descriptions come from the command class attribute
'description'.
"""
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print
self.print_command_list(extra_commands,
"Extra commands",
max_length)
def get_command_list(self):
"""Get a list of (command, description) tuples.
The list is divided into "standard commands" (listed in
distutils.command.__all__) and "extra commands" (mentioned in
self.cmdclass, but not a standard command). The descriptions come
from the command class attribute 'description'.
"""
# Currently this is only used on Mac OS, for the Mac-only GUI
# Distutils interface (by Jack Jansen)
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
rv = []
for cmd in (std_commands + extra_commands):
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
rv.append((cmd, description))
return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
"""Return a list of packages from which commands are loaded."""
pkgs = self.command_packages
if not isinstance(pkgs, list):
if pkgs is None:
pkgs = ''
pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
    def get_command_class(self, command):
        """Return the class that implements the Distutils command named by
        'command'.  First we check the 'cmdclass' dictionary; if the
        command is mentioned there, we fetch the class object from the
        dictionary and return it.  Otherwise we load the command module
        ("distutils.command." + command) and fetch the command class from
        the module.  The loaded class is also stored in 'cmdclass'
        to speed future calls to 'get_command_class()'.

        Raises DistutilsModuleError if the expected module could not be
        found, or if that module does not define the expected class.
        """
        klass = self.cmdclass.get(command)
        if klass:
            return klass
        # Search each command package in turn; the first module that
        # imports successfully and defines the class wins.
        for pkgname in self.get_command_packages():
            module_name = "%s.%s" % (pkgname, command)
            klass_name = command
            try:
                __import__ (module_name)
                module = sys.modules[module_name]
            except ImportError:
                # Not in this package -- try the next one.
                continue
            try:
                klass = getattr(module, klass_name)
            except AttributeError:
                raise DistutilsModuleError, \
                      "invalid command '%s' (no class '%s' in module '%s')" \
                      % (command, klass_name, module_name)
            # Cache so future lookups skip the import machinery.
            self.cmdclass[command] = klass
            return klass
        raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj(self, command, create=1):
"""Return the command object for 'command'. Normally this object
is cached on a previous call to 'get_command_obj()'; if no command
object for 'command' is in the cache, then we either create and
return it (if 'create' is true) or return None.
"""
cmd_obj = self.command_obj.get(command)
if not cmd_obj and create:
if DEBUG:
self.announce("Distribution.get_command_obj(): " \
"creating '%s' command object" % command)
klass = self.get_command_class(command)
cmd_obj = self.command_obj[command] = klass(self)
self.have_run[command] = 0
# Set any options that were supplied in config files
# or on the command line. (NB. support for error
# reporting is lame here: any errors aren't reported
# until 'finalize_options()' is called, which means
# we won't report the source of the error.)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj
    def _set_command_options(self, command_obj, option_dict=None):
        """Set the options for 'command_obj' from 'option_dict'.  Basically
        this means copying elements of a dictionary ('option_dict') to
        attributes of an instance ('command').

        'command_obj' must be a Command instance.  If 'option_dict' is not
        supplied, uses the standard option dictionary for this command
        (from 'self.command_options').
        """
        command_name = command_obj.get_command_name()
        if option_dict is None:
            option_dict = self.get_option_dict(command_name)

        if DEBUG:
            self.announce("  setting options for '%s' command:" % command_name)
        for (option, (source, value)) in option_dict.items():
            if DEBUG:
                self.announce("    %s = %s (from %s)" % (option, value,
                                                         source))
            # Boolean options get their string values coerced via
            # strtobool(); negative-alias options are inverted as well.
            try:
                bool_opts = map(translate_longopt, command_obj.boolean_options)
            except AttributeError:
                bool_opts = []
            try:
                neg_opt = command_obj.negative_opt
            except AttributeError:
                neg_opt = {}

            try:
                is_string = isinstance(value, str)
                if option in neg_opt and is_string:
                    setattr(command_obj, neg_opt[option], not strtobool(value))
                elif option in bool_opts and is_string:
                    setattr(command_obj, option, strtobool(value))
                elif hasattr(command_obj, option):
                    setattr(command_obj, option, value)
                else:
                    raise DistutilsOptionError, \
                          ("error in %s: command '%s' has no such option '%s'"
                           % (source, command_name, option))
            except ValueError, msg:
                raise DistutilsOptionError, msg
def reinitialize_command(self, command, reinit_subcommands=0):
"""Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
"""
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
# -- Methods that operate on the Distribution ----------------------
    def announce(self, msg, level=log.INFO):
        """Log 'msg' via the distutils.log framework at 'level' (INFO by default)."""
        log.log(level, msg)
def run_commands(self):
"""Run each command that was seen on the setup script command line.
Uses the list of commands found and cache of command objects
created by 'get_command_obj()'.
"""
for cmd in self.commands:
self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
    def run_command(self, command):
        """Do whatever it takes to run a command (including nothing at all,
        if the command has already been run).  Specifically: if we have
        already created and run the command named by 'command', return
        silently without doing anything.  If the command named by 'command'
        doesn't even have a command object yet, create one.  Then invoke
        'run()' on that command object (or an existing one).
        """
        # Already been here, done that? then return silently.
        if self.have_run.get(command):
            return

        log.info("running %s", command)
        # get_command_obj() creates and configures the object on first use.
        cmd_obj = self.get_command_obj(command)
        cmd_obj.ensure_finalized()
        cmd_obj.run()
        # Record completion so a second run_command() call is a no-op.
        self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules(self):
return len(self.packages or self.py_modules or []) > 0
    def has_ext_modules(self):
        # NOTE: returns the raw falsy value (None or []) rather than False
        # when there are no extension modules; callers rely on truthiness.
        return self.ext_modules and len(self.ext_modules) > 0
    def has_c_libraries(self):
        # Truthy when the distribution declares any C libraries to build.
        return self.libraries and len(self.libraries) > 0
    def has_modules(self):
        # Truthy when there is any Python code at all (pure or extension).
        return self.has_pure_modules() or self.has_ext_modules()
    def has_headers(self):
        # Truthy when the distribution declares C header files to install.
        return self.headers and len(self.headers) > 0
    def has_scripts(self):
        # Truthy when the distribution declares scripts to install.
        return self.scripts and len(self.scripts) > 0
    def has_data_files(self):
        # Truthy when the distribution declares additional data files.
        return self.data_files and len(self.data_files) > 0
def is_pure(self):
return (self.has_pure_modules() and
not self.has_ext_modules() and
not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
class DistributionMetadata:
    """Dummy class to hold the distribution meta-data: name, version,
    author, and so forth.
    """

    # Basenames of the accessors that Distribution binds onto itself as
    # self.get_XXX.  NOTE(review): "license" appears twice in this tuple;
    # harmless, but presumably unintentional.
    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "license", "classifiers",
                         "download_url",
                         # PEP 314
                         "provides", "requires", "obsoletes",
                         )

    def __init__(self, path=None):
        # If 'path' is given, populate the metadata from an existing
        # PKG-INFO file; otherwise start with every field set to None.
        if path is not None:
            self.read_pkg_file(open(path))
        else:
            self.name = None
            self.version = None
            self.author = None
            self.author_email = None
            self.maintainer = None
            self.maintainer_email = None
            self.url = None
            self.license = None
            self.description = None
            self.long_description = None
            self.keywords = None
            self.platforms = None
            self.classifiers = None
            self.download_url = None
            # PEP 314
            self.provides = None
            self.requires = None
            self.obsoletes = None

    def read_pkg_file(self, file):
        """Reads the metadata values from a file object."""
        msg = message_from_file(file)

        # PKG-INFO uses the literal string 'UNKNOWN' for absent values;
        # map that back to None.
        def _read_field(name):
            value = msg[name]
            if value == 'UNKNOWN':
                return None
            return value

        def _read_list(name):
            values = msg.get_all(name, None)
            if values == []:
                return None
            return values

        metadata_version = msg['metadata-version']
        self.name = _read_field('name')
        self.version = _read_field('version')
        self.description = _read_field('summary')
        # we are filling author only.
        self.author = _read_field('author')
        self.maintainer = None
        self.author_email = _read_field('author-email')
        self.maintainer_email = None
        self.url = _read_field('home-page')
        self.license = _read_field('license')

        if 'download-url' in msg:
            self.download_url = _read_field('download-url')
        else:
            self.download_url = None

        self.long_description = _read_field('description')
        self.description = _read_field('summary')

        if 'keywords' in msg:
            self.keywords = _read_field('keywords').split(',')

        self.platforms = _read_list('platform')
        self.classifiers = _read_list('classifier')

        # PEP 314 - these fields only exist in 1.1
        if metadata_version == '1.1':
            self.requires = _read_list('requires')
            self.provides = _read_list('provides')
            self.obsoletes = _read_list('obsoletes')
        else:
            self.requires = None
            self.provides = None
            self.obsoletes = None

    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree.
        """
        pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w')
        try:
            self.write_pkg_file(pkg_info)
        finally:
            pkg_info.close()

    def write_pkg_file(self, file):
        """Write the PKG-INFO format data to a file object.
        """
        # Any PEP 314 field present bumps the metadata version to 1.1.
        version = '1.0'
        if (self.provides or self.requires or self.obsoletes or
            self.classifiers or self.download_url):
            version = '1.1'

        self._write_field(file, 'Metadata-Version', version)
        self._write_field(file, 'Name', self.get_name())
        self._write_field(file, 'Version', self.get_version())
        self._write_field(file, 'Summary', self.get_description())
        self._write_field(file, 'Home-page', self.get_url())
        self._write_field(file, 'Author', self.get_contact())
        self._write_field(file, 'Author-email', self.get_contact_email())
        self._write_field(file, 'License', self.get_license())
        if self.download_url:
            self._write_field(file, 'Download-URL', self.download_url)

        # Multi-line descriptions need RFC 822 continuation escaping.
        long_desc = rfc822_escape(self.get_long_description())
        self._write_field(file, 'Description', long_desc)

        keywords = ','.join(self.get_keywords())
        if keywords:
            self._write_field(file, 'Keywords', keywords)

        self._write_list(file, 'Platform', self.get_platforms())
        self._write_list(file, 'Classifier', self.get_classifiers())

        # PEP 314
        self._write_list(file, 'Requires', self.get_requires())
        self._write_list(file, 'Provides', self.get_provides())
        self._write_list(file, 'Obsoletes', self.get_obsoletes())

    def _write_field(self, file, name, value):
        # Write a single "Name: value" header line.
        file.write('%s: %s\n' % (name, self._encode_field(value)))

    def _write_list (self, file, name, values):
        # Write one header line per value (repeatable fields).
        for value in values:
            self._write_field(file, name, value)

    def _encode_field(self, value):
        # Coerce a value to a byte string (Python 2: unicode is encoded
        # with PKG_INFO_ENCODING; everything else goes through str()).
        if value is None:
            return None
        if isinstance(value, unicode):
            return value.encode(PKG_INFO_ENCODING)
        return str(value)

    # -- Metadata query methods ----------------------------------------
    # Absent values are reported as "UNKNOWN" (or an empty/default list),
    # matching the PKG-INFO file conventions.

    def get_name(self):
        return self.name or "UNKNOWN"

    def get_version(self):
        return self.version or "0.0.0"

    def get_fullname(self):
        return "%s-%s" % (self.get_name(), self.get_version())

    def get_author(self):
        return self._encode_field(self.author) or "UNKNOWN"

    def get_author_email(self):
        return self.author_email or "UNKNOWN"

    def get_maintainer(self):
        return self._encode_field(self.maintainer) or "UNKNOWN"

    def get_maintainer_email(self):
        return self.maintainer_email or "UNKNOWN"

    def get_contact(self):
        # Maintainer takes precedence over author.
        return (self._encode_field(self.maintainer) or
                self._encode_field(self.author) or "UNKNOWN")

    def get_contact_email(self):
        return self.maintainer_email or self.author_email or "UNKNOWN"

    def get_url(self):
        return self.url or "UNKNOWN"

    def get_license(self):
        return self.license or "UNKNOWN"
    # Deprecated British spelling kept for backward compatibility.
    get_licence = get_license

    def get_description(self):
        return self._encode_field(self.description) or "UNKNOWN"

    def get_long_description(self):
        return self._encode_field(self.long_description) or "UNKNOWN"

    def get_keywords(self):
        return self.keywords or []

    def get_platforms(self):
        return self.platforms or ["UNKNOWN"]

    def get_classifiers(self):
        return self.classifiers or []

    def get_download_url(self):
        return self.download_url or "UNKNOWN"

    # PEP 314
    def get_requires(self):
        return self.requires or []

    def set_requires(self, value):
        # Validate each entry as a version predicate before storing.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.requires = value

    def get_provides(self):
        return self.provides or []

    def set_provides(self, value):
        value = [v.strip() for v in value]
        for v in value:
            # Validate each entry as a "provides" specification.
            import distutils.versionpredicate
            distutils.versionpredicate.split_provision(v)
        self.provides = value

    def get_obsoletes(self):
        return self.obsoletes or []

    def set_obsoletes(self, value):
        # Validate each entry as a version predicate before storing.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.obsoletes = value
def fix_help_options(options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt.
    """
    # Drop everything past the third element (the callback) of each tuple.
    return [help_tuple[:3] for help_tuple in options]
| unlicense |
blakfeld/ansible-modules-extras | cloud/amazon/ec2_eni.py | 10 | 14272 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
version_added: "2.0"
author: Rob White, wimnat [at] gmail.com, @wimnat
options:
eni_id:
description:
- The ID of the ENI
required: false
default: null
instance_id:
description:
- Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
required: false
default: null
private_ip_address:
description:
- Private IP address.
required: false
default: null
subnet_id:
description:
- ID of subnet in which to create the ENI. Only required when state=present.
required: true
description:
description:
- Optional description of the ENI.
required: false
default: null
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present.
required: false
default: null
state:
description:
- Create or delete ENI.
required: false
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
required: false
default: 0
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
required: false
default: no
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_terminaton flag
- ec2_eni:
eni_id: {{ "eni.interface.id" }}
delete_on_termination: true
'''
import time
import xml.etree.ElementTree as ET
import re
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_error_message(xml_string):
    """Pull the first <Message> element's text out of an EC2 error document.

    :param xml_string: raw XML body from a BotoServerError.
    :return: the message text, or None when no Message element is present.
    """
    document = ET.fromstring(xml_string)
    message = document.find('.//Message')
    return message.text if message is not None else None
def get_eni_info(interface):
    """Flatten a boto NetworkInterface into a plain dict for exit_json.

    Includes an 'attachment' sub-dict only when the interface is
    attached to an instance.
    """
    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': {group.id: group.name for group in interface.groups},
    }

    attachment = interface.attachment
    if attachment is not None:
        info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }

    return info
def wait_for_eni(eni, status):
    """Poll a boto NetworkInterface until it reaches the desired state.

    :param eni: boto NetworkInterface object; refreshed in place via update().
    :param status: target state, either "attached" or "detached".

    NOTE(review): there is no timeout -- this loops forever if the
    interface never reaches the requested state.
    """
    while True:
        # Sleep first so the API has a chance to reflect the change
        # before the initial check.
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break
def create_eni(connection, module):
    """Create a new ENI (unless an identical one already exists) and
    optionally attach it to an instance.

    Exits the module via exit_json with the interface details, or
    fail_json on an EC2 API error.
    """
    instance_id = module.params.get("instance_id")
    # The literal string 'None' is how a playbook requests "no instance";
    # normalize it so the attach step below is skipped.
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    changed = False

    try:
        # Idempotency: reuse an existing identical interface when present.
        eni = compare_eni(connection, module)
        if eni is None:
            eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
            if instance_id is not None:
                try:
                    eni.attach(instance_id, device_index)
                except BotoServerError as ex:
                    # Roll back the orphaned interface before re-raising.
                    eni.delete()
                    raise
                # Wait to allow creation / attachment to finish
                wait_for_eni(eni, "attached")
                eni.update()
            changed = True
    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, module):
    """Update an existing ENI to match the module parameters.

    Compares each requested attribute (description, security groups,
    source/dest check, delete-on-termination) against the live interface
    and only issues API calls for actual differences; also handles
    attaching to or detaching from an instance. Exits via
    module.exit_json/fail_json.
    """
    eni_id = module.params.get("eni_id")
    instance_id = module.params.get("instance_id")
    # The literal string 'None' is how a playbook requests an explicit detach.
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    force_detach = module.params.get("force_detach")
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    changed = False

    try:
        # Get the eni with the eni_id specified
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if description is not None:
            if eni.description != description:
                connection.modify_network_interface_attribute(eni.id, "description", description)
                changed = True
        if security_groups is not None:
            # Order-insensitive comparison of current vs requested groups.
            if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups)
                changed = True
        if source_dest_check is not None:
            if eni.source_dest_check != source_dest_check:
                connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
                changed = True
        if delete_on_termination is not None:
            # deleteOnTermination lives on the attachment, so the interface
            # must currently be attached for this to be modifiable.
            if eni.attachment is not None:
                if eni.attachment.delete_on_termination is not delete_on_termination:
                    connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
                    changed = True
            else:
                module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached")
        if eni.attachment is not None and instance_id is None and do_detach is True:
            eni.detach(force_detach)
            wait_for_eni(eni, "detached")
            changed = True
        else:
            if instance_id is not None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True

    except BotoServerError as e:
        # Bug fix: a stray Python-2 "print e" debug statement used to live
        # here. Ansible modules communicate over stdout as JSON, so any
        # print corrupts the module output; report via fail_json only.
        module.fail_json(msg=get_error_message(e.args[2]))

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
    """Delete the ENI given by eni_id, optionally force-detaching it first.

    Exits with changed=False when the interface no longer exists, so
    state=absent stays idempotent.
    """
    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if force_detach is True:
            if eni.attachment is not None:
                eni.detach(force_detach)
                # Wait to allow detachment to finish
                wait_for_eni(eni, "detached")
                eni.update()
            eni.delete()
            changed = True
        else:
            eni.delete()
            changed = True

        module.exit_json(changed=changed)
    except BotoServerError as e:
        msg = get_error_message(e.args[2])
        regex = re.compile('The networkInterface ID \'.*\' does not exist')
        # A missing interface is a success for deletion: report "no change"
        # instead of failing.
        if regex.search(msg) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=get_error_message(e.args[2]))
def compare_eni(connection, module):
    """Return an existing ENI that matches the requested subnet, private
    IP, description and security groups, or None when there is no match.

    Used by create_eni for idempotency.
    """
    eni_id = module.params.get("eni_id")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')

    try:
        all_eni = connection.get_all_network_interfaces(eni_id)

        for eni in all_eni:
            remote_security_groups = get_sec_group_list(eni.groups)
            # NOTE(review): unlike modify_eni, this group comparison is
            # order-sensitive (no sorted()) -- confirm that is intended.
            if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups):
                return eni

    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))

    return None
def get_sec_group_list(groups):
    """Return the security group ids of *groups* as encoded byte strings."""
    return [group.id.encode() for group in groups]
def main():
    """Module entry point: build the argument spec, connect to EC2, and
    dispatch to the create/modify/delete handlers based on state/eni_id.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id = dict(default=None),
            instance_id = dict(default=None),
            private_ip_address = dict(),
            subnet_id = dict(),
            description = dict(),
            security_groups = dict(type='list'),
            device_index = dict(default=0, type='int'),
            state = dict(default='present', choices=['present', 'absent']),
            force_detach = dict(default='no', type='bool'),
            source_dest_check = dict(default=None, type='bool'),
            delete_on_termination = dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        # NOTE(review): Python-2-only except syntax; this module predates
        # the py3 migration.
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")
    eni_id = module.params.get("eni_id")

    if state == 'present':
        if eni_id is None:
            # No existing interface given: create (or reuse an identical one).
            if module.params.get("subnet_id") is None:
                module.fail_json(msg="subnet_id must be specified when state=present")
            create_eni(connection, module)
        else:
            modify_eni(connection, module)
    elif state == 'absent':
        if eni_id is None:
            module.fail_json(msg="eni_id must be specified")
        else:
            delete_eni(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| gpl-3.0 |
jrfastab/rocker-net-next | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	""" This class provides a tree representation of the functions
	call stack. If a function has no parent in the kernel (interrupt,
	syscall, kernel thread...) then it is attached to a virtual parent
	called ROOT.
	"""
	# Shared virtual root; assigned a CallTree instance by main().
	ROOT = None

	def __init__(self, func, time = None, parent = None):
		# func: function name; time: timestamp string (None for ROOT
		# and synthetic parents); parent: enclosing call or ROOT.
		self._func = func
		self._time = time
		if parent is None:
			self._parent = CallTree.ROOT
		else:
			self._parent = parent
		self._children = []

	def calls(self, func, calltime):
		""" If a function calls another one, call this method to insert it
		into the tree at the appropriate place.
		@return: A reference to the newly created child node.
		"""
		child = CallTree(func, calltime, self)
		self._children.append(child)
		return child

	def getParent(self, func):
		""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		on a parent, then create it as new child of root
		@return: A reference to the parent.
		"""
		# Walk up the ancestry looking for a frame named func.
		tree = self
		while tree != CallTree.ROOT and tree._func != func:
			tree = tree._parent
		if tree == CallTree.ROOT:
			# Caller was never seen: treat it as a new top-level frame.
			child = CallTree.ROOT.calls(func, None)
			return child
		return tree

	def __repr__(self):
		return self.__toString("", True)

	def __toString(self, branch, lastChild):
		# Render this node and, recursively, its children as an ASCII
		# tree. branch carries the accumulated prefix; lastChild drops
		# the vertical bar under the final sibling.
		if self._time is not None:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		else:
			s = "%s----%s\n" % (branch, self._func)

		i = 0
		if lastChild:
			branch = branch[:-1] + " "
		while i < len(self._children):
			if i != len(self._children) - 1:
				s += "%s" % self._children[i].__toString(branch +\
								" |", False)
			else:
				s += "%s" % self._children[i].__toString(branch +\
								" |", True)
			i += 1
		return s
class BrokenLineException(Exception):
	"""Raised for a trace line left incomplete by the broken pipe;
	processing stops at the first such line.
	"""
	pass
class CommentLineException(Exception):
	"""Raised for comment lines (such as the trace file header);
	these lines are simply skipped.
	"""
	pass
def parseLine(line):
	"""Split one ftrace "function" tracer line into its parts.

	Returns a (calltime, callee, caller) tuple of strings.
	Raises CommentLineException for header/comment lines and
	BrokenLineException for lines that do not match the expected
	"...] time: callee <-caller" format.
	"""
	stripped = line.strip()
	if stripped.startswith("#"):
		raise CommentLineException
	match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
	if match is None:
		raise BrokenLineException
	return match.group(1), match.group(2), match.group(3)
def main():
	"""Read an ftrace function trace from stdin and print it as a tree."""
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT

	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# Truncated final line (broken pipe): stop processing.
			break
		except CommentLineException:
			continue
		# Attach the callee under the nearest ancestor named caller.
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)
	# Python 2 print statement: this script targets python2.
	print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
joshuajan/odoo | addons/product/_common.py | 111 | 1418 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import math
def rounding(f, r):
    """Deprecated alias for tools.float_round.

    Rounds *f* to the precision step *r* (e.g. 0.01). Kept only for
    backward compatibility; new code should call tools.float_round.
    """
    # TODO for trunk: log deprecation warning
    # _logger.warning("Deprecated rounding method, please use tools.float_round to round floats.")
    return tools.float_round(f, precision_rounding=r)
# TODO for trunk: add rounding method parameter to tools.float_round and use this method as hook
def ceiling(f, r):
    """Round *f* up to the next multiple of the step *r*.

    A falsy step (0 or None) disables rounding and returns *f* unchanged.
    """
    return math.ceil(f / r) * r if r else f
| agpl-3.0 |
ewiseblatt/spinnaker | unittest/buildtool/test_util.py | 2 | 6983 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import tempfile
import unittest
import yaml
from mock import patch
from buildtool import (
check_subprocess_sequence,
check_subprocess,
MetricsManager)
def init_runtime(options=None):
  """Configure logging and start file-based metrics for a unit-test run.

  When no options object is supplied, a minimal one is synthesized with
  the monitoring settings MetricsManager requires.
  NOTE(review): a caller-supplied options object is assumed to already
  carry the monitoring_* fields -- confirm against callers.
  """
  logging.basicConfig(
      format='%(levelname).1s %(asctime)s.%(msecs)03d %(message)s',
      datefmt='%H:%M:%S',
      level=logging.DEBUG)

  if not options:
    class Options(object):
      pass
    options = Options()
    options.metric_name_scope = 'unittest'
    options.monitoring_flush_frequency = -1
    options.monitoring_system = 'file'
    options.monitoring_enabled = False
  MetricsManager.startup_metrics(options)
# These are used to define the standard test repositories
# Most services follow the "normal" convention (service name == repo name);
# 'monitoring-daemon' lives in the differently named 'spinnaker-monitoring'
# repo, and 'spinnaker.github.io' is an extra non-service repo.
NORMAL_SERVICE = 'gate'
NORMAL_REPO = NORMAL_SERVICE
OUTLIER_SERVICE = 'monitoring-daemon'
OUTLIER_REPO = 'spinnaker-monitoring'
EXTRA_REPO = 'spinnaker.github.io'
EXTRA_SERVICE = EXTRA_REPO

STANDARD_GIT_HOST = 'test-gitserver'
OUTLIER_GIT_HOST = STANDARD_GIT_HOST
STANDARD_GIT_OWNER = 'spinnaker'
OUTLIER_GIT_OWNER = 'spinnaker'

# Version tags and branch names baked into the generated test repositories.
BASE_VERSION_TAG = 'version-7.8.9'
PATCH_VERSION_TAG = 'version-7.8.10'
PATCH_VERSION_NUMBER = '7.8.10'
PATCH_BRANCH = 'patch'
UNTAGGED_BRANCH = 'untagged-branch'
def make_standard_git_repo(git_dir):
  """Initialize local standard test repos.

  These are used by tests that interact with a git repository.
  Creates a repo at git_dir containing:
    * master, tagged with BASE_VERSION_TAG
    * a PATCH_BRANCH with one fix commit on top of the base
    * a '<repo>-branch' with a commit unique to this repo
    * an UNTAGGED_BRANCH whose head carries no version tag

  Returns a dict mapping branch name -> commit id, plus
  'ORIGIN' -> git_dir.
  """
  branch_commits = {'ORIGIN': git_dir}
  repo_name = os.path.basename(git_dir)
  run_git = lambda cmd: 'git %s' % cmd

  os.makedirs(git_dir)
  logging.debug('Initializing git repository in "%s"', git_dir)
  check_subprocess_sequence(
      [
          'touch %s-basefile.txt' % repo_name,
          run_git('init'),
          run_git('add %s-basefile.txt' % repo_name),
          run_git('commit -a -m "feat(first): first commit"'),
          run_git('tag %s HEAD' % BASE_VERSION_TAG),
      ],
      cwd=git_dir)
  branch_commits['master'] = check_subprocess('git rev-parse HEAD', cwd=git_dir)

  # Patch branch: one fix commit on top of the tagged base.
  check_subprocess_sequence(
      [
          run_git('checkout -b ' + PATCH_BRANCH),
          'touch %s-patchfile.txt' % repo_name,
          run_git('add %s-patchfile.txt' % repo_name),
          run_git('commit -a -m "fix(patch): added patch change"')
      ],
      cwd=git_dir)
  branch_commits[PATCH_BRANCH] = check_subprocess(
      'git rev-parse HEAD', cwd=git_dir)

  # Repo-specific branch with a commit unique to this repo.
  check_subprocess_sequence(
      [
          run_git('checkout master'),
          run_git('checkout -b %s-branch' % repo_name),
          'touch %s-unique.txt' % repo_name,
          run_git('add %s-unique.txt' % repo_name),
          run_git('commit -a -m "chore(uniq): unique commit"')
      ],
      cwd=git_dir)
  branch_commits['%s-branch' % repo_name] = check_subprocess(
      'git rev-parse HEAD', cwd=git_dir)

  # Branch whose head commit carries no version tag.
  check_subprocess_sequence(
      [
          run_git('checkout master'),
          run_git('checkout -b %s' % UNTAGGED_BRANCH),
          'touch %s-untagged.txt' % repo_name,
          run_git('add %s-untagged.txt' % repo_name),
          run_git('commit -a -m "chore(uniq): untagged commit"'),
      ],
      cwd=git_dir)
  branch_commits[UNTAGGED_BRANCH] = check_subprocess(
      'git rev-parse HEAD', cwd=git_dir)
  return branch_commits
# All repos created by make_all_standard_git_repos; the BOM variant
# excludes the extra (non-service) repository.
ALL_STANDARD_TEST_REPO_NAMES = [NORMAL_REPO, OUTLIER_REPO, EXTRA_REPO]
ALL_STANDARD_TEST_BOM_REPO_NAMES = [NORMAL_REPO, OUTLIER_REPO]
def make_all_standard_git_repos(base_dir):
  """Create a git repository for each standard test repo under base_dir.

  Returns a dict mapping repo name to the branch->commit map produced
  by make_standard_git_repo.
  """
  layout = [
      (STANDARD_GIT_OWNER, NORMAL_REPO),
      (STANDARD_GIT_OWNER, EXTRA_REPO),
      (OUTLIER_GIT_OWNER, OUTLIER_REPO),
  ]
  result = {}
  for owner, repo_name in layout:
    repo_path = os.path.join(base_dir, owner, repo_name)
    result[repo_name] = make_standard_git_repo(repo_path)
  return result
class BaseTestFixture(unittest.TestCase):
  """Base fixture providing a per-class temp dir and per-test options."""

  @classmethod
  def setUpClass(cls):
    logging.debug('BEGIN setUpClass %s', cls.__name__)
    cls.base_temp_dir = tempfile.mkdtemp(prefix=cls.__name__)

  @classmethod
  def tearDownClass(cls):
    # Remove everything the tests wrote under the class temp dir.
    shutil.rmtree(cls.base_temp_dir)

  def make_test_options(self):
    """Build a minimal options object scoped to the current test."""
    class Options(object):
      pass
    options = Options()
    options.command = self._testMethodName
    options.input_dir = os.path.join(self.test_root, 'input_dir')
    options.output_dir = os.path.join(self.test_root, 'output_dir')
    return options

  def setUp(self):
    # Each test method gets its own subtree under the class temp dir.
    self.test_root = os.path.join(self.base_temp_dir, self._testMethodName)
    self.options = self.make_test_options()
class BaseGitRepoTestFixture(BaseTestFixture):
  """Fixture that materializes the standard git repos once per class and
  loads a golden BOM adjusted to reference them."""

  @classmethod
  def setUpClass(cls):
    super(BaseGitRepoTestFixture, cls).setUpClass()
    cls.repo_commit_map = make_all_standard_git_repos(cls.base_temp_dir)
    source_path = os.path.join(os.path.dirname(__file__),
                               'standard_test_bom.yml')

    # Adjust the golden bom so it references the details of
    # the test instance specific origin repo we just created in test_util.
    with open(source_path, 'r') as stream:
      cls.golden_bom = yaml.safe_load(stream.read())

    # Change the bom's default gitPrefix to our origin root
    cls.golden_bom['artifactSources']['gitPrefix'] = (
        os.path.dirname(cls.repo_commit_map[NORMAL_REPO]['ORIGIN']))

    # Update the service commit id's in the BOM to the actual id's
    # so we can check them out later.
    services = cls.golden_bom['services']
    for name, entry in services.items():
      repo_name = name
      # Both monitoring services live in the spinnaker-monitoring repo.
      if name in ['monitoring-third-party', 'monitoring-daemon']:
        repo_name = name = 'spinnaker-monitoring'
      if name == OUTLIER_SERVICE:
        repo_name = OUTLIER_REPO
      entry['commit'] = cls.repo_commit_map[repo_name][PATCH_BRANCH]
    logging.debug('FINISH setUpClass %s', cls.__name__)

  @classmethod
  def to_origin(cls, repo_name):
    """Path of the local "origin" repository for repo_name."""
    return cls.repo_commit_map[repo_name]['ORIGIN']

  def patch_function(self, name):
    """Patch a free function for the duration of the test; return the mock."""
    patcher = patch(name)
    hook = patcher.start()
    self.addCleanup(patcher.stop)
    return hook

  def patch_method(self, klas, method):
    """Patch a method on klas for the duration of the test; return the mock."""
    patcher = patch.object(klas, method)
    hook = patcher.start()
    self.addCleanup(patcher.stop)
    return hook

  def setUp(self):
    super(BaseGitRepoTestFixture, self).setUp()
    self.options.github_repository_root = self.base_temp_dir
| apache-2.0 |
jorik041/plaso | tests/parsers/chrome_preferences.py | 3 | 2101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Chrome Preferences file parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import chrome_preferences as chrome_preferences_formatter
from plaso.lib import timelib
from plaso.parsers import chrome_preferences
from tests.parsers import test_lib
class ChromePreferencesParserTest(test_lib.ParserTestCase):
  """Tests for the Google Chrome Preferences file parser."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._parser = chrome_preferences.ChromePreferencesParser()

  def testParseFile(self):
    """Tests parsing a default profile Preferences file."""
    test_file = self._GetTestFilePath([u'Preferences'])
    event_queue_consumer = self._ParseFile(self._parser, test_file)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # event_object[0] from the sample is mgndgikekgjfcpckkfioiadnlibdjbkf
    event_object = event_objects[0]
    self.assertIsInstance(
        event_object, chrome_preferences.ChromeExtensionInstallationEvent)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2014-11-05 18:31:24.154837')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    expected_id = u'mgndgikekgjfcpckkfioiadnlibdjbkf'
    self.assertEqual(event_object.extension_id, expected_id)
    expected_name = u'Chrome'
    self.assertEqual(event_object.extension_name, expected_name)
    expected_path = (
        u'C:\\Program Files\\Google\\Chrome\\Application\\38.0.2125.111\\'
        u'resources\\chrome_app')
    self.assertEqual(event_object.path, expected_path)
    # Verify both the long and the shortened rendered message strings.
    expected_msg = (
        u'CRX ID: {0:s} CRX Name: {1:s} Path: {2:s}'.format(
            expected_id, expected_name, expected_path))
    expected_short_path = (
        u'C:\\Program Files\\Google\\Chrome\\Application\\3...')
    expected_short = (u'{0:s} {1:s}'.format(expected_id, expected_short_path))
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Orochimarufan/youtube-dl | youtube_dl/extractor/nowness.py | 54 | 6024 | # coding: utf-8
from __future__ import unicode_literals
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
sanitized_Request,
)
class NownessBaseIE(InfoExtractor):
    # Shared helpers for the Nowness extractors below.

    def _extract_url_result(self, post):
        # Map a Nowness API "post" onto the appropriate hosting
        # extractor (Brightcove, Vimeo or YouTube). Returns None for
        # non-video posts and unsupported sources.
        if post['type'] == 'video':
            for media in post['media']:
                if media['type'] == 'video':
                    video_id = media['content']
                    source = media['source']
                    if source == 'brightcove':
                        # The Brightcove URL is embedded in the player
                        # JavaScript; try the legacy extractor first.
                        player_code = self._download_webpage(
                            'http://www.nowness.com/iframe?id=%s' % video_id, video_id,
                            note='Downloading player JavaScript',
                            errnote='Unable to download player JavaScript')
                        bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code)
                        if bc_url:
                            return self.url_result(bc_url, BrightcoveLegacyIE.ie_key())
                        bc_url = BrightcoveNewIE._extract_url(self, player_code)
                        if bc_url:
                            return self.url_result(bc_url, BrightcoveNewIE.ie_key())
                        raise ExtractorError('Could not find player definition')
                    elif source == 'vimeo':
                        return self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                    elif source == 'youtube':
                        return self.url_result(video_id, 'Youtube')
                    elif source == 'cinematique':
                        # youtube-dl currently doesn't support cinematique
                        # return self.url_result('http://cinematique.com/embed/%s' % video_id, 'Cinematique')
                        pass

    def _api_request(self, url, request_path):
        # Query the Nowness JSON API; the language header depends on
        # whether this is the Chinese site.
        display_id = self._match_id(url)
        request = sanitized_Request(
            'http://api.nowness.com/api/' + request_path % display_id,
            headers={
                'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us',
            })
        return display_id, self._download_json(request, display_id)
class NownessIE(NownessBaseIE):
    # Extractor for a single Nowness story page.
    IE_NAME = 'nowness'
    _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/(?:story|(?:series|category)/[^/]+)/(?P<id>[^/]+?)(?:$|[?#])'
    _TESTS = [{
        'url': 'https://www.nowness.com/story/candor-the-art-of-gesticulation',
        'md5': '068bc0202558c2e391924cb8cc470676',
        'info_dict': {
            'id': '2520295746001',
            'ext': 'mp4',
            'title': 'Candor: The Art of Gesticulation',
            'description': 'Candor: The Art of Gesticulation',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1446745676,
            'upload_date': '20151105',
            'uploader_id': '2385340575001',
        },
        'add_ie': ['BrightcoveNew'],
    }, {
        'url': 'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr',
        'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
        'info_dict': {
            'id': '3716354522001',
            'ext': 'mp4',
            'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
            'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1407315371,
            'upload_date': '20140806',
            'uploader_id': '2385340575001',
        },
        'add_ie': ['BrightcoveNew'],
    }, {
        # vimeo
        'url': 'https://www.nowness.com/series/nowness-picks/jean-luc-godard-supercut',
        'md5': '9a5a6a8edf806407e411296ab6bc2a49',
        'info_dict': {
            'id': '130020913',
            'ext': 'mp4',
            'title': 'Bleu, Blanc, Rouge - A Godard Supercut',
            'description': 'md5:f0ea5f1857dffca02dbd37875d742cec',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20150607',
            'uploader': 'Cinema Sem Lei',
            'uploader_id': 'cinemasemlei',
        },
        'add_ie': ['Vimeo'],
    }]

    def _real_extract(self, url):
        # Look the post up by its URL slug, then delegate to the base
        # class to pick the hosting extractor.
        _, post = self._api_request(url, 'post/getBySlug/%s')
        return self._extract_url_result(post)
class NownessPlaylistIE(NownessBaseIE):
    # Extractor for Nowness playlist pages (numeric playlist ids).
    IE_NAME = 'nowness:playlist'
    _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/playlist/(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.nowness.com/playlist/3286/i-guess-thats-why-they-call-it-the-blues',
        'info_dict': {
            'id': '3286',
        },
        'playlist_mincount': 8,
    }

    def _real_extract(self, url):
        # Fetch every post in the playlist and resolve each to its
        # hosting extractor.
        playlist_id, playlist = self._api_request(url, 'post?PlaylistId=%s')
        entries = [self._extract_url_result(item) for item in playlist['items']]
        return self.playlist_result(entries, playlist_id)
class NownessSeriesIE(NownessBaseIE):
    # Extractor for Nowness series pages (slug-addressed collections).
    IE_NAME = 'nowness:series'
    _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/series/(?P<id>[^/]+?)(?:$|[?#])'
    _TEST = {
        'url': 'https://www.nowness.com/series/60-seconds',
        'info_dict': {
            'id': '60',
            'title': '60 Seconds',
            'description': 'One-minute wisdom in a new NOWNESS series',
        },
        'playlist_mincount': 4,
    }

    def _real_extract(self, url):
        display_id, series = self._api_request(url, 'series/getBySlug/%s')
        entries = [self._extract_url_result(post) for post in series['posts']]
        # Title/description come from the first translation entry when
        # the API provides one.
        series_title = None
        series_description = None
        translations = series.get('translations', [])
        if translations:
            series_title = translations[0].get('title') or translations[0]['seoTitle']
            series_description = translations[0].get('seoDescription')
        return self.playlist_result(
            entries, compat_str(series['id']), series_title, series_description)
| unlicense |
sileht/aodh | aodh/tests/api/v2/test_acl_scenarios.py | 1 | 4386 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test ACL."""
import datetime
import hashlib
import json
import mock
from oslo_utils import timeutils
import webtest
from aodh.api import app
from aodh.tests.api import v2
from aodh.tests import db as tests_db
VALID_TOKEN = '4562138218392831'
VALID_TOKEN2 = '4562138218392832'
class FakeMemcache(object):
    """In-memory stand-in for the keystone auth_token cache.

    Serves two pre-baked tokens: VALID_TOKEN (admin role) and
    VALID_TOKEN2 (Member role). Lookups may use either the raw token
    or its sha256 hex digest, mirroring how auth_token hashes keys.
    """
    TOKEN_HASH = hashlib.sha256(VALID_TOKEN.encode('utf-8')).hexdigest()
    TOKEN2_HASH = hashlib.sha256(VALID_TOKEN2.encode('utf-8')).hexdigest()

    def get(self, key):
        # Return a JSON-encoded (token-data, expiry) tuple for known
        # tokens; None (implicitly) for anything else.
        if (key == "tokens/%s" % VALID_TOKEN or
                key == "tokens/%s" % self.TOKEN_HASH):
            dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
            return json.dumps(({'access': {
                'token': {'id': VALID_TOKEN,
                          'expires': timeutils.isotime(dt)},
                'user': {
                    'id': 'user_id1',
                    'name': 'user_name1',
                    'tenantId': '123i2910',
                    'tenantName': 'mytenant',
                    'roles': [
                        {'name': 'admin'},
                    ]},
            }}, timeutils.isotime(dt)))
        if (key == "tokens/%s" % VALID_TOKEN2 or
                key == "tokens/%s" % self.TOKEN2_HASH):
            dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
            return json.dumps(({'access': {
                'token': {'id': VALID_TOKEN2,
                          'expires': timeutils.isotime(dt)},
                'user': {
                    'id': 'user_id2',
                    'name': 'user-good',
                    'tenantId': 'project-good',
                    'tenantName': 'goodies',
                    'roles': [
                        {'name': 'Member'},
                    ]},
            }}, timeutils.isotime(dt)))

    @staticmethod
    def set(key, value, **kwargs):
        # Cache writes are ignored in tests.
        pass
class TestAPIACL(v2.FunctionalTest,
                 tests_db.MixinTestsWithBackendScenarios):
    """API access-control tests using the FakeMemcache token cache."""

    def setUp(self):
        super(TestAPIACL, self).setUp()
        # Injected into WSGI environ so auth_token uses our fake cache.
        self.environ = {'fake.cache': FakeMemcache()}

    def get_json(self, path, expect_errors=False, headers=None,
                 q=None, **params):
        # Same as the base helper, but always passes the fake-cache environ.
        return super(TestAPIACL, self).get_json(path,
                                                expect_errors=expect_errors,
                                                headers=headers,
                                                q=q or [],
                                                extra_environ=self.environ,
                                                **params)

    def _make_app(self):
        self.CONF.set_override("cache", "fake.cache", 'keystone_authtoken')
        file_name = self.path_get('etc/aodh/api_paste.ini')
        self.CONF.set_override("paste_config", file_name, "api")
        # We need the other call to prepare_service in app.py to return the
        # same tweaked conf object.
        with mock.patch('aodh.service.prepare_service') as ps:
            ps.return_value = self.CONF
            return webtest.TestApp(app.load_app(conf=self.CONF))

    def test_non_authenticated(self):
        # No token at all: the API must reject with 401.
        response = self.get_json('/meters', expect_errors=True)
        self.assertEqual(401, response.status_int)

    def test_authenticated_wrong_role(self):
        # A Member token must not be accepted for this resource.
        response = self.get_json('/meters',
                                 expect_errors=True,
                                 headers={
                                     "X-Roles": "Member",
                                     "X-Tenant-Name": "admin",
                                     "X-Project-Id":
                                     "bc23a9d531064583ace8f67dad60f6bb",
                                 })
        self.assertEqual(401, response.status_int)
| apache-2.0 |
appneta/boto | tests/integration/cognito/identity/test_cognito_identity.py | 112 | 2545 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.cognito.identity.exceptions import ResourceNotFoundException
from tests.integration.cognito import CognitoTest
class TestCognitoIdentity(CognitoTest):
    """
    Test Cognitoy identity pools operations since individual Cognito identities
    require an AWS account ID.
    """
    def test_cognito_identity(self):
        # Ensure the identity pool is in the list of pools.
        response = self.cognito_identity.list_identity_pools(max_results=5)
        expected_identity = {'IdentityPoolId': self.identity_pool_id,
                             'IdentityPoolName': self.identity_pool_name}
        self.assertIn(expected_identity, response['IdentityPools'])
        # Ensure the pool's attributes are as expected.
        response = self.cognito_identity.describe_identity_pool(
            identity_pool_id=self.identity_pool_id
        )
        self.assertEqual(response['IdentityPoolName'], self.identity_pool_name)
        self.assertEqual(response['IdentityPoolId'], self.identity_pool_id)
        self.assertFalse(response['AllowUnauthenticatedIdentities'])

    def test_resource_not_found_exception(self):
        # Describing a pool in a nonexistent region must raise.
        with self.assertRaises(ResourceNotFoundException):
            # Note the region is us-east-0 which is an invalid region name.
            self.cognito_identity.describe_identity_pool(
                identity_pool_id='us-east-0:c09e640-b014-4822-86b9-ec77c40d8d6f'
            )
| mit |
efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/idlelib/macosxSupport.py | 51 | 6222 | """
A number of function that enhance IDLE on MacOSX when it used as a normal
GUI application (as opposed to an X11 application).
"""
import sys
import Tkinter
from os import path
_appbundle = None
def runningAsOSXApp():
    """
    Returns True if Python is running from within an app on OSX.
    If so, assume that Python was built with Aqua Tcl/Tk rather than
    X11 Tcl/Tk.

    The answer is computed once and cached in the module-level
    _appbundle flag.
    """
    global _appbundle
    if _appbundle is None:
        on_darwin = sys.platform == 'darwin'
        _appbundle = on_darwin and '.app' in sys.executable
    return _appbundle
_carbonaquatk = None
def isCarbonAquaTk(root):
"""
Returns True if IDLE is using a Carbon Aqua Tk (instead of the
newer Cocoa Aqua Tk).
"""
global _carbonaquatk
if _carbonaquatk is None:
_carbonaquatk = (runningAsOSXApp() and
'aqua' in root.tk.call('tk', 'windowingsystem') and
'AppKit' not in root.tk.call('winfo', 'server', '.'))
return _carbonaquatk
def tkVersionWarning(root):
    """
    Return a warning-message string if the Tk version in use appears to
    be one known to cause problems with IDLE, otherwise False.

    1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
    2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
       can still crash unexpectedly.
    """
    if (runningAsOSXApp() and
            ('AppKit' in root.tk.call('winfo', 'server', '.'))):
        patchlevel = root.tk.call('info', 'patchlevel')
        if patchlevel not in ('8.5.7', '8.5.9'):
            # Any other patchlevel is not on the known-bad list.
            return False
        # BUGFIX: these literals were raw strings (r"..."), which made the
        # "\n" appear literally in the warning dialog instead of breaking
        # the line; plain strings restore the intended newline.
        return ("WARNING: The version of Tcl/Tk ({0}) in use may"
                " be unstable.\n"
                "Visit http://www.python.org/download/mac/tcltk/"
                " for current information.".format(patchlevel))
    else:
        return False
def addOpenEventSupport(root, flist):
    """
    Make the application respond to Apple "open document" events, which
    makes it feasible to use IDLE as the default application for Python
    files.
    """
    def _open_documents(*filenames):
        # AquaTk invokes this hook with one argument per file to open.
        for name in filenames:
            flist.open(name)

    # Register the hook AquaTk calls whenever the app receives a file
    # open event.
    root.createcommand("::tk::mac::OpenDocument", _open_documents)
def hideTkConsole(root):
    """Hide the Tk console window, tolerating frameworks without one."""
    try:
        root.tk.call('console', 'hide')
    except Tkinter.TclError:
        # Some versions of the Tk framework don't provide a console
        # object at all; that is fine, there is nothing to hide then.
        pass
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that's more appropriate for
    IDLE.

    :param root: the Tk root window whose menubar is replaced.
    :param flist: the file list object; may be falsy, in which case the
        close-all bindings are skipped.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tck/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one, it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    #
    # BUGFIX: the original imported "Menu, Text, Text" -- Text was listed
    # twice and never used in this function, so only Menu is imported now.
    from Tkinter import Menu
    # NOTE(review): prepstr/get_accelerator and MultiCallCreator appear
    # unused in this function; the imports are kept for their (possible)
    # module-loading side effects -- confirm before removing.
    from idlelib.EditorWindow import prepstr, get_accelerator
    from idlelib import Bindings
    from idlelib import WindowList
    from idlelib.MultiCall import MultiCallCreator

    menubar = Menu(root)
    root.configure(menu=menubar)
    menudict = {}

    menudict['windows'] = menu = Menu(menubar, name='windows')
    menubar.add_cascade(label='Window', menu=menu, underline=0)

    def postwindowsmenu(menu=menu):
        # Rebuild the Window menu from the current window list, keeping
        # any leading fixed entries (index 0) intact.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        WindowList.add_windows_to_menu(menu)
    WindowList.register_callback(postwindowsmenu)

    def about_dialog(event=None):
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About IDLE')

    def config_dialog(event=None):
        from idlelib import configDialog
        root.instance_dict = flist.inversedict
        configDialog.ConfigDialog(root, 'Settings')

    def help_dialog(event=None):
        from idlelib import textView
        fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
        textView.view_file(root, 'Help', fn)

    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)
    root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)

        # The binding above doesn't reliably work on all versions of Tk
        # on MacOSX. Adding command definition below does seem to do the
        # right thing for now.
        root.createcommand('exit', flist.close_all_callback)

    if isCarbonAquaTk(root):
        # for Carbon AquaTk, replace the default Tk apple menu
        menudict['application'] = menu = Menu(menubar, name='apple')
        menubar.add_cascade(label='IDLE', menu=menu)
        Bindings.menudefs.insert(0,
            ('application', [
                ('About IDLE', '<<about-idle>>'),
                None,
            ]))
        tkversion = root.tk.eval('info patchlevel')
        if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
            # for earlier AquaTk versions, supply a Preferences menu item
            Bindings.menudefs[0][1].append(
                ('_Preferences....', '<<open-config-dialog>>'),
            )
    else:
        # assume Cocoa AquaTk
        # replace default About dialog with About IDLE one
        root.createcommand('tkAboutDialog', about_dialog)
        # replace default "Help" item in Help menu
        root.createcommand('::tk::mac::ShowHelp', help_dialog)
        # remove redundant "IDLE Help" from menu
        del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
    """
    Perform setup for the OSX application bundle.

    This is a no-op unless Python is actually running from inside an
    OSX .app bundle.
    """
    if not runningAsOSXApp():
        return
    hideTkConsole(root)
    overrideRootMenu(root, flist)
    addOpenEventSupport(root, flist)
| apache-2.0 |
pulsar-chem/Pulsar-Core | lib/systems/d-leucine.py | 1 | 1231 | import pulsar as psr
def load_ref_system():
    """Return d-leucine as found in the IQMol fragment library.

    All credit to https://github.com/nutjunkie/IQmol
    """
    # XYZ-style geometry: element symbol followed by Cartesian
    # coordinates, one atom per line.
    geometry = """
        C      1.6125     -0.7205     -0.4004
        C      0.1080     -0.4462     -0.4641
        C     -0.2699      0.8384      0.2826
        H      0.3344      0.9224      1.2098
        H     -0.0068      1.7319     -0.3205
        C     -1.7556      0.8767      0.6509
        C     -2.1072      2.2297      1.2470
        N     -0.3079     -0.4667     -1.8903
        H     -1.5210      2.4380      2.1524
        H     -3.1684      2.2756      1.5245
        H     -1.9157      3.0446      0.5366
        C     -2.0986     -0.2378      1.6267
        O      2.4474     -0.1553      0.2742
        O      2.1747     -1.7197     -1.1139
        H     -0.4083     -1.3078      0.0487
        H      0.0311      0.3508     -2.3538
        H     -1.3049     -0.4841     -1.9430
        H      1.5369     -2.1281     -1.6870
        H     -2.3650      0.7354     -0.2761
        H     -3.1801     -0.3051      1.7963
        H     -1.6188     -0.0847      2.6027
        H     -1.7543     -1.2119      1.2402
    """
    return psr.make_system(geometry)
| bsd-3-clause |
sillvan/laikaboss | laikaboss/modules/decode_base64.py | 20 | 1160 | # Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from laikaboss.objectmodel import ModuleObject, ExternalVars
from laikaboss.si_module import SI_MODULE
class DECODE_BASE64(SI_MODULE):
    """Laika module that base64-decodes a scan object's buffer.

    The decoded bytes are emitted as a single child ModuleObject whose
    filename encodes the decoded length.
    """

    def __init__(self,):
        self.module_name = "DECODE_BASE64"

    def _run(self, scanObject, result, depth, args):
        """Decode the buffer and return it as one child object."""
        children = []
        try:
            decoded = base64.b64decode(scanObject.buffer)
        except:
            # Decoding failures propagate unchanged to the framework.
            raise
        child = ModuleObject(
            buffer=decoded,
            externalVars=ExternalVars(filename="d_base64_%s" % len(decoded)))
        children.append(child)
        return children
| apache-2.0 |
map222/spark | python/pyspark/mllib/clustering.py | 59 | 36931 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import array as pyarray
import warnings
if sys.version > '3':
xrange = range
basestring = str
from math import exp, log
from numpy import array, random, tile
from collections import namedtuple
from pyspark import SparkContext, since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector, DenseVector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
from pyspark.streaming import DStream
__all__ = ['BisectingKMeansModel', 'BisectingKMeans', 'KMeansModel', 'KMeans',
'GaussianMixtureModel', 'GaussianMixture', 'PowerIterationClusteringModel',
'PowerIterationClustering', 'StreamingKMeans', 'StreamingKMeansModel',
'LDA', 'LDAModel']
@inherit_doc
class BisectingKMeansModel(JavaModelWrapper):
    """
    A clustering model derived from the bisecting k-means method.

    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> bskm = BisectingKMeans()
    >>> model = bskm.train(sc.parallelize(data, 2), k=4)
    >>> p = array([0.0, 0.0])
    >>> model.predict(p)
    0
    >>> model.k
    4
    >>> model.computeCost(p)
    0.0

    .. versionadded:: 2.0.0
    """
    def __init__(self, java_model):
        super(BisectingKMeansModel, self).__init__(java_model)
        # Materialize the JVM-side centers once, as NumPy arrays.
        self.centers = [c.toArray() for c in self.call("clusterCenters")]

    @property
    @since('2.0.0')
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy
        arrays."""
        return self.centers

    @property
    @since('2.0.0')
    def k(self):
        """Get the number of clusters"""
        # Queried from the wrapped JVM model rather than len(self.centers).
        return self.call("k")

    @since('2.0.0')
    def predict(self, x):
        """
        Find the cluster that each of the points belongs to in this
        model.

        :param x:
          A data point (or RDD of points) to determine cluster index.
        :return:
          Predicted cluster index or an RDD of predicted cluster indices
          if the input is an RDD.
        """
        if isinstance(x, RDD):
            vecs = x.map(_convert_to_vector)
            return self.call("predict", vecs)

        x = _convert_to_vector(x)
        return self.call("predict", x)

    @since('2.0.0')
    def computeCost(self, x):
        """
        Return the Bisecting K-means cost (sum of squared distances of
        points to their nearest center) for this model on the given
        data. If provided with an RDD of points returns the sum.

        :param x:
          A data point (or RDD of points) to compute the cost(s).
        """
        if isinstance(x, RDD):
            vecs = x.map(_convert_to_vector)
            return self.call("computeCost", vecs)

        return self.call("computeCost", _convert_to_vector(x))
class BisectingKMeans(object):
    """
    A bisecting k-means algorithm based on the paper "A comparison of
    document clustering techniques" by Steinbach, Karypis, and Kumar,
    with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and
    bisects each of them using k-means, until there are `k` leaf
    clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped
    together to increase parallelism. If bisecting all divisible
    clusters on the bottom level would result more than `k` leaf
    clusters, larger clusters get higher priority.

    Based on
    U{http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf}
    Steinbach, Karypis, and Kumar, A comparison of document clustering
    techniques, KDD Workshop on Text Mining, 2000.

    .. versionadded:: 2.0.0
    """

    @classmethod
    @since('2.0.0')
    def train(self, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604):
        """
        Runs the bisecting k-means algorithm return the model.

        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          The desired number of leaf clusters. The actual number could
          be smaller if there are no divisible leaf clusters.
          (default: 4)
        :param maxIterations:
          Maximum number of iterations allowed to split clusters.
          (default: 20)
        :param minDivisibleClusterSize:
          Minimum number of points (if >= 1.0) or the minimum proportion
          of points (if < 1.0) of a divisible cluster.
          (default: 1)
        :param seed:
          Random seed value for cluster initialization.
          (default: -1888008604 from classOf[BisectingKMeans].getName.##)
        """
        # Positional argument order must match the JVM-side entry point.
        java_model = callMLlibFunc(
            "trainBisectingKMeans", rdd.map(_convert_to_vector),
            k, maxIterations, minDivisibleClusterSize, seed)
        return BisectingKMeansModel(java_model)
@inherit_doc
class KMeansModel(Saveable, Loader):
    """A clustering model derived from the k-means method.

    >>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
    >>> model = KMeans.train(
    ...     sc.parallelize(data), 2, maxIterations=10, initializationMode="random",
    ...     seed=50, initializationSteps=5, epsilon=1e-4)
    >>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
    True
    >>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
    True
    >>> model.k
    2
    >>> model.computeCost(sc.parallelize(data))
    2.0000000000000004
    >>> model = KMeans.train(sc.parallelize(data), 2)
    >>> sparse_data = [
    ...     SparseVector(3, {1: 1.0}),
    ...     SparseVector(3, {1: 1.1}),
    ...     SparseVector(3, {2: 1.0}),
    ...     SparseVector(3, {2: 1.1})
    ... ]
    >>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
    ...                      seed=50, initializationSteps=5, epsilon=1e-4)
    >>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
    True
    >>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
    True
    >>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
    True
    >>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
    True
    >>> isinstance(model.clusterCenters, list)
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = KMeansModel.load(sc, path)
    >>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    >>> data = array([-383.1,-382.9, 28.7,31.2, 366.2,367.3]).reshape(3, 2)
    >>> model = KMeans.train(sc.parallelize(data), 3, maxIterations=0,
    ...     initialModel = KMeansModel([(-1000.0,-1000.0),(5.0,5.0),(1000.0,1000.0)]))
    >>> model.clusterCenters
    [array([-1000., -1000.]), array([ 5.,  5.]), array([ 1000.,  1000.])]

    .. versionadded:: 0.9.0
    """

    def __init__(self, centers):
        # Centers are kept Python-side; prediction is done locally.
        self.centers = centers

    @property
    @since('1.0.0')
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return self.centers

    @property
    @since('1.4.0')
    def k(self):
        """Total number of clusters."""
        return len(self.centers)

    @since('0.9.0')
    def predict(self, x):
        """
        Find the cluster that each of the points belongs to in this
        model.

        :param x:
          A data point (or RDD of points) to determine cluster index.
        :return:
          Predicted cluster index or an RDD of predicted cluster indices
          if the input is an RDD.
        """
        best = 0
        best_distance = float("inf")
        if isinstance(x, RDD):
            # Distribute the per-point search over the RDD.
            return x.map(self.predict)

        x = _convert_to_vector(x)
        # Linear scan over the centers for the nearest one.
        for i in xrange(len(self.centers)):
            distance = x.squared_distance(self.centers[i])
            if distance < best_distance:
                best = i
                best_distance = distance
        return best

    @since('1.4.0')
    def computeCost(self, rdd):
        """
        Return the K-means cost (sum of squared distances of points to
        their nearest center) for this model on the given
        data.

        :param rdd:
          The RDD of points to compute the cost on.
        """
        cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
                             [_convert_to_vector(c) for c in self.centers])
        return cost

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
        java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
        return KMeansModel(_java2py(sc, java_model.clusterCenters()))
class KMeans(object):
    """
    .. versionadded:: 0.9.0
    """

    @classmethod
    @since('0.9.0')
    def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||",
              seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None):
        """
        Train a k-means clustering model.

        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          Number of clusters to create.
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 100)
        :param runs:
          This param has no effect since Spark 2.0.0.
        :param initializationMode:
          The initialization algorithm. This can be either "random" or
          "k-means||".
          (default: "k-means||")
        :param seed:
          Random seed value for cluster initialization. Set as None to
          generate seed based on system time.
          (default: None)
        :param initializationSteps:
          Number of steps for the k-means|| initialization mode.
          This is an advanced setting -- the default of 2 is almost
          always enough.
          (default: 2)
        :param epsilon:
          Distance threshold within which a center will be considered to
          have converged. If all centers move less than this Euclidean
          distance, iterations are stopped.
          (default: 1e-4)
        :param initialModel:
          Initial cluster centers can be provided as a KMeansModel object
          rather than using the random or k-means|| initializationModel.
          (default: None)
        """
        if runs != 1:
            # `runs` is kept only for signature compatibility.
            warnings.warn("The param `runs` has no effect since Spark 2.0.0.")
        clusterInitialModel = []
        if initialModel is not None:
            if not isinstance(initialModel, KMeansModel):
                raise Exception("initialModel is of "+str(type(initialModel))+". It needs "
                                "to be of <type 'KMeansModel'>")
            clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters]
        model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
                              runs, initializationMode, seed, initializationSteps, epsilon,
                              clusterInitialModel)
        # The JVM model only exposes centers; wrap them in the Python model.
        centers = callJavaFunc(rdd.context, model.clusterCenters)
        return KMeansModel([c.toArray() for c in centers])
@inherit_doc
class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    A clustering model derived from the Gaussian Mixture Model method.

    >>> from pyspark.mllib.linalg import Vectors, DenseMatrix
    >>> from numpy.testing import assert_equal
    >>> from shutil import rmtree
    >>> import os, tempfile

    >>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
    ...                                        0.9,0.8,0.75,0.935,
    ...                                       -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2), 2)
    >>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
    ...                               maxIterations=50, seed=10)
    >>> labels = model.predict(clusterdata_1).collect()
    >>> labels[0]==labels[1]
    False
    >>> labels[1]==labels[2]
    False
    >>> labels[4]==labels[5]
    True
    >>> model.predict([-0.1,-0.05])
    0
    >>> softPredicted = model.predictSoft([-0.1,-0.05])
    >>> abs(softPredicted[0] - 1.0) < 0.001
    True
    >>> abs(softPredicted[1] - 0.0) < 0.001
    True
    >>> abs(softPredicted[2] - 0.0) < 0.001
    True

    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = GaussianMixtureModel.load(sc, path)
    >>> assert_equal(model.weights, sameModel.weights)
    >>> mus, sigmas = list(
    ...     zip(*[(g.mu, g.sigma) for g in model.gaussians]))
    >>> sameMus, sameSigmas = list(
    ...     zip(*[(g.mu, g.sigma) for g in sameModel.gaussians]))
    >>> mus == sameMus
    True
    >>> sigmas == sameSigmas
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    >>> data = array([-5.1971, -2.5359, -3.8220,
    ...               -5.2211, -5.0602,  4.7118,
    ...                6.8989, 3.4592,  4.6322,
    ...                5.7048,  4.6567, 5.5026,
    ...                4.5605,  5.2043,  6.2734])
    >>> clusterdata_2 = sc.parallelize(data.reshape(5,3))
    >>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
    ...                               maxIterations=150, seed=4)
    >>> labels = model.predict(clusterdata_2).collect()
    >>> labels[0]==labels[1]
    True
    >>> labels[2]==labels[3]==labels[4]
    True

    .. versionadded:: 1.3.0
    """

    @property
    @since('1.4.0')
    def weights(self):
        """
        Weights for each Gaussian distribution in the mixture, where weights[i] is
        the weight for Gaussian i, and weights.sum == 1.
        """
        return array(self.call("weights"))

    @property
    @since('1.4.0')
    def gaussians(self):
        """
        Array of MultivariateGaussian where gaussians[i] represents
        the Multivariate Gaussian (Normal) Distribution for Gaussian i.
        """
        # Each JVM-side gaussian arrives as a (mu, sigma) pair.
        return [
            MultivariateGaussian(gaussian[0], gaussian[1])
            for gaussian in self.call("gaussians")]

    @property
    @since('1.4.0')
    def k(self):
        """Number of gaussians in mixture."""
        return len(self.weights)

    @since('1.3.0')
    def predict(self, x):
        """
        Find the cluster to which the point 'x' or each point in RDD 'x'
        has maximum membership in this model.

        :param x:
          A feature vector or an RDD of vectors representing data points.
        :return:
          Predicted cluster label or an RDD of predicted cluster labels
          if the input is an RDD.
        """
        if isinstance(x, RDD):
            # Hard assignment = index of the largest soft membership.
            cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
            return cluster_labels
        else:
            z = self.predictSoft(x)
            return z.argmax()

    @since('1.3.0')
    def predictSoft(self, x):
        """
        Find the membership of point 'x' or each point in RDD 'x' to all mixture components.

        :param x:
          A feature vector or an RDD of vectors representing data points.
        :return:
          The membership value to all mixture components for vector 'x'
          or each vector in RDD 'x'.
        """
        if isinstance(x, RDD):
            means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
            membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
                                              _convert_to_vector(self.weights), means, sigmas)
            return membership_matrix.map(lambda x: pyarray.array('d', x))
        else:
            return self.call("predictSoft", _convert_to_vector(x)).toArray()

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """Load the GaussianMixtureModel from disk.

        :param sc:
          SparkContext.
        :param path:
          Path to where the model is stored.
        """
        model = cls._load_java(sc, path)
        wrapper = sc._jvm.org.apache.spark.mllib.api.python.GaussianMixtureModelWrapper(model)
        return cls(wrapper)
class GaussianMixture(object):
    """
    Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.

    .. versionadded:: 1.3.0
    """

    @classmethod
    @since('1.3.0')
    def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None):
        """
        Train a Gaussian Mixture clustering model.

        :param rdd:
          Training points as an `RDD` of `Vector` or convertible
          sequence types.
        :param k:
          Number of independent Gaussians in the mixture model.
        :param convergenceTol:
          Maximum change in log-likelihood at which convergence is
          considered to have occurred.
          (default: 1e-3)
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 100)
        :param seed:
          Random seed for initial Gaussian distribution. Set as None to
          generate seed based on system time.
          (default: None)
        :param initialModel:
          Initial GMM starting point, bypassing the random
          initialization.
          (default: None)
        """
        initialModelWeights = None
        initialModelMu = None
        initialModelSigma = None
        if initialModel is not None:
            # A provided starting point must match the requested mixture size.
            if initialModel.k != k:
                raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s"
                                % (initialModel.k, k))
            initialModelWeights = list(initialModel.weights)
            initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
            initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
        java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
                                   k, convergenceTol, maxIterations, seed,
                                   initialModelWeights, initialModelMu, initialModelSigma)
        return GaussianMixtureModel(java_model)
class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    Model produced by [[PowerIterationClustering]].

    >>> import math
    >>> def genCircle(r, n):
    ...     points = []
    ...     for i in range(0, n):
    ...         theta = 2.0 * math.pi * i / n
    ...         points.append((r * math.cos(theta), r * math.sin(theta)))
    ...     return points
    >>> def sim(x, y):
    ...     dist2 = (x[0] - y[0]) * (x[0] - y[0]) + (x[1] - y[1]) * (x[1] - y[1])
    ...     return math.exp(-dist2 / 2.0)
    >>> r1 = 1.0
    >>> n1 = 10
    >>> r2 = 4.0
    >>> n2 = 40
    >>> n = n1 + n2
    >>> points = genCircle(r1, n1) + genCircle(r2, n2)
    >>> similarities = [(i, j, sim(points[i], points[j])) for i in range(1, n) for j in range(0, i)]
    >>> rdd = sc.parallelize(similarities, 2)
    >>> model = PowerIterationClustering.train(rdd, 2, 40)
    >>> model.k
    2
    >>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
    >>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
    True
    >>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = PowerIterationClusteringModel.load(sc, path)
    >>> sameModel.k
    2
    >>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
    >>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
    True
    >>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 1.5.0
    """

    @property
    @since('1.5.0')
    def k(self):
        """
        Returns the number of clusters.
        """
        return self.call("k")

    @since('1.5.0')
    def assignments(self):
        """
        Returns the cluster assignments of this model.
        """
        # Wrap each JVM (id, cluster) pair in the named Assignment tuple.
        return self.call("getAssignments").map(
            lambda x: (PowerIterationClustering.Assignment(*x)))

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        model = cls._load_java(sc, path)
        wrapper =\
            sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
        return PowerIterationClusteringModel(wrapper)
class PowerIterationClustering(object):
    """
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm
    developed by [[http://www.icml2010.org/papers/387.pdf Lin and Cohen]].
    From the abstract: PIC finds a very low-dimensional embedding of a
    dataset using truncated power iteration on a normalized pair-wise
    similarity matrix of the data.

    .. versionadded:: 1.5.0
    """

    @classmethod
    @since('1.5.0')
    def train(cls, rdd, k, maxIterations=100, initMode="random"):
        """
        :param rdd:
          An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
          affinity matrix, which is the matrix A in the PIC paper. The
          similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
          matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
          nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
          (j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
          because it is assumed s\ :sub:`ij`\ = 0.0.
        :param k:
          Number of clusters.
        :param maxIterations:
          Maximum number of iterations of the PIC algorithm.
          (default: 100)
        :param initMode:
          Initialization mode. This can be either "random" to use
          a random vector as vertex properties, or "degree" to use
          normalized sum similarities.
          (default: "random")
        """
        model = callMLlibFunc("trainPowerIterationClusteringModel",
                              rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
        return PowerIterationClusteringModel(model)

    class Assignment(namedtuple("Assignment", ["id", "cluster"])):
        """
        Represents an (id, cluster) tuple.

        .. versionadded:: 1.5.0
        """
class StreamingKMeansModel(KMeansModel):
    """
    Clustering model which can perform an online update of the centroids.

    The update formula for each centroid is given by

    * c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
    * n_t+1 = n_t * a + m_t

    where

    * c_t: Centroid at the n_th iteration.
    * n_t: Number of samples (or) weights associated with the centroid
      at the n_th iteration.
    * x_t: Centroid of the new data closest to c_t.
    * m_t: Number of samples (or) weights of the new data closest to c_t
    * c_t+1: New centroid.
    * n_t+1: New number of weights.
    * a: Decay Factor, which gives the forgetfulness.

    .. note:: If a is set to 1, it is the weighted mean of the previous
        and new data. If it set to zero, the old centroids are completely
        forgotten.

    :param clusterCenters:
      Initial cluster centers.
    :param clusterWeights:
      List of weights assigned to each cluster.

    >>> initCenters = [[0.0, 0.0], [1.0, 1.0]]
    >>> initWeights = [1.0, 1.0]
    >>> stkm = StreamingKMeansModel(initCenters, initWeights)
    >>> data = sc.parallelize([[-0.1, -0.1], [0.1, 0.1],
    ...                        [0.9, 0.9], [1.1, 1.1]])
    >>> stkm = stkm.update(data, 1.0, u"batches")
    >>> stkm.centers
    array([[ 0.,  0.],
           [ 1.,  1.]])
    >>> stkm.predict([-0.1, -0.1])
    0
    >>> stkm.predict([0.9, 0.9])
    1
    >>> stkm.clusterWeights
    [3.0, 3.0]
    >>> decayFactor = 0.0
    >>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
    >>> stkm = stkm.update(data, 0.0, u"batches")
    >>> stkm.centers
    array([[ 0.2,  0.2],
           [ 1.5,  1.5]])
    >>> stkm.clusterWeights
    [1.0, 1.0]
    >>> stkm.predict([0.2, 0.2])
    0
    >>> stkm.predict([1.5, 1.5])
    1

    .. versionadded:: 1.5.0
    """
    def __init__(self, clusterCenters, clusterWeights):
        super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
        self._clusterWeights = list(clusterWeights)

    @property
    @since('1.5.0')
    def clusterWeights(self):
        """Return the cluster weights."""
        return self._clusterWeights

    @ignore_unicode_prefix
    @since('1.5.0')
    def update(self, data, decayFactor, timeUnit):
        """Update the centroids, according to data

        :param data:
          RDD with new data for the model update.
        :param decayFactor:
          Forgetfulness of the previous centroids.
        :param timeUnit:
          Can be "batches" or "points". If points, then the decay factor
          is raised to the power of number of new points and if batches,
          then decay factor will be used as is.
        """
        if not isinstance(data, RDD):
            raise TypeError("Data should be of an RDD, got %s." % type(data))
        data = data.map(_convert_to_vector)
        decayFactor = float(decayFactor)
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        vectorCenters = [_convert_to_vector(center) for center in self.centers]
        # The JVM call returns the pair (new centers, new weights);
        # both are folded back into this model in place.
        updatedModel = callMLlibFunc(
            "updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
            data, decayFactor, timeUnit)
        self.centers = array(updatedModel[0])
        self._clusterWeights = list(updatedModel[1])
        return self
class StreamingKMeans(object):
    """
    Provides methods to set k, decayFactor, timeUnit to configure the
    KMeans algorithm for fitting and predicting on incoming dstreams.
    More details on how the centroids are updated are provided under the
    docs of StreamingKMeansModel.

    :param k:
      Number of clusters.
      (default: 2)
    :param decayFactor:
      Forgetfulness of the previous centroids.
      (default: 1.0)
    :param timeUnit:
      Can be "batches" or "points". If points, then the decay factor is
      raised to the power of number of new points and if batches, then
      decay factor will be used as is.
      (default: "batches")

    .. versionadded:: 1.5.0
    """
    def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
        self._k = k
        self._decayFactor = decayFactor
        if timeUnit not in ["batches", "points"]:
            raise ValueError(
                "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
        self._timeUnit = timeUnit
        # The model is created lazily via setInitialCenters/setRandomCenters.
        self._model = None

    @since('1.5.0')
    def latestModel(self):
        """Return the latest model"""
        return self._model

    def _validate(self, dstream):
        # Guard used by trainOn/predictOn: a model must exist and the
        # argument must actually be a DStream.
        if self._model is None:
            raise ValueError(
                "Initial centers should be set either by setInitialCenters "
                "or setRandomCenters.")
        if not isinstance(dstream, DStream):
            raise TypeError(
                "Expected dstream to be of type DStream, "
                "got type %s" % type(dstream))

    @since('1.5.0')
    def setK(self, k):
        """Set number of clusters."""
        self._k = k
        return self

    @since('1.5.0')
    def setDecayFactor(self, decayFactor):
        """Set decay factor."""
        self._decayFactor = decayFactor
        return self

    @since('1.5.0')
    def setHalfLife(self, halfLife, timeUnit):
        """
        Set number of batches after which the centroids of that
        particular batch has half the weightage.
        """
        self._timeUnit = timeUnit
        self._decayFactor = exp(log(0.5) / halfLife)
        return self

    @since('1.5.0')
    def setInitialCenters(self, centers, weights):
        """
        Set initial centers. Should be set before calling trainOn.
        """
        self._model = StreamingKMeansModel(centers, weights)
        return self

    @since('1.5.0')
    def setRandomCenters(self, dim, weight, seed):
        """
        Set the initial centres to be random samples from
        a gaussian population with constant weights.
        """
        rng = random.RandomState(seed)
        clusterCenters = rng.randn(self._k, dim)
        clusterWeights = tile(weight, self._k)
        self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
        return self

    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def update(rdd):
            self._model.update(rdd, self._decayFactor, self._timeUnit)

        dstream.foreachRDD(update)

    @since('1.5.0')
    def predictOn(self, dstream):
        """
        Make predictions on a dstream.
        Returns a transformed dstream object
        """
        self._validate(dstream)
        return dstream.map(lambda x: self._model.predict(x))

    @since('1.5.0')
    def predictOnValues(self, dstream):
        """
        Make predictions on a keyed dstream.
        Returns a transformed dstream object.
        """
        self._validate(dstream)
        return dstream.mapValues(lambda x: self._model.predict(x))
class LDAModel(JavaModelWrapper, JavaSaveable, Loader):

    """ A clustering model derived from the LDA method.

    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
    Terminology
    - "word" = "term": an element of the vocabulary
    - "token": instance of a term appearing in a document
    - "topic": multinomial distribution over words representing some concept
    References:
    - Original LDA paper (journal version):
      Blei, Ng, and Jordan.  "Latent Dirichlet Allocation."  JMLR, 2003.

    >>> from pyspark.mllib.linalg import Vectors
    >>> from numpy.testing import assert_almost_equal, assert_equal
    >>> data = [
    ...     [1, Vectors.dense([0.0, 1.0])],
    ...     [2, SparseVector(2, {0: 1.0})],
    ... ]
    >>> rdd = sc.parallelize(data)
    >>> model = LDA.train(rdd, k=2, seed=1)
    >>> model.vocabSize()
    2
    >>> model.describeTopics()
    [([1, 0], [0.5..., 0.49...]), ([0, 1], [0.5..., 0.49...])]
    >>> model.describeTopics(1)
    [([1], [0.5...]), ([0], [0.5...])]
    >>> topics = model.topicsMatrix()
    >>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
    >>> assert_almost_equal(topics, topics_expect, 1)
    >>> import os, tempfile
    >>> from shutil import rmtree
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = LDAModel.load(sc, path)
    >>> assert_equal(sameModel.topicsMatrix(), model.topicsMatrix())
    >>> sameModel.vocabSize() == model.vocabSize()
    True
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 1.5.0
    """

    @since('1.5.0')
    def topicsMatrix(self):
        """Inferred topics, where each topic is represented by a distribution over terms."""
        # Delegates to the JVM model; the returned matrix is converted to numpy.
        return self.call("topicsMatrix").toArray()

    @since('1.5.0')
    def vocabSize(self):
        """Vocabulary size (number of terms or terms in the vocabulary)"""
        return self.call("vocabSize")

    @since('1.6.0')
    def describeTopics(self, maxTermsPerTopic=None):
        """Return the topics described by weighted terms.

        WARNING: If vocabSize and k are large, this can return a large object!

        :param maxTermsPerTopic:
          Maximum number of terms to collect for each topic.
          (default: vocabulary size)
        :return:
          Array over topics. Each topic is represented as a pair of
          matching arrays: (term indices, term weights in topic).
          Each topic's terms are sorted in order of decreasing weight.
        """
        # The JVM side distinguishes the no-argument overload from an
        # explicit limit, so None is not simply forwarded.
        if maxTermsPerTopic is None:
            topics = self.call("describeTopics")
        else:
            topics = self.call("describeTopics", maxTermsPerTopic)
        return topics

    @classmethod
    @since('1.5.0')
    def load(cls, sc, path):
        """Load the LDAModel from disk.

        :param sc:
          SparkContext.
        :param path:
          Path to where the model is stored.
        """
        if not isinstance(sc, SparkContext):
            raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
        # NOTE(review): `basestring` is Python 2 only; this module predates
        # the py3 port.
        if not isinstance(path, basestring):
            raise TypeError("path should be a basestring, got type %s" % type(path))
        model = callMLlibFunc("loadLDAModel", sc, path)
        return LDAModel(model)
class LDA(object):
    """Entry point for training Latent Dirichlet Allocation topic models.

    .. versionadded:: 1.5.0
    """

    @classmethod
    @since('1.5.0')
    def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
              topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
        """Train a LDA model.

        :param rdd:
          RDD of documents, which are tuples of document IDs and term
          (word) count vectors. The term count vectors are "bags of
          words" with a fixed-size vocabulary (where the vocabulary size
          is the length of the vector). Document IDs must be unique
          and >= 0.
        :param k:
          Number of topics to infer, i.e., the number of soft cluster
          centers.
          (default: 10)
        :param maxIterations:
          Maximum number of iterations allowed.
          (default: 20)
        :param docConcentration:
          Concentration parameter (commonly named "alpha") for the prior
          placed on documents' distributions over topics ("theta").
          (default: -1.0, meaning an automatic/default choice on the JVM side)
        :param topicConcentration:
          Concentration parameter (commonly named "beta" or "eta") for
          the prior placed on topics' distributions over terms.
          (default: -1.0, meaning an automatic/default choice on the JVM side)
        :param seed:
          Random seed for cluster initialization. Set as None to generate
          seed based on system time.
          (default: None)
        :param checkpointInterval:
          Period (in iterations) between checkpoints.
          (default: 10)
        :param optimizer:
          LDAOptimizer used to perform the actual calculation. Currently
          "em", "online" are supported.
          (default: "em")
        """
        # All real work happens in the JVM; this wrapper only marshals args.
        model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations,
                              docConcentration, topicConcentration, seed,
                              checkpointInterval, optimizer)
        return LDAModel(model)
def _test():
    """Run this module's doctests against a local 4-worker SparkContext."""
    import doctest
    import sys
    import pyspark.mllib.clustering
    globs = pyspark.mllib.clustering.__dict__.copy()
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # Use sys.exit rather than the site-module builtin exit(), which is
        # not guaranteed to exist (e.g. when Python is started with -S).
        sys.exit(-1)
if __name__ == "__main__":
    # Allow running the doctest suite directly from the command line.
    _test()
| apache-2.0 |
Tao-Ma/incubator-hawq | tools/bin/gppylib/testold/testUtils.py | 40 | 7004 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
#
#
# THIS IMPORT MUST COME FIRST
# import mainUtils FIRST to get python version check
#
from gppylib.mainUtils import *
import os, sys, traceback
gProgramName = os.path.split(sys.argv[0])[-1]
from gppylib.commands.base import setExecutionContextFactory, ExecutionContext,CommandResult
from gppylib import gplog
from gppylib.commands import unix
from gppylib.system import configurationInterface as configInterface
from gppylib.system import configurationImplGpdb as systemConf
# todo: find proper home for this
# Maps an exact command-line string to the simulator that fakes its result.
gCommandLineToCommandSimulator = {}

def clearCommandSimulators():
    """Remove all registered command simulators.

    Clears the shared registry in place.  The previous implementation
    rebound a *local* variable named gCommandLineToCommandSimulator, which
    left the module-level registry untouched (no ``global`` declaration),
    so simulators from earlier tests leaked into later ones.
    """
    gCommandLineToCommandSimulator.clear()

def addCommandSimulator(commandLine, simulator):
    """Register ``simulator`` to handle the exact command line ``commandLine``."""
    gCommandLineToCommandSimulator[commandLine] = simulator
class TestExecutionContext(ExecutionContext):
    """Execution context that fakes command execution for unit tests.

    Every executed command line is echoed through testOutput; if a
    simulator is registered for the exact command line its result is used,
    otherwise the command "succeeds" with empty output.
    """

    # todo: clean this up (make private), but only when completed in
    # LocalExecutionContext is inspected
    completed = False
    halt = False

    def __init__(self, execution_context_id, remoteHost, stdin):
        self.execution_context_id = execution_context_id
        self.remoteHost = remoteHost
        self.stdin = stdin

    def execute(self, cmd):
        testOutput("exec %s" % cmd.cmdStr)
        simulator = gCommandLineToCommandSimulator.get(cmd.cmdStr)
        if simulator is not None:
            rc, stdoutValue, stderrValue = simulator.simulate(cmd.cmdStr)
        else:
            rc, stdoutValue, stderrValue = 0, [], []
        self.completed = True
        result = CommandResult(rc, "".join(stdoutValue), "".join(stderrValue),
                               self.completed, self.halt)
        cmd.set_results(result)

    def interrupt(self):
        raise Exception("not implemented")  # implement this when needed for testing

    def cancel(self):
        raise Exception("not implemented")  # implement this when needed for testing
class TestExecutionContextFactory:
    """Factory hook that routes all command execution through
    TestExecutionContext (install via setExecutionContextFactory)."""

    def createExecutionContext(self, execution_context_id, remoteHost, stdin):
        context = TestExecutionContext(execution_context_id, remoteHost, stdin)
        return context
gTestResults = []
gTestOutput = None
def testOutput(o) :
global gTestOutput
if gTestOutput is not None:
gTestOutput.append(str(o))
def finishTest(expectedOutputStr):
    """Close the current test: compare collected output with
    ``expectedOutputStr``, record a pass/fail tuple, reset per-test state."""
    global gTestOutput
    global gTestName
    output = "\n".join(gTestOutput)
    if output == expectedOutputStr:
        entry = (gTestName, True, None)
    else:
        # todo: on diff, produce a nicer diff output for large strings!
        msg = "Test %s failed. EXPECTED OUTPUT (surrounding triple quotes added by this output):\n\"\"\"%s\"\"\"\n\n" \
              "ACTUAL OUTPUT (surrounding triple quotes added by this output):\n\"\"\"%s\"\"\"" % (gTestName, expectedOutputStr, output)
        entry = (gTestName, False, msg)
    gTestResults.append(entry)
    gTestOutput = None
    gTestName = None
def startTest(testName):
    """Begin a named test: start collecting output lines for it."""
    global gTestOutput
    global gTestName
    gTestName = testName
    gTestOutput = []
def printTestResults():
    """Write one SUCCESS/FAILURE line per recorded test to stderr, followed
    by a summary line (or two, when there were failures)."""
    numFailures = 0
    numSuccesses = 0
    for (name, passed, msg) in gTestResults:
        if passed:
            numSuccesses += 1
            sys.stderr.write("SUCCESS: %s passed" % name)
            sys.stderr.write("\n")
        else:
            numFailures += 1
            sys.stderr.write("FAILURE: %s failed\n%s\n\n" % (name, msg))
            sys.stderr.write("\n")
    if numFailures == 0:
        sys.stderr.write("ALL %s TESTS SUCCEEDED" % numSuccesses)
        sys.stderr.write("\n")
    else:
        sys.stderr.write("%s tests succeeded" % numSuccesses)
        sys.stderr.write("\n")
        sys.stderr.write("%s tests FAILED" % numFailures)
        sys.stderr.write("\n")
def resetTestResults():
    """Discard every recorded test result."""
    global gTestResults
    gTestResults = []
def test_main( testName, newProgramArgs, createOptionParserFn, createCommandFn, extraOutputGenerators, expectedOutput) :
    """Drive a gppylib main program end-to-end under the test harness.

    Fakes sys.argv with ``newProgramArgs``, installs the fake command
    execution factory, builds and runs the program object, captures all
    output (including exceptions) via testOutput, then compares it against
    ``expectedOutput``.

    :param testName: label recorded in the results list
    :param newProgramArgs: argv (excluding program name) for the run
    :param createOptionParserFn: zero-arg factory returning an OptionParser
    :param createCommandFn: fn(options, args) -> program/command object with
        run() and cleanup()
    :param extraOutputGenerators: optional iterable of objects whose
        generate() emits extra output after the run
    :param expectedOutput: exact expected captured output
    """
    global gTestOutput
    # update args: install a synthetic argv for the program under test
    previousArgs = sys.argv
    sys.argv = []
    sys.argv.append(getProgramName())
    sys.argv.extend(newProgramArgs)
    # register command factory so no real commands execute
    setExecutionContextFactory(TestExecutionContextFactory())
    commandObject=None
    parser = None
    startTest(testName)
    try:
        gplog.setup_tool_logging(gProgramName,unix.getLocalHostname(),unix.getUserName(),nonuser=False)
        parser = createOptionParserFn()
        (options, args) = parser.parse_args()
        gplog.enable_verbose_logging()
        commandObject = createCommandFn(options, args)
        exitCode = commandObject.run()
        # Record the exit code the program would have passed to sys.exit.
        testOutput("sys.exit %s" % exitCode)
    except ProgramArgumentValidationException, e:
        testOutput( "Validation error: %s" % e.getMessage())
    except ExceptionNoStackTraceNeeded, e:
        testOutput( str(e))
    except Exception, e:
        # Unexpected errors become part of the captured output, so a test
        # can assert on them instead of aborting the whole suite.
        testOutput( "EXCEPTION: %s\n%s" % (e, traceback.format_exc()))
    except KeyboardInterrupt:
        sys.exit('\nUser Interrupted')
    finally:
        if commandObject:
            commandObject.cleanup()
    # clean up test settings: restore argv and remove the fake factory
    sys.argv = previousArgs
    setExecutionContextFactory(None)
    if extraOutputGenerators is not None:
        for gen in extraOutputGenerators:
            gen.generate()
    finishTest(expectedOutput)
def simple_test(testname, fnToCall, argsToFn, expectedOutput):
    """Run ``fnToCall(argsToFn)`` as a named test.

    Output produced via testOutput during the call is captured and compared
    with ``expectedOutput``; exceptions are recorded as output rather than
    propagated, so one failing test cannot kill the suite.
    """
    startTest(testname)
    try:
        fnToCall(argsToFn)
    except Exception, e:
        testOutput( "EXCEPTION: %s\n%s" % (e, traceback.format_exc()))
    finishTest(expectedOutput)
def testTableOutput(lines):
    """Emit ``lines`` (rows of string fields) as a column-aligned table,
    one testOutput line per row."""
    # First pass: widest field seen in each column.
    widths = []
    for row in lines:
        while len(widths) < len(row):
            widths.append(0)
        for col, field in enumerate(row):
            if len(field) > widths[col]:
                widths[col] = len(field)
    # Second pass: pad each field to its column width and join with " | ".
    for row in lines:
        padded = [field.ljust(widths[col] + 1) for col, field in enumerate(row)]
        testOutput(" | ".join(padded).strip())
def testOutputGpArray(gpArray):
    """Emit a table describing gpArray's segments (via testTableOutput).

    Rows are sorted by segment dbid so expected-output comparisons are
    deterministic.
    """
    segs = gpArray.getDbList()
    # Sort in place by dbid.  A key function replaces the old cmp-style
    # comparator: simpler, and also valid on Python 3 where list.sort no
    # longer accepts a comparison function.
    segs.sort(key=lambda seg: seg.getSegmentDbId())
    lines = []
    lines.append([
        "dbid", "content", "role", "preferred_role", "mode", "status",
        "hostname", "address", "port", "datadir", "replication_port"
    ])
    for seg in segs:
        lines.append([
            str(seg.getSegmentDbId()),
            str(seg.getSegmentContentId()),
            str(seg.getSegmentRole()),
            str(seg.getSegmentPreferredRole()),
            str(seg.getSegmentMode()),
            str(seg.getSegmentStatus()),
            str(seg.getSegmentHostName()),
            str(seg.getSegmentAddress()),
            str(seg.getSegmentPort()),
            str(seg.getSegmentDataDirectory()),
            str(seg.getSegmentReplicationPort()),
        ])
    testTableOutput(lines)
lg-devs/android_kernel_lge_msm8974 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (set by trace_begin from the script arguments)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple (layout shared by every perf callback below)
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from ``src`` to ``dst``, converted ns -> ms."""
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Honor the "dev=" option: show only packets whose device matches.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, time queued, Qdisc latency, driver latency.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
# (each PF_* string is one line of the event tree printed by print_receive;
# JOINT/WJOINT draw the connecting bars between rows)
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    # One "hunk" is a NET_RX softirq plus the irq(s) that raised it and
    # every packet event observed inside it.
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (honors the "dev=" filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    # Header: absolute timestamp of the first irq entry, then cpu.
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # Hardware irq section: entry lines plus any netif_rx seen inside them.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # Softirq section: entry line followed by napi_poll / receive events.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # Last event: blank separator line instead of a joint bar.
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' means user space copied the packet out;
            # 'handle' means it was freed/consumed in the kernel.
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """perf entry hook: parse script arguments into the module option flags.

    Recognized arguments: ``tx``, ``rx``, ``dev=<name>``, ``debug``.
    If neither tx nor rx was requested, both charts are shown.
    """
    global show_tx
    global show_rx
    global dev
    global debug
    # Iterate the real arguments directly instead of indexing sys.argv and
    # skipping element 0 by hand.
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            # startswith replaces the old find('dev=', 0, 4) >= 0 idiom.
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        # Default: show both charts when neither was requested explicitly.
        show_tx = 1
        show_rx = 1
def trace_end():
    """perf exit hook: sort, replay, and display everything collected."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each record to its stateful handler
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # with "debug": show how full the bounded tracking buffers got
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Only NET_RX softirqs are of interest to this script.
    # NOTE(review): all three softirq callbacks pass "irq__softirq_entry"
    # to symbol_str; it looks copy/pasted, but the vec symbol table is the
    # same for entry/exit/raise so the lookup result is identical -- confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm, vec))

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm, vec))

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
# Each callback below simply timestamps the raw perf event and appends it
# to all_event_list; all interpretation happens later in trace_end.
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name))

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret))

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name))

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, rc, dev_name))

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, protocol, location))

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr))

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    # Push a new (still-open) irq record on this cpu's irq stack.
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    # Close the most recent irq record on this cpu.
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Unmatched exit (e.g. trace started mid-irq): discard silently.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
    # A NET_RX softirq was raised inside the current irq: tag that irq
    # record with a 'sirq_raise' sub-event so the exit handler keeps it.
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    # A NET_RX softirq started on this cpu: open a fresh collection bucket.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
    # The NET_RX softirq finished: merge the irq stack and the softirq
    # bucket for this cpu into one displayable "receive hunk".
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Nothing useful collected (e.g. trace started mid-softirq): drop it.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information realted to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Attach a napi_poll record to the NET_RX softirq open on this cpu."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    record = {'event_name': 'napi_poll',
              'dev': dev_name, 'event_t': time}
    net_rx_dic[cpu]['event_list'].append(record)
def handle_netif_rx(event_info):
    # netif_rx happens in hard-irq context: tag the currently open irq
    # record on this cpu with the packet details.
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
    # A packet reached the protocol stack during the NET_RX softirq:
    # record it both in the softirq bucket (for display) and in
    # rx_skb_list (for later matching against copy/free events).
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Bound the matching buffer; count what falls off the end.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a packet entering dev_queue_xmit, bounding the tracking list."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    # Bound the buffer; count what falls off the end.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful transmit, move the matching packet from the Qdisc
    list to the device-xmit list and stamp its xmit time."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc != 0:  # anything but NETDEV_TX_OK
        return
    for pos, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] != skbaddr:
            continue
        skb['xmit_t'] = time
        tx_xmit_list.insert(0, skb)
        del tx_queue_list[pos]
        # Bound the buffer; count what falls off the end.
        if len(tx_xmit_list) > buffer_budget:
            tx_xmit_list.pop()
            of_count_tx_xmit_list += 1
        return
def handle_kfree_skb(event_info):
    """A packet was freed: drop it from whichever tracking list holds it."""
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # Still queued in the Qdisc: dropped before it was ever transmitted.
    for pos, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[pos]
            return
    # Already transmitted: stamp the free time and archive the packet.
    for pos, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[pos]
            return
    # Received packet freed in the kernel: note who freed it and where.
    for pos, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[pos]
            return

def handle_consume_skb(event_info):
    """A transmitted packet was consumed: stamp free time and archive it."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for pos, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[pos]
            return

def handle_skb_copy_datagram_iovec(event_info):
    """User space copied a received packet out: note the consuming process."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for pos, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[pos]
            return
| gpl-2.0 |
mmardini/django | tests/proxy_model_inheritance/tests.py | 34 | 1921 | from __future__ import absolute_import, unicode_literals
import os
from django.core.management import call_command
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_system_checks, extend_sys_path
from django.utils._os import upath
from .models import (ConcreteModel, ConcreteModelSubclass,
ConcreteModelSubclassProxy)
class ProxyModelInheritanceTests(TransactionTestCase):
    """
    Proxy model inheritance across apps can result in migrate not creating
    the table for the proxied model (as described in #12286). This test
    creates two dummy apps and calls migrate, then verifies that the table
    has been created.
    """
    available_apps = []

    # `auth` app is imported, but not installed in this test, so we need to
    # exclude checks registered by this app.
    @override_system_checks([])
    def test_table_exists(self):
        base_dir = os.path.dirname(os.path.abspath(upath(__file__)))
        with extend_sys_path(base_dir):
            with self.modify_settings(INSTALLED_APPS={'append': ['app1', 'app2']}):
                call_command('migrate', verbosity=0)
                from app1.models import ProxyModel
                from app2.models import NiceModel
                # Both querysets must be usable, proving the tables exist.
                self.assertEqual(NiceModel.objects.all().count(), 0)
                self.assertEqual(ProxyModel.objects.all().count(), 0)
class MultiTableInheritanceProxyTest(TestCase):

    def test_model_subclass_proxy(self):
        """
        Deleting an instance of a model proxying a multi-table inherited
        subclass should cascade delete down the whole inheritance chain (see
        #18083).
        """
        obj = ConcreteModelSubclassProxy.objects.create()
        obj.delete()
        # Every level of the inheritance chain must now be empty.
        for model in (ConcreteModelSubclassProxy,
                      ConcreteModelSubclass,
                      ConcreteModel):
            self.assertEqual(0, model.objects.count())
| bsd-3-clause |
LUTAN/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/kullback_leibler_test.py | 27 | 4410 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributions KL mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import normal
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# pylint: disable=protected-access
# Private registry internals under test: the (type, type) -> kl-fn map and
# the subclass-aware lookup helper.
_DIVERGENCES = kullback_leibler._DIVERGENCES
_registered_kl = kullback_leibler._registered_kl
# pylint: enable=protected-access
class KLTest(test.TestCase):
    """Unit tests for the KL-divergence registration/dispatch mechanism."""

    def testRegistration(self):
        # Registering (MyDist, MyDist) makes kl() dispatch to our function.
        class MyDist(normal.Normal):
            pass

        # Register KL to a lambda that spits out the name parameter
        @kullback_leibler.RegisterKL(MyDist, MyDist)
        def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable
            return name

        a = MyDist(loc=0.0, scale=1.0)
        self.assertEqual("OK", kullback_leibler.kl(a, a, name="OK"))

    def testDomainErrorExceptions(self):
        class MyDistException(normal.Normal):
            pass

        # Register KL to a lambda that spits out the name parameter
        @kullback_leibler.RegisterKL(MyDistException, MyDistException)
        # pylint: disable=unused-argument,unused-variable
        def _kl(a, b, name=None):
            return array_ops.identity([float("nan")])
        # pylint: disable=unused-argument,unused-variable

        with self.test_session():
            a = MyDistException(loc=0.0, scale=1.0)
            # With allow_nan_stats=False a NaN KL must raise at evaluation.
            kl = kullback_leibler.kl(a, a, allow_nan_stats=False)
            with self.assertRaisesOpError(
                "KL calculation between .* and .* returned NaN values"):
                kl.eval()
            # With the default allow_nan_stats=True, NaN passes through.
            kl_ok = kullback_leibler.kl(a, a)
            self.assertAllEqual([float("nan")], kl_ok.eval())

    def testRegistrationFailures(self):
        class MyDist(normal.Normal):
            pass

        # Registering a non-callable is rejected immediately.
        with self.assertRaisesRegexp(TypeError, "must be callable"):
            kullback_leibler.RegisterKL(MyDist, MyDist)("blah")

        # First registration is OK
        kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
        # Second registration fails
        with self.assertRaisesRegexp(ValueError, "has already been registered"):
            kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)

    def testExactRegistrationsAllMatch(self):
        # Every directly-registered pair must be found by the lookup helper.
        for (k, v) in _DIVERGENCES.items():
            self.assertEqual(v, _registered_kl(*k))

    def testIndirectRegistration(self):
        class Sub1(normal.Normal):
            pass

        class Sub2(normal.Normal):
            pass

        class Sub11(Sub1):
            pass

        # pylint: disable=unused-argument,unused-variable
        @kullback_leibler.RegisterKL(Sub1, Sub1)
        def _kl11(a, b, name=None):
            return "sub1-1"

        @kullback_leibler.RegisterKL(Sub1, Sub2)
        def _kl12(a, b, name=None):
            return "sub1-2"

        @kullback_leibler.RegisterKL(Sub2, Sub1)
        def _kl21(a, b, name=None):
            return "sub2-1"
        # pylint: enable=unused-argument,unused_variable

        sub1 = Sub1(loc=0.0, scale=1.0)
        sub2 = Sub2(loc=0.0, scale=1.0)
        sub11 = Sub11(loc=0.0, scale=1.0)
        # Unregistered Sub11 falls back to its closest registered ancestor.
        self.assertEqual("sub1-1", kullback_leibler.kl(sub1, sub1))
        self.assertEqual("sub1-2", kullback_leibler.kl(sub1, sub2))
        self.assertEqual("sub2-1", kullback_leibler.kl(sub2, sub1))
        self.assertEqual("sub1-1", kullback_leibler.kl(sub11, sub11))
        self.assertEqual("sub1-1", kullback_leibler.kl(sub11, sub1))
        self.assertEqual("sub1-2", kullback_leibler.kl(sub11, sub2))
        # NOTE(review): the next two assertions duplicate the previous two;
        # they look like copy/paste leftovers (harmless but redundant).
        self.assertEqual("sub1-1", kullback_leibler.kl(sub11, sub1))
        self.assertEqual("sub1-2", kullback_leibler.kl(sub11, sub2))
        self.assertEqual("sub2-1", kullback_leibler.kl(sub2, sub11))
        self.assertEqual("sub1-1", kullback_leibler.kl(sub1, sub11))
if __name__ == "__main__":
    # Run the suite via the TensorFlow test harness when executed directly.
    test.main()
| apache-2.0 |
tlakshman26/cinder-https-changes | cinder/tests/unit/test_test.py | 22 | 2735 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code."""
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from cinder import rpc
from cinder import test
class IsolationTestCase(test.TestCase):
    """Verify that state is cleaned up between tests.

    The individual tests here are trivial; their value is that a failure
    in the isolation machinery would cascade into many other failures.
    """

    def test_service_isolation(self):
        # Starting a service must not leak state into subsequent tests.
        self.start_service('volume')

    def test_rpc_consumer_isolation(self):
        class NeverCalled(object):
            # Any attribute access on this endpoint fails the test: no
            # RPC message should ever be dispatched to it.
            def __getattribute__(*args):
                assert False, "I should never get called."

        target = messaging.Target(topic='volume', server=cfg.CONF.host)
        server = rpc.get_server(target, endpoints=[NeverCalled()])
        server.start()
class MockAssertTestCase(test.TestCase):
    """Check that only valid mock assert methods are usable."""

    def test_assert_has_calls(self):
        stub = mock.MagicMock(return_value=None)
        for arg in (1, 2):
            stub(arg)
        stub.assert_has_calls([mock.call(1), mock.call(2)])

    def test_assert_any_call(self):
        stub = mock.MagicMock(return_value=None)
        for arg in (1, 2, 3):
            stub(arg)
        stub.assert_any_call(1)

    def test_assert_called_with(self):
        stub = mock.MagicMock(return_value=None)
        stub(1, 'foo', a='123')
        stub.assert_called_with(1, 'foo', a='123')

    def test_assert_called_once_with(self):
        stub = mock.MagicMock(return_value=None)
        stub(1, 'foobar', a='123')
        stub.assert_called_once_with(1, 'foobar', a='123')

    def test_invalid_assert_calls(self):
        stub = mock.MagicMock()
        # Misspelled assert_* attributes must raise AttributeError
        # instead of silently returning a no-op child mock.
        self.assertRaises(AttributeError, lambda: stub.assert_called)
        self.assertRaises(AttributeError,
                          lambda: stub.assert_once_called_with)
uclaacm/ai-combat | AICombat/virtual/queuebot.py | 1 | 4860 | """
queuebot.py
A pure utility virtualbot to be subclassed. The Queuebot provides a queue
decorator to be used on get_action, allowing subclasses to define action
sequences instead of juggling around action primitives.
"""
# Global imports
from collections import deque
# Local imports
import real.definitions as d
from virtual.virtualbot import Virtualbot
class Queuebot(Virtualbot):
    """Pure-utility virtualbot intended to be subclassed.

    Instead of returning one primitive action per get_action() call, a
    subclass decorates get_action with Queuebot.queued and pushes whole
    action sequences via the queue_* helper methods below.
    """
    def __init__(self, arena_data):
        # Initialization
        Virtualbot.__init__(self, arena_data)
        # Queuebot stuff
        self.queue_actions = deque()    # FIFO of pending primitive actions
        self.queue_preempt = False      # preemption mode flag, see preempt_queue()
        self.queue_cleared = False      # set by clear_queue(), consumed by queued()
    """
    A decorator for get_action. By default, get_action is somewhat unwieldy to
    use because it is required to return single primitive actions. This is
    somewhat nice for flexibility, but at the cost of developer-unfriendliness.
    This decorator uses a queue_actions to save primitives and returns them in
    sequence, only calling get_action if the queue is exhausted. A subclass can
    then choose to push actions into the queue, and/or return a primitive. If
    a primitive is returned, it is given priority over anything in the queue.
    """
    @staticmethod
    def queued(func):
        def decorated(self, status):
            """Wrapped get_action: merge queued actions with subclass output."""
            # Save status updates
            self.update_status(status)
            # Initialize state variables.  "ready" means the real bot has
            # finished its previous action and can accept a new one.
            ready = self.state['action'] == d.action.WAIT
            # Preemption mode (see description for preempt_queue()):
            # consult the subclass every tick, before draining the queue.
            if self.queue_preempt:
                self.queue_cleared = False
                ### Ask subclass for an explicit action
                ret = func(self, status)
                if ret:
                    return ret
                ### Realbot still performing an action
                if not ready and not self.queue_cleared:
                    return {"action": d.action.CONTINUE}
                ### There are actions left in the queue
                if self.queue_actions:
                    return self.queue_actions.popleft()
                ### Subclass cleared the queue
                if self.queue_cleared:
                    return {"action": d.action.WAIT}
            # Normal mode: drain the queue first, only then ask the subclass.
            else:
                ### Realbot still performing an action
                if not ready:
                    return {"action": d.action.CONTINUE}
                ### There are actions left in the queue
                if self.queue_actions:
                    return self.queue_actions.popleft()
                ### Ask subclass for an explicit action
                ret = func(self, status)
                if ret:
                    return ret
                ### Now there are actions left in the queue
                if self.queue_actions:
                    return self.queue_actions.popleft()
            # No other conditions are satisfied
            return {"action": d.action.CONTINUE}
        return decorated
    """
    Used by a subclass to tell Queuebot that it wants to preempt the queue.
    What this means is that, instead of executing every action in the queue
    before asking the subclass for more actions, the Queuebot will now always
    ask for new actions before executing its queue. This allows bots to
    continue monitoring their status while executing an action sequence.
    """
    def preempt_queue(self, value=True):
        self.queue_preempt = value
    """
    Lets subclasses know if the queue is empty. Useful during preemption mode.
    """
    def is_queue_empty(self):
        return len(self.queue_actions) == 0
    """
    Clears the queue of actions. If the bot is currently executing an action
    (e.g. walk), it will also be canceled. Useful during preemption mode.
    """
    def clear_queue(self):
        self.queue_actions = deque()
        self.queue_cleared = True
    def queue_left(self):
        """Queue a single left turn."""
        a = {"action": d.action.TURN,
             "direction": d.direction.LEFT}
        self.queue_actions.append(a)
    def queue_right(self):
        """Queue a single right turn."""
        a = {"action": d.action.TURN,
             "direction": d.direction.RIGHT}
        self.queue_actions.append(a)
    def queue_wait(self):
        """Queue a do-nothing tick."""
        a = {"action": d.action.WAIT}
        self.queue_actions.append(a)
    def queue_continue(self):
        """Queue an explicit continuation of the current action."""
        a = {"action": d.action.CONTINUE}
        self.queue_actions.append(a)
    def queue_walk(self, distance=None):
        """Queue a forward walk; distance defaults to one step (self.step)."""
        if distance is None:
            distance = self.step
        a = {"action": d.action.WALK,
             "distance": distance}
        self.queue_actions.append(a)
    def queue_shoot(self):
        """Queue a single shot."""
        a = {"action": d.action.SHOOT}
        self.queue_actions.append(a)
    def queue_reverse(self):
        """Queue a 180-degree turn, implemented as two left turns."""
        self.queue_left()
        self.queue_left()
    def queue_all(self, actions):
        """Queue an iterable of pre-built primitive actions."""
        self.queue_actions.extend(actions)
| gpl-2.0 |
0k/odoo | addons/marketing_campaign_crm_demo/__openerp__.py | 52 | 1694 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest: a plain metadata dictionary evaluated by the
# OpenERP/Odoo module loader.
{
    'name': 'Marketing Campaign - Demo',
    'version': '1.0',
    # Modules that must be installed before this one can be.
    'depends': ['marketing_campaign',
                'crm',
                ],
    'author': 'OpenERP SA',
    'category': 'Marketing',
    'description': """
Demo data for the module marketing_campaign.
============================================
Creates demo data like leads, campaigns and segments for the module marketing_campaign.
""",
    'website': 'https://www.odoo.com/page/lead-automation',
    # No regular data files; this addon ships only demo records.
    'data': [],
    # NOTE(review): presumably loaded only when demo data is enabled for
    # the database -- confirm against the module-loading docs.
    'demo': ['marketing_campaign_demo.xml'],
    'installable': True,
    'auto_install': False,
    'images': ['images/campaigns.jpeg','images/email_templates.jpeg'],
}
| agpl-3.0 |
mSenyor/sl4a | python/src/Tools/msi/msi.py | 30 | 58459 | # Python MSI Generator
# (C) 2003 Martin v. Loewis
# See "FOO" in comments refers to MSDN sections with the title FOO.
import msilib, schema, sequence, os, glob, time, re, shutil
from msilib import Feature, CAB, Directory, Dialog, Binary, add_data
import uisample
from win32com.client import constants
from distutils.spawn import find_executable
from uuids import product_codes
# Settings can be overridden in config.py below
# 0 for official python.org releases
# 1 for intermediate releases by anybody, with
# a new product code for every package.
snapshot = 1
# 1 means that file extension is px, not py,
# and binaries start with x
testpackage = 0
# Location of build tree
srcdir = os.path.abspath("../..")
# Text to be displayed as the version in dialogs etc.
# goes into file name and ProductCode. Defaults to
# current_version.day for Snapshot, current_version otherwise
full_current_version = None
# Is Tcl available at all?
have_tcl = True
# path to PCbuild directory
PCBUILD="PCbuild"
# msvcrt version
MSVCR = "90"
# Optional local overrides for any of the settings above.
try:
    from config import *
except ImportError:
    pass
# Extract current version from Include/patchlevel.h
lines = open(srcdir + "/Include/patchlevel.h").readlines()
major = minor = micro = level = serial = None
# Map the PY_RELEASE_LEVEL macro names to the hex nibble used when
# packing the release level into the version number below.
levels = {
    'PY_RELEASE_LEVEL_ALPHA':0xA,
    'PY_RELEASE_LEVEL_BETA': 0xB,
    'PY_RELEASE_LEVEL_GAMMA':0xC,
    'PY_RELEASE_LEVEL_FINAL':0xF
}
# Scan "#define NAME VALUE" lines for the version components.
for l in lines:
    if not l.startswith("#define"):
        continue
    l = l.split()
    if len(l) != 3:
        continue
    _, name, value = l
    if name == 'PY_MAJOR_VERSION': major = value
    if name == 'PY_MINOR_VERSION': minor = value
    if name == 'PY_MICRO_VERSION': micro = value
    if name == 'PY_RELEASE_LEVEL': level = levels[value]
    if name == 'PY_RELEASE_SERIAL': serial = value
short_version = major+"."+minor
# See PC/make_versioninfo.c
# FIELD3 packs micro, release level and serial into one number.
FIELD3 = 1000*int(micro) + 10*level + int(serial)
current_version = "%s.%d" % (short_version, FIELD3)
# This should never change. The UpgradeCode of this package can be
# used in the Upgrade table of future packages to make the future
# package replace this one. See "UpgradeCode Property".
# upgrade_code gets set to upgrade_code_64 when we have determined
# that the target is Win64.
upgrade_code_snapshot='{92A24481-3ECB-40FC-8836-04B7966EC0D5}'
upgrade_code='{65E6DE48-A358-434D-AA4F-4AF72DB4718F}'
upgrade_code_64='{6A965A0C-6EE6-4E3A-9983-3263F56311EC}'
if snapshot:
    # Snapshot micro version is the number of days since the epoch,
    # and each snapshot gets a brand-new product code.
    current_version = "%s.%s.%s" % (major, minor, int(time.time()/3600/24))
    product_code = msilib.gen_uuid()
else:
    product_code = product_codes[current_version]
if full_current_version is None:
    full_current_version = current_version
# Extension modules shipped as separate .pyd files.
extensions = [
    'bz2.pyd',
    'pyexpat.pyd',
    'select.pyd',
    'unicodedata.pyd',
    'winsound.pyd',
    '_elementtree.pyd',
    '_bsddb.pyd',
    '_socket.pyd',
    '_ssl.pyd',
    '_testcapi.pyd',
    '_tkinter.pyd',
    '_msi.pyd',
    '_ctypes.pyd',
    '_ctypes_test.pyd',
    '_sqlite3.pyd',
    '_hashlib.pyd',
    '_multiprocessing.pyd'
]
# Well-known component UUIDs
# These are needed for SharedDLLs reference counter; if
# a different UUID was used for each incarnation of, say,
# python24.dll, an upgrade would set the reference counter
# from 1 to 2 (due to what I consider a bug in MSI)
# Using the same UUID is fine since these files are versioned,
# so Installer will always keep the newest version.
# NOTE: All uuids are self generated.
pythondll_uuid = {
    "24":"{9B81E618-2301-4035-AC77-75D9ABEB7301}",
    "25":"{2e41b118-38bd-4c1b-a840-6977efd1b911}",
    "26":"{34ebecac-f046-4e1c-b0e3-9bac3cdaacfa}",
    } [major+minor]
# Compute the name that Sphinx gives to the docfile
docfile = ""
if micro:
    docfile = str(micro)
if level < 0xf:
    # Pre-final releases append the level nibble and serial, e.g. "b1".
    docfile = '%x%s' % (level, serial)
docfile = 'python%s%s%s.chm' % (major, minor, docfile)
# Build the mingw import library, libpythonXY.a
# This requires 'nm' and 'dlltool' executables on your PATH
def build_mingw_lib(lib_file, def_file, dll_file, mingw_lib):
    """Build the mingw import library *mingw_lib* (libpythonXY.a).

    Exported symbols are extracted from the MSVC import library
    *lib_file* with 'nm', written as a module-definition file to
    *def_file*, and fed to 'dlltool'.  Returns True on success and
    False (after printing a warning) when either tool is missing or
    exits with an error.
    """
    warning = "WARNING: %s - libpythonXX.a not built"
    nm = find_executable('nm')
    dlltool = find_executable('dlltool')
    if not nm or not dlltool:
        print warning % "nm and/or dlltool were not found"
        return False
    # nm -Cs: demangle names and print the archive index -- see nm docs.
    nm_command = '%s -Cs %s' % (nm, lib_file)
    dlltool_command = "%s --dllname %s --def %s --output-lib %s" % \
          (dlltool, dll_file, def_file, mingw_lib)
    # Archive-index lines of interest look like
    # "_imp__<symbol> in pythonXY.dll"; capture just the symbol.
    export_match = re.compile(r"^_imp__(.*) in python\d+\.dll").match
    f = open(def_file,'w')
    print >>f, "LIBRARY %s" % dll_file
    print >>f, "EXPORTS"
    nm_pipe = os.popen(nm_command)
    for line in nm_pipe.readlines():
        m = export_match(line)
        if m:
            print >>f, m.group(1)
    f.close()
    # os.popen(...).close() returns None on success, else the exit status.
    exit = nm_pipe.close()
    if exit:
        print warning % "nm did not run successfully"
        return False
    if os.system(dlltool_command) != 0:
        print warning % "dlltool did not run successfully"
        return False
    return True
# Target files (.def and .a) go in PCBuild directory
lib_file = os.path.join(srcdir, PCBUILD, "python%s%s.lib" % (major, minor))
def_file = os.path.join(srcdir, PCBUILD, "python%s%s.def" % (major, minor))
dll_file = "python%s%s.dll" % (major, minor)
mingw_lib = os.path.join(srcdir, PCBUILD, "libpython%s%s.a" % (major, minor))
have_mingw = build_mingw_lib(lib_file, def_file, dll_file, mingw_lib)
# Determine the target architecture from the freshly built python DLL.
dll_path = os.path.join(srcdir, PCBUILD, dll_file)
msilib.set_arch_from_file(dll_path)
# The custom-action DLL must match the architecture of the python DLL.
if msilib.pe_type(dll_path) != msilib.pe_type("msisupport.dll"):
    raise SystemError, "msisupport.dll for incorrect architecture"
if msilib.Win64:
    upgrade_code = upgrade_code_64
    # Bump the last digit of the code by one, so that 32-bit and 64-bit
    # releases get separate product codes
    digit = hex((int(product_code[-2],16)+1)%16)[-1]
    product_code = product_code[:-2] + digit + '}'
if testpackage:
    ext = 'px'
    testprefix = 'x'
else:
    ext = 'py'
    testprefix = ''
if msilib.Win64:
    SystemFolderName = "[System64Folder]"
    # NOTE(review): 4 is the registry-key-path component attribute;
    # 256 presumably marks a 64-bit component -- confirm against the
    # MSI Component table documentation.
    registry_component = 4|256
else:
    SystemFolderName = "[SystemFolder]"
    registry_component = 4
msilib.reset()
# condition in which to install pythonxy.dll in system32:
# a) it is Windows 9x or
# b) it is NT, the user is privileged, and has chosen per-machine installation
sys32cond = "(Windows9x or (Privileged and ALLUSERS))"
def build_database():
    """Create the empty MSI database, containing only the schema, the
    summary information stream, and a few global properties."""
    # Snapshot builds upgrade against the snapshot upgrade code.
    uc = upgrade_code_snapshot if snapshot else upgrade_code
    productsuffix = " (64-bit)" if msilib.Win64 else ""
    msi_name = "python-%s%s.msi" % (full_current_version, msilib.arch_ext)
    # 'schema' is the installer 2.0 database schema; 'sequence' holds the
    # standard action sequences (ui/execute, admin/advt/install).
    db = msilib.init_database(msi_name, schema,
                              ProductName="Python "+full_current_version+productsuffix,
                              ProductCode=product_code,
                              ProductVersion=current_version,
                              Manufacturer=u"Python Software Foundation",
                              request_uac=True)
    # With its default sequencing, RemoveExistingProducts would delete
    # files that were just installed.  Scheduling it right after
    # InstallInitialize removes the old product first while still
    # allowing a rollback if the installation is interrupted.
    msilib.change_sequence(sequence.InstallExecuteSequence,
                           "RemoveExistingProducts", 1510)
    msilib.add_tables(db, sequence)
    # ALLUSERS cannot be preset in the Property table, since it could not
    # be reset for a per-user install.  WhichUsers ("ALL" or "JUSTME") is
    # managed by the UI instead, and ALLUSERS is derived from it when
    # execution starts.
    props = [("UpgradeCode", uc),
             ("WhichUsers", "ALL"),
             ("ProductLine", "Python%s%s" % (major, minor))]
    add_data(db, "Property", props)
    db.Commit()
    return db
def remove_old_versions(db):
    """Populate the Upgrade table so earlier installations are removed."""
    lowest = "%s.%s.0" % (major, minor)
    # Forward the feature-selection state of the older installation into
    # this one.  This only works when old and new installation are both
    # per-machine or both per-user.
    migrate = 1
    # See "Upgrade Table": releases with the same major.minor are
    # replaced.  A snapshot removes earlier snapshots only; a release
    # removes all snapshots plus all earlier releases.
    if snapshot:
        rows = [(upgrade_code_snapshot, lowest,
                 current_version,
                 None,      # ignore language
                 migrate,
                 None,      # migrate ALL features
                 "REMOVEOLDSNAPSHOT")]
        props = "REMOVEOLDSNAPSHOT"
    else:
        rows = [(upgrade_code, lowest, current_version,
                 None, migrate, None, "REMOVEOLDVERSION"),
                (upgrade_code_snapshot, lowest,
                 "%s.%d.0" % (major, int(minor)+1),
                 None, migrate, None, "REMOVEOLDSNAPSHOT")]
        props = "REMOVEOLDSNAPSHOT;REMOVEOLDVERSION"
    add_data(db, "Upgrade", rows)
    # Installer collects the product codes of the replaced releases in
    # these properties.  They must be declared secure to allow
    # modification; see "SecureCustomProperties Property".
    props += ";TARGETDIR;DLLDIR"
    add_data(db, "Property", [("SecureCustomProperties", props)])
class PyDialog(Dialog):
    """A Dialog with a fixed layout: controls at the top, a horizontal
    ruler line, and a row of back/next/cancel buttons underneath, with
    an optional bitmap along the left edge."""
    def __init__(self, *args, **kw):
        """Dialog(database, name, x, y, w, h, attributes, title, first,
        default, cancel, bitmap=true)"""
        Dialog.__init__(self, *args)
        ruler = self.h - 36
        bmwidth = 152*ruler/328
        # The left-hand bitmap can be suppressed with bitmap=False.
        if kw.get("bitmap", True):
            self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
        self.line("BottomLine", 0, ruler, self.w, 0)
    def title(self, title):
        "Place the bold title text at the top of the dialog."
        # Control args: name, x, y, w, h,
        # flags=Visible|Enabled|Transparent|NoPrefix, text in VerdanaBold10.
        self.text("Title", 135, 10, 220, 60, 0x30003,
                  r"{\VerdanaBold10}%s" % title)
    def back(self, title, next, name = "Back", active = 1):
        """Add a Back button.  'next' is the tab-next control and
        'active' selects whether the button starts out enabled.  The
        button is returned so that events can be associated with it."""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 180, self.h - 27, 56, 17, flags, title, next)
    def cancel(self, title, next, name = "Cancel", active = 1):
        """Add a Cancel button; see back() for the argument semantics."""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 304, self.h - 27, 56, 17, flags, title, next)
    def next(self, title, next, name = "Next", active = 1):
        """Add a Next button; see back() for the argument semantics."""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 236, self.h - 27, 56, 17, flags, title, next)
    def xbutton(self, name, title, next, xpos):
        """Add a button at fractional horizontal position 'xpos'; the
        y-position is aligned with the other bottom-row buttons.  The
        button is returned so that events can be associated with it."""
        return self.pushbutton(name, int(self.w*xpos - 28), self.h - 27, 56, 17, 3, title, next)
def add_ui(db):
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
add_data(db, 'ActionText', uisample.ActionText)
add_data(db, 'UIText', uisample.UIText)
# Bitmaps
if not os.path.exists(srcdir+r"\PC\python_icon.exe"):
raise "Run icons.mak in PC directory"
add_data(db, "Binary",
[("PythonWin", msilib.Binary(r"%s\PCbuild\installer.bmp" % srcdir)), # 152x328 pixels
("py.ico",msilib.Binary(srcdir+r"\PC\py.ico")),
])
add_data(db, "Icon",
[("python_icon.exe", msilib.Binary(srcdir+r"\PC\python_icon.exe"))])
# Scripts
# CheckDir sets TargetExists if TARGETDIR exists.
# UpdateEditIDLE sets the REGISTRY.tcl component into
# the installed/uninstalled state according to both the
# Extensions and TclTk features.
if os.system("nmake /nologo /c /f msisupport.mak") != 0:
raise "'nmake /f msisupport.mak' failed"
add_data(db, "Binary", [("Script", msilib.Binary("msisupport.dll"))])
# See "Custom Action Type 1"
if msilib.Win64:
CheckDir = "CheckDir"
UpdateEditIDLE = "UpdateEditIDLE"
else:
CheckDir = "_CheckDir@4"
UpdateEditIDLE = "_UpdateEditIDLE@4"
add_data(db, "CustomAction",
[("CheckDir", 1, "Script", CheckDir)])
if have_tcl:
add_data(db, "CustomAction",
[("UpdateEditIDLE", 1, "Script", UpdateEditIDLE)])
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair")])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
compileargs = r'-Wi "[TARGETDIR]Lib\compileall.py" -f -x bad_coding|badsyntax|site-packages|py3_ "[TARGETDIR]Lib"'
lib2to3args = r'-c "import lib2to3.pygram, lib2to3.patcomp;lib2to3.patcomp.PatternCompiler()"'
# See "CustomAction Table"
add_data(db, "CustomAction", [
# msidbCustomActionTypeFirstSequence + msidbCustomActionTypeTextData + msidbCustomActionTypeProperty
# See "Custom Action Type 51",
# "Custom Action Execution Scheduling Options"
("InitialTargetDir", 307, "TARGETDIR",
"[WindowsVolume]Python%s%s" % (major, minor)),
("SetDLLDirToTarget", 307, "DLLDIR", "[TARGETDIR]"),
("SetDLLDirToSystem32", 307, "DLLDIR", SystemFolderName),
# msidbCustomActionTypeExe + msidbCustomActionTypeSourceFile
# See "Custom Action Type 18"
("CompilePyc", 18, "python.exe", compileargs),
("CompilePyo", 18, "python.exe", "-O "+compileargs),
("CompileGrammar", 18, "python.exe", lib2to3args),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
("InitialTargetDir", 'TARGETDIR=""', 750),
# In the user interface, assume all-users installation if privileged.
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, "AdminUISequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
])
# Execute Sequences
add_data(db, "InstallExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToSystem32", 'DLLDIR="" and ' + sys32cond, 751),
("SetDLLDirToTarget", 'DLLDIR="" and not ' + sys32cond, 752),
("UpdateEditIDLE", None, 1050),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
add_data(db, "AdminExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
("SetDLLDirToTarget", 'DLLDIR=""', 751),
("CompilePyc", "COMPILEALL", 6800),
("CompilePyo", "COMPILEALL", 6801),
("CompileGrammar", "COMPILEALL", 6802),
])
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
# See "ControlEvent Table". Parameters are the event, the parameter
# to the action, and optionally the condition for the event, and the order
# of events.
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 135, 70, 220, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 135, 155, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Acknowledgements", 135, 95, 220, 120, 0x30003,
"Special Windows thanks to:\n"
" Mark Hammond, without whose years of freely \n"
" shared Windows expertise, Python for Windows \n"
" would still be Python for DOS.")
c = exit_dialog.text("warning", 135, 200, 220, 40, 0x30003,
"{\\VerdanaRed9}Warning: Python 2.5.x is the last "
"Python release for Windows 9x.")
c.condition("Hide", "NOT Version9X")
exit_dialog.text("Description", 135, 235, 220, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
costing.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
"py.ico", None, None)
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 135, 70, 220, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 135, 110, 220, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 135, 135, 220, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
c = seldlg.text("Existing", 135, 25, 235, 30, 0x30003,
"{\VerdanaRed9}This update will replace your existing [ProductLine] installation.")
c.condition("Hide", 'REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""')
seldlg.text("Description", 135, 50, 220, 40, 0x30003,
"Please select a directory for the [ProductName] files.")
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("DoAction", "CheckDir", "TargetExistsOk<>1", order=1)
# If the target exists, but we found that we are going to remove old versions, don't bother
# confirming that the target directory exists. Strictly speaking, we should determine that
# the target directory is indeed the target of the product that we are going to remove, but
# I don't know how to do that.
c.event("SpawnDialog", "ExistingDirectoryDlg", 'TargetExists=1 and REMOVEOLDVERSION="" and REMOVEOLDSNAPSHOT=""', 2)
c.event("SetTargetPath", "TARGETDIR", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 3)
c.event("SpawnWaitDialog", "WaitForCostingDlg", "CostingComplete=1", 4)
c.event("NewDialog", "SelectFeaturesDlg", 'TargetExists=0 or REMOVEOLDVERSION<>"" or REMOVEOLDSNAPSHOT<>""', 5)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 135, 70, 172, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 135, 90, 208, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 135, 230, 206, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# SelectFeaturesDlg
features = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal|track_disk_space,
title, "Tree", "Next", "Cancel")
features.title("Customize [ProductName]")
features.text("Description", 135, 35, 220, 15, 0x30003,
"Select the way you want features to be installed.")
features.text("Text", 135,45,220,30, 3,
"Click on the icons in the tree below to change the way features will be installed.")
c=features.back("< Back", "Next")
c.event("NewDialog", "SelectDirectoryDlg")
c=features.next("Next >", "Cancel")
c.mapping("SelectionNoItems", "Enabled")
c.event("SpawnDialog", "DiskCostDlg", "OutOfDiskSpace=1", order=1)
c.event("EndDialog", "Return", "OutOfDiskSpace<>1", order=2)
c=features.cancel("Cancel", "Tree")
c.event("SpawnDialog", "CancelDlg")
# The browse property is not used, since we have only a single target path (selected already)
features.control("Tree", "SelectionTree", 135, 75, 220, 95, 7, "_BrowseProperty",
"Tree of selections", "Back", None)
#c=features.pushbutton("Reset", 42, 243, 56, 17, 3, "Reset", "DiskCost")
#c.mapping("SelectionNoItems", "Enabled")
#c.event("Reset", "0")
features.control("Box", "GroupBox", 135, 170, 225, 90, 1, None, None, None, None)
c=features.xbutton("DiskCost", "Disk &Usage", None, 0.10)
c.mapping("SelectionNoItems","Enabled")
c.event("SpawnDialog", "DiskCostDlg")
c=features.xbutton("Advanced", "Advanced", None, 0.30)
c.event("SpawnDialog", "AdvancedDlg")
c=features.text("ItemDescription", 140, 180, 210, 30, 3,
"Multiline description of the currently selected item.")
c.mapping("SelectionDescription","Text")
c=features.text("ItemSize", 140, 210, 210, 45, 3,
"The size of the currently selected item.")
c.mapping("SelectionSize", "Text")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 135, 60, 235, 80, 3,
"WhichUsers", "", "Next")
g.condition("Disable", "VersionNT=600") # Not available on Vista and Windows 2008
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 235, 20, "Install just for me (not available on Windows Vista)")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", order = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Advanced Dialog.
advanced = PyDialog(db, "AdvancedDlg", x, y, w, h, modal, title,
"CompilePyc", "Ok", "Ok")
advanced.title("Advanced Options for [ProductName]")
# A radio group with two options: allusers, justme
advanced.checkbox("CompilePyc", 135, 60, 230, 50, 3,
"COMPILEALL", "Compile .py files to byte code after installation", "Ok")
c = advanced.cancel("Ok", "CompilePyc", name="Ok") # Button just has location of cancel button.
c.event("EndDialog", "Return")
#####################################################################
# Existing Directory dialog
dlg = Dialog(db, "ExistingDirectoryDlg", 50, 30, 200, 80, modal, title,
"No", "No", "No")
dlg.text("Title", 10, 20, 180, 40, 3,
"[TARGETDIR] exists. Are you sure you want to overwrite existing files?")
c=dlg.pushbutton("Yes", 30, 60, 55, 17, 3, "Yes", "No")
c.event("[TargetExists]", "0", order=1)
c.event("[TargetExistsOk]", "1", order=2)
c.event("EndDialog", "Return", order=3)
c=dlg.pushbutton("No", 115, 60, 55, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 135, 63, 230, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 135, 108, 230, 60, 3,
"MaintenanceForm_Action", "", "Next")
g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
# See "Feature Table". The feature level is 1 for all features,
# and the feature attributes are 0 for the DefaultFeature, and
# FollowParent for all other features. The numbers are the Display
# column.
def add_features(db):
# feature attributes:
# msidbFeatureAttributesFollowParent == 2
# msidbFeatureAttributesDisallowAdvertise == 8
# Features that need to be installed with together with the main feature
# (i.e. additional Python libraries) need to follow the parent feature.
# Features that have no advertisement trigger (e.g. the test suite)
# must not support advertisement
global default_feature, tcltk, htmlfiles, tools, testsuite, ext_feature, private_crt
default_feature = Feature(db, "DefaultFeature", "Python",
"Python Interpreter and Libraries",
1, directory = "TARGETDIR")
shared_crt = Feature(db, "SharedCRT", "MSVCRT", "C Run-Time (system-wide)", 0,
level=0)
private_crt = Feature(db, "PrivateCRT", "MSVCRT", "C Run-Time (private)", 0,
level=0)
add_data(db, "Condition", [("SharedCRT", 1, sys32cond),
("PrivateCRT", 1, "not "+sys32cond)])
# We don't support advertisement of extensions
ext_feature = Feature(db, "Extensions", "Register Extensions",
"Make this Python installation the default Python installation", 3,
parent = default_feature, attributes=2|8)
if have_tcl:
tcltk = Feature(db, "TclTk", "Tcl/Tk", "Tkinter, IDLE, pydoc", 5,
parent = default_feature, attributes=2)
htmlfiles = Feature(db, "Documentation", "Documentation",
"Python HTMLHelp File", 7, parent = default_feature)
tools = Feature(db, "Tools", "Utility Scripts",
"Python utility scripts (Tools/", 9,
parent = default_feature, attributes=2)
testsuite = Feature(db, "Testsuite", "Test suite",
"Python test suite (Lib/test/)", 11,
parent = default_feature, attributes=2|8)
def extract_msvcr90():
# Find the redistributable files
if msilib.Win64:
arch = "amd64"
else:
arch = "x86"
dir = os.path.join(os.environ['VS90COMNTOOLS'], r"..\..\VC\redist\%s\Microsoft.VC90.CRT" % arch)
result = []
installer = msilib.MakeInstaller()
# omit msvcm90 and msvcp90, as they aren't really needed
files = ["Microsoft.VC90.CRT.manifest", "msvcr90.dll"]
for f in files:
path = os.path.join(dir, f)
kw = {'src':path}
if f.endswith('.dll'):
kw['version'] = installer.FileVersion(path, 0)
kw['language'] = installer.FileVersion(path, 1)
result.append((f, kw))
return result
def generate_license():
import shutil, glob
out = open("LICENSE.txt", "w")
shutil.copyfileobj(open(os.path.join(srcdir, "LICENSE")), out)
shutil.copyfileobj(open("crtlicense.txt"), out)
for name, pat, file in (("bzip2","bzip2-*", "LICENSE"),
("Berkeley DB", "db-*", "LICENSE"),
("openssl", "openssl-*", "LICENSE"),
("Tcl", "tcl8*", "license.terms"),
("Tk", "tk8*", "license.terms"),
("Tix", "tix-*", "license.terms")):
out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name)
dirs = glob.glob(srcdir+"/../"+pat)
if not dirs:
raise ValueError, "Could not find "+srcdir+"/../"+pat
if len(dirs) > 2:
raise ValueError, "Multiple copies of "+pat
dir = dirs[0]
shutil.copyfileobj(open(os.path.join(dir, file)), out)
out.close()
class PyDirectory(Directory):
"""By default, all components in the Python installer
can run from source."""
def __init__(self, *args, **kw):
if not kw.has_key("componentflags"):
kw['componentflags'] = 2 #msidbComponentAttributesOptional
Directory.__init__(self, *args, **kw)
# See "File Table", "Component Table", "Directory Table",
# "FeatureComponents Table"
def add_files(db):
cab = CAB("python")
tmpfiles = []
# Add all executables, icons, text files into the TARGETDIR component
root = PyDirectory(db, cab, None, srcdir, "TARGETDIR", "SourceDir")
default_feature.set_current()
if not msilib.Win64:
root.add_file("%s/w9xpopen.exe" % PCBUILD)
root.add_file("README.txt", src="README")
root.add_file("NEWS.txt", src="Misc/NEWS")
generate_license()
root.add_file("LICENSE.txt", src=os.path.abspath("LICENSE.txt"))
root.start_component("python.exe", keyfile="python.exe")
root.add_file("%s/python.exe" % PCBUILD)
root.start_component("pythonw.exe", keyfile="pythonw.exe")
root.add_file("%s/pythonw.exe" % PCBUILD)
# msidbComponentAttributesSharedDllRefCount = 8, see "Component Table"
dlldir = PyDirectory(db, cab, root, srcdir, "DLLDIR", ".")
pydll = "python%s%s.dll" % (major, minor)
pydllsrc = os.path.join(srcdir, PCBUILD, pydll)
dlldir.start_component("DLLDIR", flags = 8, keyfile = pydll, uuid = pythondll_uuid)
installer = msilib.MakeInstaller()
pyversion = installer.FileVersion(pydllsrc, 0)
if not snapshot:
# For releases, the Python DLL has the same version as the
# installer package.
assert pyversion.split(".")[:3] == current_version.split(".")
dlldir.add_file("%s/python%s%s.dll" % (PCBUILD, major, minor),
version=pyversion,
language=installer.FileVersion(pydllsrc, 1))
DLLs = PyDirectory(db, cab, root, srcdir + "/" + PCBUILD, "DLLs", "DLLS|DLLs")
# msvcr90.dll: Need to place the DLL and the manifest into the root directory,
# plus another copy of the manifest in the DLLs directory, with the manifest
# pointing to the root directory
root.start_component("msvcr90", feature=private_crt)
# Results are ID,keyword pairs
manifest, crtdll = extract_msvcr90()
root.add_file(manifest[0], **manifest[1])
root.add_file(crtdll[0], **crtdll[1])
# Copy the manifest
# Actually, don't do that anymore - no DLL in DLLs should have a manifest
# dependency on msvcr90.dll anymore, so this should not be necessary
#manifest_dlls = manifest[0]+".root"
#open(manifest_dlls, "w").write(open(manifest[1]['src']).read().replace("msvcr","../msvcr"))
#DLLs.start_component("msvcr90_dlls", feature=private_crt)
#DLLs.add_file(manifest[0], src=os.path.abspath(manifest_dlls))
# Now start the main component for the DLLs directory;
# no regular files have been added to the directory yet.
DLLs.start_component()
# Check if _ctypes.pyd exists
have_ctypes = os.path.exists(srcdir+"/%s/_ctypes.pyd" % PCBUILD)
if not have_ctypes:
print "WARNING: _ctypes.pyd not found, ctypes will not be included"
extensions.remove("_ctypes.pyd")
# Add all .py files in Lib, except lib-tk, test
dirs={}
pydirs = [(root,"Lib")]
while pydirs:
# Commit every now and then, or else installer will complain
db.Commit()
parent, dir = pydirs.pop()
if dir == ".svn" or dir.startswith("plat-"):
continue
elif dir in ["lib-tk", "idlelib", "Icons"]:
if not have_tcl:
continue
tcltk.set_current()
elif dir in ['test', 'tests', 'data', 'output']:
# test: Lib, Lib/email, Lib/bsddb, Lib/ctypes, Lib/sqlite3
# tests: Lib/distutils
# data: Lib/email/test
# output: Lib/test
testsuite.set_current()
elif not have_ctypes and dir == "ctypes":
continue
else:
default_feature.set_current()
lib = PyDirectory(db, cab, parent, dir, dir, "%s|%s" % (parent.make_short(dir), dir))
# Add additional files
dirs[dir]=lib
lib.glob("*.txt")
if dir=='site-packages':
lib.add_file("README.txt", src="README")
continue
files = lib.glob("*.py")
files += lib.glob("*.pyw")
if files:
# Add an entry to the RemoveFile table to remove bytecode files.
lib.remove_pyc()
if dir.endswith('.egg-info'):
lib.add_file('entry_points.txt')
lib.add_file('PKG-INFO')
lib.add_file('top_level.txt')
lib.add_file('zip-safe')
continue
if dir=='test' and parent.physical=='Lib':
lib.add_file("185test.db")
lib.add_file("audiotest.au")
lib.add_file("cfgparser.1")
lib.add_file("sgml_input.html")
lib.add_file("test.xml")
lib.add_file("test.xml.out")
lib.add_file("testtar.tar")
lib.add_file("test_difflib_expect.html")
lib.add_file("check_soundcard.vbs")
lib.add_file("empty.vbs")
lib.glob("*.uue")
lib.glob("*.pem")
lib.glob("*.pck")
lib.add_file("readme.txt", src="README")
lib.add_file("zipdir.zip")
if dir=='decimaltestdata':
lib.glob("*.decTest")
if dir=='output':
lib.glob("test_*")
if dir=='idlelib':
lib.glob("*.def")
lib.add_file("idle.bat")
if dir=="Icons":
lib.glob("*.gif")
lib.add_file("idle.icns")
if dir=="command" and parent.physical=="distutils":
lib.glob("wininst*.exe")
if dir=="setuptools":
lib.add_file("cli.exe")
lib.add_file("gui.exe")
if dir=="lib2to3":
lib.removefile("pickle", "*.pickle")
if dir=="data" and parent.physical=="test" and parent.basedir.physical=="email":
# This should contain all non-.svn files listed in subversion
for f in os.listdir(lib.absolute):
if f.endswith(".txt") or f==".svn":continue
if f.endswith(".au") or f.endswith(".gif"):
lib.add_file(f)
else:
print "WARNING: New file %s in email/test/data" % f
for f in os.listdir(lib.absolute):
if os.path.isdir(os.path.join(lib.absolute, f)):
pydirs.append((lib, f))
# Add DLLs
default_feature.set_current()
lib = DLLs
lib.add_file("py.ico", src=srcdir+"/PC/py.ico")
lib.add_file("pyc.ico", src=srcdir+"/PC/pyc.ico")
dlls = []
tclfiles = []
for f in extensions:
if f=="_tkinter.pyd":
continue
if not os.path.exists(srcdir + "/" + PCBUILD + "/" + f):
print "WARNING: Missing extension", f
continue
dlls.append(f)
lib.add_file(f)
# Add sqlite
if msilib.msi_type=="Intel64;1033":
sqlite_arch = "/ia64"
elif msilib.msi_type=="x64;1033":
sqlite_arch = "/amd64"
tclsuffix = "64"
else:
sqlite_arch = ""
tclsuffix = ""
lib.add_file("sqlite3.dll")
if have_tcl:
if not os.path.exists("%s/%s/_tkinter.pyd" % (srcdir, PCBUILD)):
print "WARNING: Missing _tkinter.pyd"
else:
lib.start_component("TkDLLs", tcltk)
lib.add_file("_tkinter.pyd")
dlls.append("_tkinter.pyd")
tcldir = os.path.normpath(srcdir+("/../tcltk%s/bin" % tclsuffix))
for f in glob.glob1(tcldir, "*.dll"):
lib.add_file(f, src=os.path.join(tcldir, f))
# check whether there are any unknown extensions
for f in glob.glob1(srcdir+"/"+PCBUILD, "*.pyd"):
if f.endswith("_d.pyd"): continue # debug version
if f in dlls: continue
print "WARNING: Unknown extension", f
# Add headers
default_feature.set_current()
lib = PyDirectory(db, cab, root, "include", "include", "INCLUDE|include")
lib.glob("*.h")
lib.add_file("pyconfig.h", src="../PC/pyconfig.h")
# Add import libraries
lib = PyDirectory(db, cab, root, PCBUILD, "libs", "LIBS|libs")
for f in dlls:
lib.add_file(f.replace('pyd','lib'))
lib.add_file('python%s%s.lib' % (major, minor))
# Add the mingw-format library
if have_mingw:
lib.add_file('libpython%s%s.a' % (major, minor))
if have_tcl:
# Add Tcl/Tk
tcldirs = [(root, '../tcltk%s/lib' % tclsuffix, 'tcl')]
tcltk.set_current()
while tcldirs:
parent, phys, dir = tcldirs.pop()
lib = PyDirectory(db, cab, parent, phys, dir, "%s|%s" % (parent.make_short(dir), dir))
if not os.path.exists(lib.absolute):
continue
for f in os.listdir(lib.absolute):
if os.path.isdir(os.path.join(lib.absolute, f)):
tcldirs.append((lib, f, f))
else:
lib.add_file(f)
# Add tools
tools.set_current()
tooldir = PyDirectory(db, cab, root, "Tools", "Tools", "TOOLS|Tools")
for f in ['i18n', 'pynche', 'Scripts', 'versioncheck', 'webchecker']:
lib = PyDirectory(db, cab, tooldir, f, f, "%s|%s" % (tooldir.make_short(f), f))
lib.glob("*.py")
lib.glob("*.pyw", exclude=['pydocgui.pyw'])
lib.remove_pyc()
lib.glob("*.txt")
if f == "pynche":
x = PyDirectory(db, cab, lib, "X", "X", "X|X")
x.glob("*.txt")
if os.path.exists(os.path.join(lib.absolute, "README")):
lib.add_file("README.txt", src="README")
if f == 'Scripts':
lib.add_file("2to3.py", src="2to3")
if have_tcl:
lib.start_component("pydocgui.pyw", tcltk, keyfile="pydocgui.pyw")
lib.add_file("pydocgui.pyw")
# Add documentation
htmlfiles.set_current()
lib = PyDirectory(db, cab, root, "Doc", "Doc", "DOC|Doc")
lib.start_component("documentation", keyfile=docfile)
lib.add_file(docfile, src="build/htmlhelp/"+docfile)
cab.commit(db)
for f in tmpfiles:
os.unlink(f)
# See "Registry Table", "Component Table"
def add_registry(db):
# File extensions, associated with the REGISTRY.def component
# IDLE verbs depend on the tcltk feature.
# msidbComponentAttributesRegistryKeyPath = 4
# -1 for Root specifies "dependent on ALLUSERS property"
tcldata = []
if have_tcl:
tcldata = [
("REGISTRY.tcl", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"py.IDLE")]
add_data(db, "Component",
# msidbComponentAttributesRegistryKeyPath = 4
[("REGISTRY", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"InstallPath"),
("REGISTRY.doc", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"Documentation"),
("REGISTRY.def", msilib.gen_uuid(), "TARGETDIR", registry_component,
None, None)] + tcldata)
# See "FeatureComponents Table".
# The association between TclTk and pythonw.exe is necessary to make ICE59
# happy, because the installer otherwise believes that the IDLE and PyDoc
# shortcuts might get installed without pythonw.exe being install. This
# is not true, since installing TclTk will install the default feature, which
# will cause pythonw.exe to be installed.
# REGISTRY.tcl is not associated with any feature, as it will be requested
# through a custom action
tcldata = []
if have_tcl:
tcldata = [(tcltk.id, "pythonw.exe")]
add_data(db, "FeatureComponents",
[(default_feature.id, "REGISTRY"),
(htmlfiles.id, "REGISTRY.doc"),
(ext_feature.id, "REGISTRY.def")] +
tcldata
)
# Extensions are not advertised. For advertised extensions,
# we would need separate binaries that install along with the
# extension.
pat = r"Software\Classes\%sPython.%sFile\shell\%s\command"
ewi = "Edit with IDLE"
pat2 = r"Software\Classes\%sPython.%sFile\DefaultIcon"
pat3 = r"Software\Classes\%sPython.%sFile"
pat4 = r"Software\Classes\%sPython.%sFile\shellex\DropHandler"
tcl_verbs = []
if have_tcl:
tcl_verbs=[
("py.IDLE", -1, pat % (testprefix, "", ewi), "",
r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -n -e "%1"',
"REGISTRY.tcl"),
("pyw.IDLE", -1, pat % (testprefix, "NoCon", ewi), "",
r'"[TARGETDIR]pythonw.exe" "[TARGETDIR]Lib\idlelib\idle.pyw" -n -e "%1"',
"REGISTRY.tcl"),
]
add_data(db, "Registry",
[# Extensions
("py.ext", -1, r"Software\Classes\."+ext, "",
"Python.File", "REGISTRY.def"),
("pyw.ext", -1, r"Software\Classes\."+ext+'w', "",
"Python.NoConFile", "REGISTRY.def"),
("pyc.ext", -1, r"Software\Classes\."+ext+'c', "",
"Python.CompiledFile", "REGISTRY.def"),
("pyo.ext", -1, r"Software\Classes\."+ext+'o', "",
"Python.CompiledFile", "REGISTRY.def"),
# MIME types
("py.mime", -1, r"Software\Classes\."+ext, "Content Type",
"text/plain", "REGISTRY.def"),
("pyw.mime", -1, r"Software\Classes\."+ext+'w', "Content Type",
"text/plain", "REGISTRY.def"),
#Verbs
("py.open", -1, pat % (testprefix, "", "open"), "",
r'"[TARGETDIR]python.exe" "%1" %*', "REGISTRY.def"),
("pyw.open", -1, pat % (testprefix, "NoCon", "open"), "",
r'"[TARGETDIR]pythonw.exe" "%1" %*', "REGISTRY.def"),
("pyc.open", -1, pat % (testprefix, "Compiled", "open"), "",
r'"[TARGETDIR]python.exe" "%1" %*', "REGISTRY.def"),
] + tcl_verbs + [
#Icons
("py.icon", -1, pat2 % (testprefix, ""), "",
r'[DLLs]py.ico', "REGISTRY.def"),
("pyw.icon", -1, pat2 % (testprefix, "NoCon"), "",
r'[DLLs]py.ico', "REGISTRY.def"),
("pyc.icon", -1, pat2 % (testprefix, "Compiled"), "",
r'[DLLs]pyc.ico', "REGISTRY.def"),
# Descriptions
("py.txt", -1, pat3 % (testprefix, ""), "",
"Python File", "REGISTRY.def"),
("pyw.txt", -1, pat3 % (testprefix, "NoCon"), "",
"Python File (no console)", "REGISTRY.def"),
("pyc.txt", -1, pat3 % (testprefix, "Compiled"), "",
"Compiled Python File", "REGISTRY.def"),
# Drop Handler
("py.drop", -1, pat4 % (testprefix, ""), "",
"{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
("pyw.drop", -1, pat4 % (testprefix, "NoCon"), "",
"{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
("pyc.drop", -1, pat4 % (testprefix, "Compiled"), "",
"{60254CA5-953B-11CF-8C96-00AA00B8708C}", "REGISTRY.def"),
])
# Registry keys
prefix = r"Software\%sPython\PythonCore\%s" % (testprefix, short_version)
add_data(db, "Registry",
[("InstallPath", -1, prefix+r"\InstallPath", "", "[TARGETDIR]", "REGISTRY"),
("InstallGroup", -1, prefix+r"\InstallPath\InstallGroup", "",
"Python %s" % short_version, "REGISTRY"),
("PythonPath", -1, prefix+r"\PythonPath", "",
r"[TARGETDIR]Lib;[TARGETDIR]DLLs;[TARGETDIR]Lib\lib-tk", "REGISTRY"),
("Documentation", -1, prefix+r"\Help\Main Python Documentation", "",
"[TARGETDIR]Doc\\"+docfile , "REGISTRY.doc"),
("Modules", -1, prefix+r"\Modules", "+", None, "REGISTRY"),
("AppPaths", -1, r"Software\Microsoft\Windows\CurrentVersion\App Paths\Python.exe",
"", r"[TARGETDIR]Python.exe", "REGISTRY.def"),
("DisplayIcon", -1,
r"Software\Microsoft\Windows\CurrentVersion\Uninstall\%s" % product_code,
"DisplayIcon", "[TARGETDIR]python.exe", "REGISTRY.def")
])
# Shortcuts, see "Shortcut Table"
add_data(db, "Directory",
[("ProgramMenuFolder", "TARGETDIR", "."),
("MenuDir", "ProgramMenuFolder", "PY%s%s|%sPython %s.%s" % (major,minor,testprefix,major,minor))])
add_data(db, "RemoveFile",
[("MenuDir", "TARGETDIR", None, "MenuDir", 2)])
tcltkshortcuts = []
if have_tcl:
tcltkshortcuts = [
("IDLE", "MenuDir", "IDLE|IDLE (Python GUI)", "pythonw.exe",
tcltk.id, r'"[TARGETDIR]Lib\idlelib\idle.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
("PyDoc", "MenuDir", "MODDOCS|Module Docs", "pythonw.exe",
tcltk.id, r'"[TARGETDIR]Tools\scripts\pydocgui.pyw"', None, None, "python_icon.exe", 0, None, "TARGETDIR"),
]
add_data(db, "Shortcut",
tcltkshortcuts +
[# Advertised shortcuts: targets are features, not files
("Python", "MenuDir", "PYTHON|Python (command line)", "python.exe",
default_feature.id, None, None, None, "python_icon.exe", 2, None, "TARGETDIR"),
# Advertising the Manual breaks on (some?) Win98, and the shortcut lacks an
# icon first.
#("Manual", "MenuDir", "MANUAL|Python Manuals", "documentation",
# htmlfiles.id, None, None, None, None, None, None, None),
## Non-advertised shortcuts: must be associated with a registry component
("Manual", "MenuDir", "MANUAL|Python Manuals", "REGISTRY.doc",
"[#%s]" % docfile, None,
None, None, None, None, None, None),
("Uninstall", "MenuDir", "UNINST|Uninstall Python", "REGISTRY",
SystemFolderName+"msiexec", "/x%s" % product_code,
None, None, None, None, None, None),
])
db.Commit()
db = build_database()
try:
add_features(db)
add_ui(db)
add_files(db)
add_registry(db)
remove_old_versions(db)
db.Commit()
finally:
del db
| apache-2.0 |
cyril51/Sick-Beard | lib/hachoir_metadata/image.py | 90 | 10812 | from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_parser.image import (
BmpFile, IcoFile, PcxFile, GifFile, PngFile, TiffFile,
XcfFile, TargaFile, WMF_File, PsdFile)
from lib.hachoir_parser.image.png import getBitsPerPixel as pngBitsPerPixel
from lib.hachoir_parser.image.xcf import XcfProperty
from lib.hachoir_core.i18n import _
from lib.hachoir_metadata.safe import fault_tolerant
def computeComprRate(meta, compr_size):
"""
Compute image compression rate. Skip size of color palette, focus on
image pixels. Original size is width x height x bpp. Compressed size
is an argument (in bits).
Set "compr_data" with a string like "1.52x".
"""
if not meta.has("width") \
or not meta.has("height") \
or not meta.has("bits_per_pixel"):
return
if not compr_size:
return
orig_size = meta.get('width') * meta.get('height') * meta.get('bits_per_pixel')
meta.compr_rate = float(orig_size) / compr_size
class BmpMetadata(RootMetadata):
def extract(self, image):
if "header" not in image:
return
hdr = image["header"]
self.width = hdr["width"].value
self.height = hdr["height"].value
bpp = hdr["bpp"].value
if bpp:
if bpp <= 8 and "used_colors" in hdr:
self.nb_colors = hdr["used_colors"].value
self.bits_per_pixel = bpp
self.compression = hdr["compression"].display
self.format_version = u"Microsoft Bitmap version %s" % hdr.getFormatVersion()
self.width_dpi = hdr["horizontal_dpi"].value
self.height_dpi = hdr["vertical_dpi"].value
if "pixels" in image:
computeComprRate(self, image["pixels"].size)
class TiffMetadata(RootMetadata):
key_to_attr = {
"img_width": "width",
"img_height": "width",
# TODO: Enable that (need link to value)
# "description": "comment",
# "doc_name": "title",
# "orientation": "image_orientation",
}
def extract(self, tiff):
if "ifd" in tiff:
self.useIFD(tiff["ifd"])
def useIFD(self, ifd):
for field in ifd:
try:
attrname = self.key_to_attr[field.name]
except KeyError:
continue
if "value" not in field:
continue
value = field["value"].value
setattr(self, attrname, value)
class IcoMetadata(MultipleMetadata):
color_to_bpp = {
2: 1,
16: 4,
256: 8
}
def extract(self, icon):
for index, header in enumerate(icon.array("icon_header")):
image = Metadata(self)
# Read size and colors from header
image.width = header["width"].value
image.height = header["height"].value
bpp = header["bpp"].value
nb_colors = header["nb_color"].value
if nb_colors != 0:
image.nb_colors = nb_colors
if bpp == 0 and nb_colors in self.color_to_bpp:
bpp = self.color_to_bpp[nb_colors]
elif bpp == 0:
bpp = 8
image.bits_per_pixel = bpp
image.setHeader(_("Icon #%u (%sx%s)")
% (1+index, image.get("width", "?"), image.get("height", "?")))
# Read compression from data (if available)
key = "icon_data[%u]/header/codec" % index
if key in icon:
image.compression = icon[key].display
key = "icon_data[%u]/pixels" % index
if key in icon:
computeComprRate(image, icon[key].size)
# Store new image
self.addGroup("image[%u]" % index, image)
class PcxMetadata(RootMetadata):
@fault_tolerant
def extract(self, pcx):
self.width = 1 + pcx["xmax"].value
self.height = 1 + pcx["ymax"].value
self.width_dpi = pcx["horiz_dpi"].value
self.height_dpi = pcx["vert_dpi"].value
self.bits_per_pixel = pcx["bpp"].value
if 1 <= pcx["bpp"].value <= 8:
self.nb_colors = 2 ** pcx["bpp"].value
self.compression = _("Run-length encoding (RLE)")
self.format_version = "PCX: %s" % pcx["version"].display
if "image_data" in pcx:
computeComprRate(self, pcx["image_data"].size)
class XcfMetadata(RootMetadata):
# Map image type to bits/pixel
TYPE_TO_BPP = {0: 24, 1: 8, 2: 8}
def extract(self, xcf):
self.width = xcf["width"].value
self.height = xcf["height"].value
try:
self.bits_per_pixel = self.TYPE_TO_BPP[ xcf["type"].value ]
except KeyError:
pass
self.format_version = xcf["type"].display
self.readProperties(xcf)
@fault_tolerant
def processProperty(self, prop):
type = prop["type"].value
if type == XcfProperty.PROP_PARASITES:
for field in prop["data"]:
if "name" not in field or "data" not in field:
continue
if field["name"].value == "gimp-comment":
self.comment = field["data"].value
elif type == XcfProperty.PROP_COMPRESSION:
self.compression = prop["data/compression"].display
elif type == XcfProperty.PROP_RESOLUTION:
self.width_dpi = int(prop["data/xres"].value)
self.height_dpi = int(prop["data/yres"].value)
def readProperties(self, xcf):
for prop in xcf.array("property"):
self.processProperty(prop)
class PngMetadata(RootMetadata):
TEXT_TO_ATTR = {
"software": "producer",
}
def extract(self, png):
if "header" in png:
self.useHeader(png["header"])
if "time" in png:
self.useTime(png["time"])
if "physical" in png:
self.usePhysical(png["physical"])
for comment in png.array("text"):
if "text" not in comment:
continue
keyword = comment["keyword"].value
text = comment["text"].value
try:
key = self.TEXT_TO_ATTR[keyword.lower()]
setattr(self, key, text)
except KeyError:
if keyword.lower() != "comment":
self.comment = "%s=%s" % (keyword, text)
else:
self.comment = text
compr_size = sum( data.size for data in png.array("data") )
computeComprRate(self, compr_size)
@fault_tolerant
def useTime(self, field):
self.creation_date = field.value
@fault_tolerant
def usePhysical(self, field):
self.width_dpi = field["pixel_per_unit_x"].value
self.height_dpi = field["pixel_per_unit_y"].value
@fault_tolerant
def useHeader(self, header):
self.width = header["width"].value
self.height = header["height"].value
# Read number of colors and pixel format
if "/palette/size" in header:
nb_colors = header["/palette/size"].value // 3
else:
nb_colors = None
if not header["has_palette"].value:
if header["has_alpha"].value:
self.pixel_format = _("RGBA")
else:
self.pixel_format = _("RGB")
elif "/transparency" in header:
self.pixel_format = _("Color index with transparency")
if nb_colors:
nb_colors -= 1
else:
self.pixel_format = _("Color index")
self.bits_per_pixel = pngBitsPerPixel(header)
if nb_colors:
self.nb_colors = nb_colors
# Read compression, timestamp, etc.
self.compression = header["compression"].display
class GifMetadata(RootMetadata):
def extract(self, gif):
self.useScreen(gif["/screen"])
if self.has("bits_per_pixel"):
self.nb_colors = (1 << self.get('bits_per_pixel'))
self.compression = _("LZW")
self.format_version = "GIF version %s" % gif["version"].value
for comments in gif.array("comments"):
for comment in gif.array(comments.name + "/comment"):
self.comment = comment.value
if "graphic_ctl/has_transp" in gif and gif["graphic_ctl/has_transp"].value:
self.pixel_format = _("Color index with transparency")
else:
self.pixel_format = _("Color index")
@fault_tolerant
def useScreen(self, screen):
self.width = screen["width"].value
self.height = screen["height"].value
self.bits_per_pixel = (1 + screen["bpp"].value)
class TargaMetadata(RootMetadata):
    """Metadata extractor for Truevision Targa (TGA) images."""

    def extract(self, tga):
        self.width = tga["width"].value
        self.height = tga["height"].value
        self.bits_per_pixel = tga["bpp"].value
        palette_entries = tga["nb_color"].value
        if palette_entries:
            self.nb_colors = palette_entries
        self.compression = tga["codec"].display
        if "pixels" in tga:
            # Only files carrying a pixel payload get a compression rate.
            computeComprRate(self, tga["pixels"].size)
class WmfMetadata(RootMetadata):
    # Metadata extractor for Windows Metafile (WMF) pictures, covering both
    # the Aldus Placeable Metafile (APM) and Enhanced Metafile (EMF) variants.
    def extract(self, wmf):
        if wmf.isAPM():
            # Placeable metafile: bounding rectangle gives the picture size.
            if "amf_header/rect" in wmf:
                rect = wmf["amf_header/rect"]
                self.width = (rect["right"].value - rect["left"].value)
                self.height = (rect["bottom"].value - rect["top"].value)
            self.bits_per_pixel = 24
        elif wmf.isEMF():
            emf = wmf["emf_header"]
            if "description" in emf:
                # Description is "producer\0title" when both are present.
                desc = emf["description"].value
                if "\0" in desc:
                    self.producer, self.title = desc.split("\0", 1)
                else:
                    self.producer = desc
            if emf["nb_colors"].value:
                # Palette present: indexed colour.
                self.nb_colors = emf["nb_colors"].value
                self.bits_per_pixel = 8
            else:
                self.bits_per_pixel = 24
            self.width = emf["width_px"].value
            self.height = emf["height_px"].value
class PsdMetadata(RootMetadata):
    """Metadata extractor for Adobe Photoshop (PSD) documents."""

    @fault_tolerant
    def extract(self, psd):
        self.width = psd["width"].value
        self.height = psd["height"].value
        channels = psd["nb_channels"].value
        depth = psd["depth"].value
        # Overall depth is bits per channel times number of channels.
        self.bits_per_pixel = depth * channels
        self.pixel_format = psd["color_mode"].display
        self.compression = psd["compression"].display
# Register each image parser class with its matching metadata extractor.
registerExtractor(IcoFile, IcoMetadata)
registerExtractor(GifFile, GifMetadata)
registerExtractor(XcfFile, XcfMetadata)
registerExtractor(TargaFile, TargaMetadata)
registerExtractor(PcxFile, PcxMetadata)
registerExtractor(BmpFile, BmpMetadata)
registerExtractor(PngFile, PngMetadata)
registerExtractor(TiffFile, TiffMetadata)
registerExtractor(WMF_File, WmfMetadata)
registerExtractor(PsdFile, PsdMetadata)
| gpl-3.0 |
spadae22/odoo | addons/mrp_operations/__init__.py | 443 | 1091 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_operations
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhlinh/leetcode | 0284.Peeking Iterator/solution.py | 1 | 2930 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-05-10
Last_modify: 2016-05-10
******************************************
'''
'''
Given an Iterator class interface with methods: next() and hasNext(),
design and implement a PeekingIterator that support the peek() operation
-- it essentially peek() at the element that will be returned by
the next call to next().
Here is an example. Assume that the iterator is initialized to the
beginning of the list: [1, 2, 3].
Call next() gets you 1, the first element in the list.
Now you call peek() and it returns 2, the next element.
Calling next() after that still returns 2.
You call next() the final time and it returns 3, the last element.
Calling hasNext() after that should return false.
Hint:
Think of "looking ahead". You want to cache the next element.
Is one variable sufficient? Why or why not?
Test your design with call order of peek() before next() vs next() before peek().
For a clean implementation, check out Google's guava library source code.
Follow up: How would you extend your design to be generic and work with
all types, not just integer?
Credits:
Special thanks to @porker2008 for adding this problem and creating all test cases.
'''
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
    """Wrap an Iterator (``next``/``hasNext``) and add ``peek()`` support.

    One element is cached ahead of the underlying iterator.  Availability is
    tracked with an explicit boolean flag instead of the truthiness of the
    cached value, so falsy elements (0, '', False, None-free sequences of
    zeros, ...) are reported correctly by ``hasNext``.
    """

    def __init__(self, iterator):
        """
        Initialize your data structure here.
        :type iterator: Iterator
        """
        self.it = iterator
        # True while `forward` holds a real, not-yet-consumed element.
        self._has_forward = iterator.hasNext()
        self.forward = iterator.next() if self._has_forward else None

    def peek(self):
        """
        Returns the next element in the iteration without advancing the iterator.
        :rtype: int
        """
        # Returns None when the iteration is exhausted (same as before).
        return self.forward

    def next(self):
        """
        Returns the cached element and advances the cache by one.
        :rtype: int
        """
        tmp = self.forward
        self._has_forward = self.it.hasNext()
        self.forward = self.it.next() if self._has_forward else None
        return tmp

    def hasNext(self):
        """
        Returns true if the iteration has more elements.
        :rtype: bool
        """
        # Bug fix: the original returned `True if self.forward else False`,
        # which misreports falsy elements such as 0 as end-of-iteration.
        return self._has_forward
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.