| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
thefinn93/orgsms | orgsms/api.py | 1 | 3675 | from flask import Blueprint, abort, jsonify, request, current_app, Response
import datetime
from sqlalchemy import desc
from .provider import providers
from .socketio import socketio
from . import models, exceptions
app = Blueprint('api', __name__)
@app.route('/inbound/<provider>', methods=["POST"])
def inbound(provider):
if provider in providers:
message = providers[provider].receive()
models.db.session.add(message)
models.db.session.commit()
message.push()
return Response()
else:
return abort(404)
def send(local_number, remote_number, text, provider=None):
if provider is None:
local = models.PhoneNumber.query.get(local_number)
if local is not None:
provider = local.provider
if provider is None:
raise exceptions.CantDetermineProviderException()
if provider not in providers:
raise exceptions.UnknownProviderException("Provider {} unknown".format(provider))
message = models.Message(local_number=local_number, remote_number=remote_number,
inbound=False, mms=False, text=text)
models.db.session.add(message)
current_app.logger.debug("Sending %s to %s from %s", text, remote_number, local_number)
providers[provider].send(message)
models.db.session.commit()
broadcast_msg = message.json()
broadcast_msg['source_session'] = request.sid
socketio.emit('newmessage', broadcast_msg)
return message
@app.route('/outbound', methods=["POST"])
def outbound():
try:
message = send(request.form.get("from"), request.form.get("to"), request.form.get("text"))
return jsonify({"id": message.id})
except (exceptions.CantDetermineProviderException, exceptions.UnknownProviderException):
return abort(400)
@socketio.on('send')
def outbound_socket(json):
current_app.logger.debug("Received message from client %s: %s", request.sid, json)
try:
message = send(json.get("from"), json.get("to"), json.get("text"))
return {"success": True, "message": message.json()}
except (exceptions.CantDetermineProviderException, exceptions.UnknownProviderException):
current_app.logger.exception("Failed to send %s", str(json))
return {"success": False}
@app.route('/messages/<number>')
def get_messages(number):
results = []
query = models.Message.query.filter_by(remote_number=number)
if request.args.get('after') is not None:
after = datetime.datetime.fromtimestamp(float(request.args.get('after')))
query = query.filter(models.Message.timestamp > after)
if request.args.get('before') is not None:
before = datetime.datetime.fromtimestamp(float(request.args.get('before')))
query = query.filter(models.Message.timestamp < before)
query = query.order_by(desc(models.Message.timestamp)).limit(50)
for message in query.all():
results.append({
"mms": message.mms,
"inbound": message.inbound,
"text": message.text,
"attachment": message.attachment,
"timestamp": message.timestamp.timestamp()
})
return jsonify(results)
@app.route("/messages")
def get_conversations():
query = models.db.session.query(models.Message.remote_number.distinct().label("number"))
conversations = []
for conversation in query.all():
contact = models.Contact.query.filter_by(number=conversation.number).first()
conversations.append({
"number": conversation.number,
"name": contact.name if contact is not None else None
})
return jsonify(conversations)
| gpl-3.0 |
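A minimal client sketch for the HTTP API defined in the row above. The `from`/`to`/`text` form fields, the returned `id`, and the `after`/`before` UNIX-timestamp filters on `/messages/<number>` are taken from the code; the base URL and the use of the `requests` library are assumptions.

```python
import requests  # assumed available; any HTTP client would do

BASE = "http://localhost:5000"  # hypothetical deployment URL

# Send a message through /outbound (form-encoded, matching request.form.get above).
resp = requests.post(BASE + "/outbound",
                     data={"from": "+15551230000", "to": "+15559870000", "text": "hello"})
print(resp.json())  # {"id": <message id>} on success; HTTP 400 if no provider can be determined

# Fetch up to 50 recent messages for a remote number, bounded by UNIX timestamps.
history = requests.get(BASE + "/messages/+15559870000", params={"after": 1500000000})
for msg in history.json():
    print(msg["timestamp"], msg["inbound"], msg["text"])
```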
zhinaonet/sqlmap-z | thirdparty/termcolor/termcolor.py | 168 | 5044 | # coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <konstantin.lepa@gmail.com>
"""ANSII Color formatting for output in terminal."""
from __future__ import print_function
import os
__ALL__ = [ 'colored', 'cprint' ]
VERSION = (1, 1, 0)
ATTRIBUTES = dict(
list(zip([
'bold',
'dark',
'',
'underline',
'blink',
'',
'reverse',
'concealed'
],
list(range(1, 9))
))
)
del ATTRIBUTES['']
HIGHLIGHTS = dict(
list(zip([
'on_grey',
'on_red',
'on_green',
'on_yellow',
'on_blue',
'on_magenta',
'on_cyan',
'on_white'
],
list(range(40, 48))
))
)
COLORS = dict(
list(zip([
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
],
list(range(30, 38))
))
)
RESET = '\033[0m'
def colored(text, color=None, on_color=None, attrs=None):
"""Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
text += RESET
return text
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
"""Print colorize text.
It accepts arguments of print function.
"""
print((colored(text, color, on_color, attrs)), **kwargs)
if __name__ == '__main__':
print('Current terminal type: %s' % os.getenv('TERM'))
print('Test basic colors:')
cprint('Grey color', 'grey')
cprint('Red color', 'red')
cprint('Green color', 'green')
cprint('Yellow color', 'yellow')
cprint('Blue color', 'blue')
cprint('Magenta color', 'magenta')
cprint('Cyan color', 'cyan')
cprint('White color', 'white')
print(('-' * 78))
print('Test highlights:')
cprint('On grey color', on_color='on_grey')
cprint('On red color', on_color='on_red')
cprint('On green color', on_color='on_green')
cprint('On yellow color', on_color='on_yellow')
cprint('On blue color', on_color='on_blue')
cprint('On magenta color', on_color='on_magenta')
cprint('On cyan color', on_color='on_cyan')
cprint('On white color', color='grey', on_color='on_white')
print('-' * 78)
print('Test attributes:')
cprint('Bold grey color', 'grey', attrs=['bold'])
cprint('Dark red color', 'red', attrs=['dark'])
cprint('Underline green color', 'green', attrs=['underline'])
cprint('Blink yellow color', 'yellow', attrs=['blink'])
cprint('Reversed blue color', 'blue', attrs=['reverse'])
cprint('Concealed Magenta color', 'magenta', attrs=['concealed'])
cprint('Bold underline reverse cyan color', 'cyan',
attrs=['bold', 'underline', 'reverse'])
cprint('Dark blink concealed white color', 'white',
attrs=['dark', 'blink', 'concealed'])
print(('-' * 78))
print('Test mixing:')
cprint('Underline red on grey color', 'red', 'on_grey',
['underline'])
cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])
| gpl-3.0 |
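A short usage sketch for the `colored`/`cprint` helpers documented in the docstring above; only color, highlight and attribute names already listed there are used, and the import path is an assumption since the module is vendored under `thirdparty/termcolor`.

```python
import sys
from thirdparty.termcolor.termcolor import colored, cprint  # path as vendored in this tree (assumed importable)

# Green text on a grey background, bold -- names come from COLORS, HIGHLIGHTS and ATTRIBUTES.
print("service check: " + colored("OK", "green", "on_grey", attrs=["bold"]))

# cprint is print(colored(...)) and forwards print()'s keyword arguments.
cprint("fatal error", "red", attrs=["bold"], file=sys.stderr)

# Setting the ANSI_COLORS_DISABLED environment variable makes colored() return the text unchanged.
```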
cristianomatos/crKernel-mako | scripts/build-all.py | 1250 | 9474 | #! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno  # needed by check_build() below, which checks exc.errno == errno.EEXIST
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
tableau/TabPy | tests/integration/test_deploy_and_evaluate_model_auth_on.py | 1 | 1233 | from . import integ_test_base
class TestDeployAndEvaluateModelAuthOn(integ_test_base.IntegTestBase):
def _get_config_file_name(self) -> str:
return "./tests/integration/resources/deploy_and_evaluate_model_auth.conf"
def _get_port(self) -> str:
return "9009"
def test_deploy_and_evaluate_model(self):
# Uncomment the following line to preserve
# test case output and other files (config, state, etc.)
# in system temp folder.
# self.set_delete_temp_folder(False)
self.deploy_models(self._get_username(), self._get_password())
headers = {
"Content-Type": "application/json",
"Authorization": "Basic dXNlcjE6UEBzc3cwcmQ=",
"Host": "localhost:9009",
}
payload = """{
"data": { "_arg1": ["happy", "sad", "neutral"] },
"script":
"return tabpy.query('Sentiment Analysis',_arg1)['response']"
}"""
conn = self._get_connection()
conn.request("POST", "/evaluate", payload, headers)
SentimentAnalysis_eval = conn.getresponse()
self.assertEqual(200, SentimentAnalysis_eval.status)
SentimentAnalysis_eval.read()
| mit |
bolkedebruin/airflow | airflow/executors/executor_loader.py | 1 | 3573 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""All executors."""
import importlib
from typing import Optional
from airflow.executors.base_executor import BaseExecutor
class ExecutorLoader:
"""
Keeps constants for all the currently available executors.
"""
LOCAL_EXECUTOR = "LocalExecutor"
SEQUENTIAL_EXECUTOR = "SequentialExecutor"
CELERY_EXECUTOR = "CeleryExecutor"
DASK_EXECUTOR = "DaskExecutor"
KUBERNETES_EXECUTOR = "KubernetesExecutor"
DEBUG_EXECUTOR = "DebugExecutor"
_default_executor: Optional[BaseExecutor] = None
executors = {
LOCAL_EXECUTOR: 'airflow.executors.local_executor',
SEQUENTIAL_EXECUTOR: 'airflow.executors.sequential_executor',
CELERY_EXECUTOR: 'airflow.executors.celery_executor',
DASK_EXECUTOR: 'airflow.executors.dask_executor',
KUBERNETES_EXECUTOR: 'airflow.executors.kubernetes_executor',
DEBUG_EXECUTOR: 'airflow.executors.debug_executor'
}
@classmethod
def get_default_executor(cls) -> BaseExecutor:
"""Creates a new instance of the configured executor if none exists and returns it"""
if cls._default_executor is not None:
return cls._default_executor
from airflow.configuration import conf
executor_name = conf.get('core', 'EXECUTOR')
cls._default_executor = ExecutorLoader._get_executor(executor_name)
from airflow import LoggingMixin
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return cls._default_executor
@classmethod
def _get_executor(cls, executor_name: str) -> BaseExecutor:
"""
Creates a new instance of the named executor.
In case the executor name is unknown in airflow,
look for it in the plugins
"""
if executor_name in cls.executors:
executor_module = importlib.import_module(cls.executors[executor_name])
executor = getattr(executor_module, executor_name)
return executor()
else:
# Load plugins here for executors as at that time the plugins might not have been initialized yet
# TODO: verify the above and remove two lines below in case plugins are always initialized first
from airflow import plugins_manager
plugins_manager.integrate_executor_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise ValueError(f"Executor {executor_name} not supported: "
f"please specify in format plugin_module.executor")
if executor_path[0] not in globals():
raise ValueError(f"Executor {executor_name} not supported")
return globals()[executor_path[0]].__dict__[executor_path[1]]()
| apache-2.0 |
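A minimal sketch of how the loader above resolves executor names. The built-in constants and the `plugin_module.executor` fallback format come straight from the code; the plugin executor name used in the comment is hypothetical.

```python
from airflow.executors.executor_loader import ExecutorLoader

# A built-in name is looked up in ExecutorLoader.executors and its module imported lazily.
local_executor = ExecutorLoader._get_executor(ExecutorLoader.LOCAL_EXECUTOR)

# The configured default ([core] executor in airflow.cfg) is created once and cached.
default_executor = ExecutorLoader.get_default_executor()

# An unknown name must look like "plugin_module.executor" (e.g. a hypothetical
# "my_plugin.MyExecutor" registered via an executor plugin); anything else raises ValueError.
```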
rg3/youtube-dl | youtube_dl/extractor/srmediathek.py | 53 | 2316 | # coding: utf-8
from __future__ import unicode_literals
from .ard import ARDMediathekIE
from ..utils import (
ExtractorError,
get_element_by_attribute,
)
class SRMediathekIE(ARDMediathekIE):
IE_NAME = 'sr:mediathek'
IE_DESC = 'Saarländischer Rundfunk'
_VALID_URL = r'https?://sr-mediathek(?:\.sr-online)?\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
'info_dict': {
'id': '28455',
'ext': 'mp4',
'title': 'sportarena (26.10.2014)',
'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'no longer available',
}, {
'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682',
'info_dict': {
'id': '37682',
'ext': 'mp4',
'title': 'Love, Cakes and Rock\'n\'Roll',
'description': 'md5:18bf9763631c7d326c22603681e1123d',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://sr-mediathek.de/index.php?seite=7&id=7480',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>Der gewünschte Beitrag ist leider nicht mehr verfügbar.<' in webpage:
raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
media_collection_url = self._search_regex(
r'data-mediacollection-ardplayer="([^"]+)"', webpage, 'media collection url')
info = self._extract_media_info(media_collection_url, webpage, video_id)
info.update({
'id': video_id,
'title': get_element_by_attribute('class', 'ardplayer-title', webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
})
return info
| unlicense |
zamattiac/osf.io | admin_tests/metrics/test_utils.py | 11 | 6284 | from nose import tools as nt
from datetime import timedelta, datetime
from tests.base import AdminTestCase
from tests.factories import (
AuthUserFactory, NodeFactory, ProjectFactory, RegistrationFactory
)
from website.project.model import Node, User
from framework.auth import Auth
from admin.metrics.utils import (
get_projects,
get_osf_statistics,
get_list_of_dates,
get_previous_midnight,
get_days_statistics,
DAY_LEEWAY,
get_active_user_count,
get_unregistered_users,
)
from admin.metrics.models import OSFWebsiteStatistics
class TestMetricsGetProjects(AdminTestCase):
def setUp(self):
super(TestMetricsGetProjects, self).setUp()
Node.remove()
self.public_node = ProjectFactory(is_public=True)
self.private_node = ProjectFactory(is_public=False)
self.node_2 = NodeFactory() # creates parent project + node
self.reg = RegistrationFactory(project=self.public_node)
def test_get_all_top_level_nodes(self):
count = get_projects()
nt.assert_equal(count, 4)
def test_get_public_top_level_nodes(self):
count = get_projects(public=True)
nt.assert_equal(count, 1)
def test_get_registrations(self):
count = get_projects(registered=True)
nt.assert_equal(count, 1)
def test_date_created_filter_returns_no_results(self):
time = self.public_node.date_created - timedelta(weeks=1)
count = get_projects(time=time)
nt.assert_equal(count, 0)
class TestMetricsGetDaysStatistics(AdminTestCase):
def setUp(self):
super(TestMetricsGetDaysStatistics, self).setUp()
Node.remove()
NodeFactory(category='project') # makes Node, plus parent
NodeFactory(category='data')
def test_time_now(self):
get_days_statistics(datetime.utcnow())
nt.assert_equal(OSFWebsiteStatistics.objects.count(), 1)
nt.assert_equal(OSFWebsiteStatistics.objects.latest('date').projects, 2)
def test_delta(self):
get_days_statistics(datetime.utcnow())
ProjectFactory()
ProjectFactory()
latest = OSFWebsiteStatistics.objects.latest('date')
get_days_statistics(datetime.utcnow(), latest)
even_later = OSFWebsiteStatistics.objects.latest('date')
nt.assert_equal(even_later.delta_projects, 2)
class TestMetricsGetOSFStatistics(AdminTestCase):
def setUp(self):
super(TestMetricsGetOSFStatistics, self).setUp()
Node.remove()
time_now = get_previous_midnight()
NodeFactory(category='project', date_created=time_now)
NodeFactory(category='project',
date_created=time_now - timedelta(days=1))
last_time = time_now - timedelta(days=2)
NodeFactory(category='project', date_created=last_time)
NodeFactory(category='project', date_created=last_time)
get_days_statistics(last_time + timedelta(seconds=1))
self.time = time_now + timedelta(seconds=1)
def test_get_two_more_days(self):
nt.assert_equal(OSFWebsiteStatistics.objects.count(), 1)
get_osf_statistics()
nt.assert_equal(OSFWebsiteStatistics.objects.count(), 3)
def test_dont_add_another(self):
nt.assert_equal(OSFWebsiteStatistics.objects.count(), 1)
get_osf_statistics()
nt.assert_equal(OSFWebsiteStatistics.objects.count(), 3)
get_osf_statistics()
nt.assert_equal(OSFWebsiteStatistics.objects.count(), 3)
class TestMetricListDays(AdminTestCase):
def test_five_days(self):
time_now = datetime.utcnow()
time_past = time_now - timedelta(days=5)
dates = get_list_of_dates(time_past, time_now)
nt.assert_equal(len(dates), 5)
nt.assert_in(time_now, dates)
def test_month_transition(self):
time_now = datetime.utcnow()
time_end = time_now - timedelta(
days=(time_now.day - 2)
)
time_start = time_end - timedelta(days=5)
dates = get_list_of_dates(time_start, time_end)
nt.assert_equal(len(dates), 5)
def test_off_by_seconds(self):
time_now = datetime.utcnow()
time_start = time_now - timedelta(
seconds=DAY_LEEWAY + 1
)
dates = get_list_of_dates(time_start, time_now)
nt.assert_equal(len(dates), 1)
def test_on_exact_time(self):
time_now = datetime.utcnow()
time_start = time_now - timedelta(
seconds=DAY_LEEWAY
)
dates = get_list_of_dates(time_start, time_now)
nt.assert_equal(len(dates), 0)
def test_just_missed_time(self):
time_now = datetime.utcnow()
time_start = time_now - timedelta(
seconds=DAY_LEEWAY - 1
)
dates = get_list_of_dates(time_start, time_now)
nt.assert_equal(len(dates), 0)
class TestMetricPreviousMidnight(AdminTestCase):
def test_midnight(self):
time_now = datetime.utcnow()
midnight = get_previous_midnight(time_now)
nt.assert_equal(midnight.date(), time_now.date())
nt.assert_equal(midnight.hour, 0)
nt.assert_equal(midnight.minute, 0)
nt.assert_equal(midnight.second, 0)
nt.assert_equal(midnight.microsecond, 1)
def test_no_time_given(self):
time_now = datetime.utcnow()
midnight = get_previous_midnight()
nt.assert_equal(midnight.date(), time_now.date())
class TestUserGet(AdminTestCase):
def setUp(self):
super(TestUserGet, self).setUp()
User.remove()
self.user_1 = AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.project = ProjectFactory(creator=self.user_1)
self.project.add_unregistered_contributor(
email='foo@bar.com',
fullname='Weezy F. Baby',
auth=self.auth
)
self.user_3 = AuthUserFactory()
self.user_3.date_confirmed = None
self.user_3.save()
self.user_4 = AuthUserFactory()
def test_get_all_user_count(self):
time_now = datetime.utcnow()
count = get_active_user_count(time_now)
nt.assert_equal(count, 2)
def test_get_unregistered_users(self):
count = get_unregistered_users()
nt.assert_equal(count, 1)
| apache-2.0 |
pdelsante/thug | thug/DOM/History.py | 1 | 3513 | #!/usr/bin/env python
#
# History.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import logging
from .JSClass import JSClass
from .Alexa import Alexa
log = logging.getLogger("Thug")
class History(JSClass):
def __init__(self, window):
self._window = window
self.urls = Alexa
self.pos = len(self.urls) - 1
self.__init_personality()
def __init_personality(self):
self._navigationMode = "automatic"
if log.ThugOpts.Personality.isIE():
self.__init_personality_IE()
return
if log.ThugOpts.Personality.isFirefox():
self.__init_personality_Firefox()
return
if log.ThugOpts.Personality.isChrome():
self.__init_personality_Chrome()
return
if log.ThugOpts.Personality.isSafari():
self.__init_personality_Safari()
return
def __init_personality_IE(self):
pass
def __init_personality_Firefox(self):
self.current = self._current
self.next = self._next
self.previous = self._previous
def __init_personality_Chrome(self):
pass
def __init_personality_Safari(self):
pass
@property
def window(self):
return self._window
@property
def length(self):
return len(self.urls)
@property
def _current(self):
return self.urls[self.pos] if self.length > self.pos and self.pos > 0 else None
@property
def _next(self):
return self.urls[self.pos + 1] if self.length > self.pos + 1 and self.pos > 0 else None
@property
def _previous(self):
return self.urls[self.pos - 1] if self.length > self.pos - 1 and self.pos > 0 else None
def _get_navigationMode(self):
return self._navigationMode
def _set_navigationMode(self, value):
if value in ("automatic", "compatible", "fast", ):
self._navigationMode = value
navigationMode = property(_get_navigationMode, _set_navigationMode)
def pushState(self, state, title, URL):
# self._window.url = URL
pass
def back(self):
"""Loads the previous URL in the history list"""
return self.go(-1)
def forward(self):
"""Loads the next URL in the history list"""
return self.go(1)
def go(self, num_or_url):
"""Loads a specific URL from the history list"""
try:
off = int(num_or_url)
self.pos += off
self.pos = min(max(0, self.pos), len(self.urls) - 1)
self._window.open(self.urls[self.pos])
except ValueError:
self._window.open(num_or_url)
def update(self, url, replace = False):
if replace:
self.urls[self.pos] = url
return
if self.urls[self.pos] != url:
self.urls.insert(self.pos, url)
self.pos += 1
| gpl-2.0 |
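A small usage sketch for the navigation API above. The semantics of `back()`, `forward()` and `go()` with an integer offset versus a URL string follow directly from the code; the stub window is an assumption standing in for Thug's real DOM window, and the example presumes Thug's global options (`log.ThugOpts`) are initialised as they are in a normal Thug run.

```python
from thug.DOM.History import History


class StubWindow(object):
    """Hypothetical stand-in for Thug's DOM window; History.go() only needs open()."""
    def open(self, url):
        print("navigating to", url)


history = History(StubWindow())       # starts positioned at the end of the seeded Alexa list
history.back()                        # same as history.go(-1)
history.forward()                     # same as history.go(1)
history.go("http://example.com/")     # non-integer arguments fall through to window.open(url)
print(history.length)                 # number of URLs currently tracked
```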
guewen/odoo | addons/google_calendar/__openerp__.py | 21 | 1598 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Calendar',
'version': '1.0',
'category': 'Tools',
'description': """
The module adds the possibility to synchronize Google Calendar with OpenERP
========================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['google_account', 'calendar'],
'qweb': ['static/src/xml/*.xml'],
'data': [
'res_config_view.xml',
'security/ir.model.access.csv',
'views/google_calendar.xml',
'views/res_users.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
alexproca/askbot-devel | askbot/migrations/0165_update_thread_search.py | 14 | 35446 | # -*- coding: utf-8 -*-
import askbot
import datetime
import os.path
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.search import postgresql
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
db_engine_name = askbot.get_database_engine_name()
if 'postgresql_psycopg2' in db_engine_name:
script_path = os.path.join(
askbot.get_install_directory(),
'search',
'postgresql',
'thread_and_post_models_10032013.plsql'
)
postgresql.setup_full_text_search(script_path)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.askwidget': {
'Meta': {'object_name': 'AskWidget'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_text_field': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inner_style': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'outer_style': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.bulktagsubscription': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'BulkTagSubscription'},
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['askbot.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['askbot.Tag']", 'symmetrical': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'askbot.draftanswer': {
'Meta': {'object_name': 'DraftAnswer'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['askbot.Thread']"})
},
'askbot.draftquestion': {
'Meta': {'object_name': 'DraftQuestion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'})
},
'askbot.emailfeedsetting': {
'Meta': {'unique_together': "(('subscriber', 'feed_type'),)", 'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.group': {
'Meta': {'object_name': 'Group', '_ormbases': ['auth.Group']},
'description': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'described_group'", 'unique': 'True', 'null': 'True', 'to': "orm['askbot.Post']"}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'is_vip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'moderate_answers_to_enquirers': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'openness': ('django.db.models.fields.SmallIntegerField', [], {'default': '2'}),
'preapproved_email_domains': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'preapproved_emails': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'})
},
'askbot.groupmembership': {
'Meta': {'object_name': 'GroupMembership', '_ormbases': ['auth.AuthUserGroups']},
'authusergroups_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.AuthUserGroups']", 'unique': 'True', 'primary_key': 'True'}),
'level': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'group_posts'", 'symmetrical': 'False', 'through': "orm['askbot.PostToGroup']", 'to': "orm['askbot.Group']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '16'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_column': "'score'"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'posts'", 'null': 'True', 'blank': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.postflagreason': {
'Meta': {'object_name': 'PostFlagReason'},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'details': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_reject_reasons'", 'to': "orm['askbot.Post']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'by_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.posttogroup': {
'Meta': {'unique_together': "(('post', 'group'),)", 'object_name': 'PostToGroup', 'db_table': "'askbot_post_groups'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']"})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.questionwidget': {
'Meta': {'object_name': 'QuestionWidget'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_by': ('django.db.models.fields.CharField', [], {'default': "'-added_at'", 'max_length': '18'}),
'question_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '7'}),
'search_query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.TextField', [], {'default': '"\\n@import url(\'http://fonts.googleapis.com/css?family=Yanone+Kaffeesatz:300,400,700\');\\nbody {\\n overflow: hidden;\\n}\\n\\n#container {\\n width: 200px;\\n height: 350px;\\n}\\nul {\\n list-style: none;\\n padding: 5px;\\n margin: 5px;\\n}\\nli {\\n border-bottom: #CCC 1px solid;\\n padding-bottom: 5px;\\n padding-top: 5px;\\n}\\nli:last-child {\\n border: none;\\n}\\na {\\n text-decoration: none;\\n color: #464646;\\n font-family: \'Yanone Kaffeesatz\', sans-serif;\\n font-size: 15px;\\n}\\n"', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.replyaddress': {
'Meta': {'object_name': 'ReplyAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'allowed_from_email': ('django.db.models.fields.EmailField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reply_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'reply_action': ('django.db.models.fields.CharField', [], {'default': "'auto_answer_or_comment'", 'max_length': '32'}),
'response_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'edit_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'suggested_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'suggested_tags'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'tag_wiki': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'described_tag'", 'unique': 'True', 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'group_threads'", 'symmetrical': 'False', 'through': "orm['askbot.ThreadToGroup']", 'to': "orm['askbot.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '16'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_column': "'score'"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.threadtogroup': {
'Meta': {'unique_together': "(('thread', 'group'),)", 'object_name': 'ThreadToGroup', 'db_table': "'askbot_thread_groups'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'visibility': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['askbot.Post']"})
},
'auth.authusergroups': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'AuthUserGroups', 'db_table': "'auth_user_groups'", 'managed': 'False'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 26, 14, 41, 36, 405005)'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_signature': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_fake': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '128'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 26, 14, 41, 36, 404683)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_marked_tags': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'subscribed_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
symmetrical = True
| gpl-3.0 |
haeusser/tensorflow | tensorflow/python/ops/sparse_ops.py | 21 | 65964 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation. See the @{python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
def _convert_to_sparse_tensor(sp_input):
"""Convert `sp_input` to `SparseTensor` and return it.
Args:
sp_input: `SparseTensor` or `SparseTensorValue`.
Returns:
`sp_input` converted to `SparseTensor`.
Raises:
TypeError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
TypeError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
It is assumed that each input is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
If expand_nonconcat_dim is True, then the output shape along the non-concat
dimensions will be expanded to the largest among all inputs, and it is the
sum of the inputs' sizes along the concat dimension.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
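For illustration, a minimal usage sketch that builds the operands of the first
example above (the variable names are placeholders):
```python
sp_a = tf.SparseTensor(indices=[[0, 2], [1, 0], [1, 1]], values=["a", "b", "c"],
                       dense_shape=[2, 3])
sp_b = tf.SparseTensor(indices=[[0, 1], [0, 2]], values=["d", "e"],
                       dense_shape=[2, 4])
sp_ab = tf.sparse_concat(axis=1, sp_inputs=[sp_a, sp_b])
# sp_ab has dense_shape [2, 7] and the entries shown in the first example.
```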
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
dimensions. Defaults to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:] if axis == -1 else
shape[axis:axis + 1], [] if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(
inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
`SparseReorder` to restore index ordering.
If both arguments are sparse, we perform "clipping" as follows. By default,
if two values sum to zero at some index, the output `SparseTensor` would still
include that particular location in its index, storing a zero in the
corresponding value slot. To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included. In particular,
`thresh == 0.0` (default) means everything is kept and actual thresholding
happens only for a positive value.
For example, suppose the logical sum of two sparse operands is (densified):
[ 2]
[.1 0]
[ 6 -.2]
Then,
* `thresh == 0` (the default): all 5 index/value pairs will be returned.
* `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
index/value pairs will be returned.
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
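A minimal sketch of both call patterns (the operand values are arbitrary):
```python
sp_x = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])
sp_y = tf.SparseTensor(indices=[[0, 0], [1, 0]], values=[3.0, 4.0], dense_shape=[2, 2])
sp_sum = tf.sparse_add(sp_x, sp_y)                # sparse + sparse -> SparseTensor
dense_sum = tf.sparse_add(sp_x, tf.ones([2, 2]))  # sparse + dense  -> dense Tensor
```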
Args:
a: The first operand; `SparseTensor` or `Tensor`.
b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
if all(isinstance(inp, sparse_classes) for inp in [a, b]):
a = _convert_to_sparse_tensor(a)
thresh = ops.convert_to_tensor(
thresh, dtype=a.values.dtype.real_dtype, name="thresh")
output_ind, output_val, output_shape = (gen_sparse_ops._sparse_add(
a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape,
thresh))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
a, b = b, a
return gen_sparse_ops._sparse_tensor_dense_add(
a.indices, a.values, a.dense_shape, b)
def sparse_dense_cwise_add(sp_t, dense_t):
"""Adds up a SparseTensor and a dense Tensor, using these special rules:
(1) Broadcasts the dense side to have the same shape as the sparse side, if
eligible;
(2) Then, only the dense values pointed to by the indices of the SparseTensor
participate in the cwise addition.
By the rules, the result is a logical SparseTensor with exactly the same
indices and shape, but possibly with different non-zero values. The output of
this Op is the resultant non-zero values.
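A minimal sketch, calling this module-level function directly (the operand
values are arbitrary placeholders):
```python
sp = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])
dense = tf.constant([[10.0, 20.0], [30.0, 40.0]])
result = sparse_dense_cwise_add(sp, dense)
# result keeps the same indices; its values evaluate to [11.0, 42.0].
```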
Args:
sp_t: the SparseTensor operand.
dense_t: the dense Tensor operand; must have the same dtype and a
broadcast-compatible shape as `sp_t`.
Returns:
output: the SparseTensor output.
"""
result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
sp_t.dense_shape, dense_t)
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering
along increasing dimension number. The only time ordering can be violated
is during manual manipulation of the indices and values to add entries.
Reordering does not affect the shape of the `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[4, 5]` and
`indices` / `values`:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
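A minimal sketch constructing the out-of-order input shown above:
```python
sp = tf.SparseTensor(indices=[[0, 3], [0, 1], [3, 1], [2, 0]],
                     values=["b", "a", "d", "c"], dense_shape=[4, 5])
sp_canonical = tf.sparse_reorder(sp)  # indices now in row-major order
```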
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same shape and non-empty values, but in
canonical ordering.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
reordered_ind, reordered_val = (gen_sparse_ops._sparse_reorder(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))
return sparse_tensor.SparseTensor(reordered_ind, reordered_val,
array_ops.identity(sp_input.dense_shape))
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
This operation has the same semantics as `reshape` on the represented dense
tensor. The indices of non-empty values in `sp_input` are recomputed based
on the new dense shape, and a new `SparseTensor` is returned containing the
new indices and new shape. The order of non-empty values in `sp_input` is
unchanged.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of `shape` can be -1. The number of dense elements
implied by `shape` must be the same as the number of dense elements
originally represented by `sp_input`.
For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:
[0, 0, 0]: a
[0, 0, 1]: b
[0, 1, 0]: c
[1, 0, 0]: d
[1, 2, 3]: e
and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
shape `[9, 4]` and `indices` / `values`:
[0, 0]: a
[0, 1]: b
[1, 2]: c
[4, 2]: d
[8, 1]: e
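A minimal sketch mirroring the example above:
```python
sp = tf.SparseTensor(
    indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
    values=["a", "b", "c", "d", "e"], dense_shape=[2, 3, 6])
sp_2d = tf.sparse_reshape(sp, shape=[9, -1])  # dense_shape becomes [9, 4]
```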
Args:
sp_input: The input `SparseTensor`.
shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same non-empty values but with indices calculated
by the new dense shape.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
reshaped_ind, reshaped_shape = gen_sparse_ops._sparse_reshape(
sp_input.indices, sp_input.dense_shape, shape, name=name)
return sparse_tensor.SparseTensor(
reshaped_ind, array_ops.identity(sp_input.values),
reshaped_shape)
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
def __repr__(self):
# This is needed to make documentation without fully qualified module paths
return "KeywordRequired()"
def sparse_split(keyword_required=KeywordRequired(),
sp_input=None, num_split=None, axis=None,
name=None, split_dim=None):
"""Split a `SparseTensor` into `num_split` tensors along `axis`.
If `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`,
each of the slices `0:shape[axis] % num_split` gets one extra element along
`axis`. For example, if `axis = 1` and `num_split = 2` and the
input is:
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] =
[ a ]
[b c ]
output_tensor[1] =
[ d e ]
[ ]
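A minimal sketch of the example above; note that this function requires
keyword arguments:
```python
sp = tf.SparseTensor(indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
                     values=["a", "d", "e", "b", "c"], dense_shape=[2, 7])
left, right = tf.sparse_split(sp_input=sp, num_split=2, axis=1)
# left has dense_shape [2, 4]; right has dense_shape [2, 3].
```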
Args:
keyword_required: Python 2 standin for * (temporary for argument reorder)
sp_input: The `SparseTensor` to split.
num_split: A Python integer. The number of ways to split.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
name: A name for the operation (optional).
split_dim: Deprecated old name for axis.
Returns:
`num_split` `SparseTensor` objects resulting from splitting `value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If the deprecated `split_dim` and `axis` are both non None.
"""
if not isinstance(keyword_required, KeywordRequired):
raise ValueError("Keyword arguments are required for this function.")
if sp_input is None:
raise ValueError("sp_input is required")
if num_split is None:
raise ValueError("num_split is required")
if axis is None:
raise ValueError("axis is required")
axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
split_dim)
sp_input = _convert_to_sparse_tensor(sp_input)
output_inds, output_vals, output_shapes = (gen_sparse_ops._sparse_split(
axis,
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
num_split,
name=name))
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
sparse_tensor.SparseTensor(
output_inds[i], output_vals[i], output_shapes[i]))
return sparse_tensors
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
default_value=0,
validate_indices=True,
name=None):
"""Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```python
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values`
is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is True, these properties
are checked during execution.
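A minimal sketch with arbitrary indices and values:
```python
dense = tf.sparse_to_dense(sparse_indices=[[0, 1], [1, 2], [2, 3]],
                           output_shape=[3, 4],
                           sparse_values=[10, 20, 30],
                           default_value=0)
# dense is a [3, 4] Tensor with 10, 20, 30 scattered into place.
```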
Args:
sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops._sparse_to_dense(
sparse_indices,
output_shape,
sparse_values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_sum(x) ==> 3
tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
tf.sparse_reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_sum(
sp_input.indices, sp_input.values,
sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes),
keep_dims)
def sparse_reduce_sum_sparse(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
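A minimal sketch (the input values are arbitrary):
```python
sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]], values=[1, 1, 1],
                     dense_shape=[2, 3])
reduced = tf.sparse_reduce_sum_sparse(sp, axis=1)
# reduced is a SparseTensor with dense_shape [2] and values [2, 1].
```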
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_sum_sparse(
sp_input.indices, sp_input.values,
sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis,
reduction_axes),
keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
name=None):
"""Converts a `SparseTensor` into a dense tensor.
This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.
For example, if `sp_input` has shape `[3, 5]` and non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
and `default_value` is `x`, then the output will be a dense `[3, 5]`
string tensor with values:
[[x a x b x]
[x x x x x]
[c x x x x]]
Indices must not contain repeats; this is checked only if
`validate_indices` is True.
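A minimal sketch mirroring the example above:
```python
sp = tf.SparseTensor(indices=[[0, 1], [0, 3], [2, 0]], values=["a", "b", "c"],
                     dense_shape=[3, 5])
dense = tf.sparse_tensor_to_dense(sp, default_value="x")
```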
Args:
sp_input: The input `SparseTensor`.
default_value: Scalar value to set for indices not specified in
`sp_input`. Defaults to zero.
validate_indices: A boolean value. If `True`, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name prefix for the returned tensors (optional).
Returns:
A dense tensor with shape `sp_input.dense_shape` and values specified by
the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
`default_value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return sparse_to_dense(
sp_input.indices,
sp_input.dense_shape,
sp_input.values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
The last dimension of `sp_input.indices` is discarded and replaced with
the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where
output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True
and False elsewhere in `output`.
For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:
[0, 0, 0]: 0
[0, 1, 0]: 10
[1, 0, 3]: 103
[1, 1, 2]: 150
[1, 1, 3]: 149
[1, 1, 4]: 150
[1, 2, 1]: 121
and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
tensor with False everywhere except at positions
(0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 121).
Note that repeats are allowed in the input SparseTensor.
This op is useful for converting `SparseTensor`s into dense formats for
compatibility with ops that expect dense tensors.
The input `SparseTensor` must be in row-major order.
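A minimal 2-D sketch (the ids and `vocab_size` below are arbitrary):
```python
sp_ids = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]], values=[0, 10, 103],
                         dense_shape=[2, 2])
indicator = tf.sparse_to_indicator(sp_ids, vocab_size=200)
# indicator is a [2, 200] bool Tensor, True at (0, 0), (1, 10) and (1, 103).
```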
Args:
sp_input: A `SparseTensor` with `values` property of type `int32` or
`int64`.
vocab_size: A scalar int64 Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
name: A name prefix for the returned tensors (optional)
Returns:
A dense bool indicator tensor representing the indices with specified value.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = sparse_tensor.SparseTensor(
sp_input.indices, new_values, sp_input.dense_shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
# validate_indices may be False because we allow duplicates in new_indices:
# repeated indices are allowed when creating an indicator matrix.
return sparse_tensor_to_dense(
sp_new, default_value=False, validate_indices=False, name=name)
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
The most common use case for this function occurs when feature ids and
their corresponding values are stored in `Example` protos on disk.
`parse_example` will return a batch of ids and a batch of values, and this
function joins them into a single logical `SparseTensor` for use in
functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.
The `SparseTensor` returned by this function has the following properties:
- `indices` is equivalent to `sp_ids.indices` with the last
dimension discarded and replaced with `sp_ids.values`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn, vocab_size]`.
For example, consider the following feature vectors:
```python
vector1 = [-3, 0, 0, 0, 0, 0]
vector2 = [ 0, 1, 0, 4, 1, 0]
vector3 = [ 5, 0, 0, 9, 0, 0]
```
These might be stored sparsely in the following Example protos by storing
only the feature ids (column number if the vectors are treated as a matrix)
of the non-zero elements and the corresponding values:
```python
examples = [Example(features={
"ids": Feature(int64_list=Int64List(value=[0])),
"values": Feature(float_list=FloatList(value=[-3]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
"values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[0, 3])),
"values": Feature(float_list=FloatList(value=[5, 9]))})]
```
The result of calling parse_example on these examples will produce a
dictionary with entries for "ids" and "values". Passing those two objects
to this function along with vocab_size=6, will produce a `SparseTensor` that
sparsely represents all three instances. Namely, the `indices` property will
contain the coordinates of the non-zero entries in the feature matrix (the
first dimension is the row number in the matrix, i.e., the index within the
batch, and the second dimension is the column number, i.e., the feature id);
`values` will contain the actual values. `shape` will be the shape of the
original matrix, i.e., (3, 6). For our example above, the output will be
equal to:
```python
SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
values=[-3, 1, 4, 1, 5, 9],
dense_shape=[3, 6])
```
This method generalizes to higher dimensions by providing a list for both
the sp_ids and the vocab_size.
In this case the resulting `SparseTensor` has the following properties:
- `indices` is equivalent to `sp_ids[0].indices` with the last
dimension discarded and concatenated with
`sp_ids[0].values, sp_ids[1].values, ...`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn] + vocab_size`.
Args:
sp_ids: A single `SparseTensor` with `values` property of type `int32`
or `int64`, or a Python list of such `SparseTensor`s.
sp_values: A `SparseTensor` of any type.
vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
all `i`.
name: A name prefix for the returned tensors (optional)
already_sorted: A boolean to specify whether the per-batch values in
`sp_values` are already sorted. If so skip sorting, False by default
(optional).
Returns:
A `SparseTensor` compactly representing a batch of feature ids and values,
useful for passing to functions that expect such a `SparseTensor`.
Raises:
TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
`Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
`vocab_size` is not a list of `Tensor`s or Python ints and `sp_ids` is a list.
ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
"""
if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
sp_ids, sparse_tensor.SparseTensor):
sp_ids = [sp_ids]
if not (isinstance(vocab_size, ops.Tensor) or
isinstance(vocab_size, numbers.Integral)):
raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
type(vocab_size))
vocab_size = [vocab_size]
else:
if not isinstance(sp_ids, collections.Iterable):
raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
"Found %s" % type(sp_ids))
if not isinstance(vocab_size, collections.Iterable):
raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
"Found %s" % type(vocab_size))
for dim in vocab_size:
if not (isinstance(dim, ops.Tensor) or
isinstance(dim, numbers.Integral)):
raise TypeError(
"vocab_size has to be a list of Tensors or Python ints. Found %s" %
type(dim))
if len(sp_ids) != len(vocab_size):
raise ValueError("sp_ids and vocab_size have to have equal lengths.")
with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
sp_values = _convert_to_sparse_tensor(sp_values)
ids = []
for sp_ids_dim in sp_ids:
ids_dim = sp_ids_dim.values
if sp_ids_dim.dtype != dtypes.int64:
ids_dim = math_ops.cast(ids_dim, dtypes.int64)
ids += [array_ops.expand_dims(ids_dim, axis=1)]
vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)
new_values = sp_values.values
new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
return result if already_sorted else sparse_reorder(result)
def sparse_retain(sp_input, to_retain):
"""Retains specified non-empty values within a `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
and `to_retain = [True, False, False, True]`, then the output will
be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:
[0, 1]: a
[3, 1]: d
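A minimal sketch of the example above:
```python
sp = tf.SparseTensor(indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
                     values=["a", "b", "c", "d"], dense_shape=[4, 5])
sp_kept = tf.sparse_retain(sp, to_retain=[True, False, False, True])
```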
Args:
sp_input: The input `SparseTensor` with `N` non-empty elements.
to_retain: A bool vector of length `N` with `M` true values.
Returns:
A `SparseTensor` with the same shape as the input and `M` non-empty
elements corresponding to the true positions in `to_retain`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
to_retain = ops.convert_to_tensor(to_retain)
# Shape checking, if shape is known at graph construction time
retain_shape = to_retain.get_shape()
retain_shape.assert_has_rank(1)
sp_input.values.get_shape()[0].merge_with(retain_shape[0])
where_true = array_ops.reshape(array_ops.where(to_retain), [-1])
new_indices = array_ops.gather(sp_input.indices, where_true)
new_values = array_ops.gather(sp_input.values, where_true)
return sparse_tensor.SparseTensor(new_indices, new_values,
array_ops.identity(sp_input.dense_shape))
def sparse_reset_shape(sp_input, new_shape=None):
"""Resets the shape of a `SparseTensor` with indices and values unchanged.
If `new_shape` is None, returns a copy of `sp_input` with its shape reset
to the tight bounding box of `sp_input`.
If `new_shape` is provided, then it must be larger or equal in all dimensions
compared to the shape of `sp_input`. When this condition is met, the returned
SparseTensor will have its shape reset to `new_shape` and its indices and
values unchanged from that of `sp_input.`
For example:
Consider a `sp_input` with shape [2, 3, 5]:
[0, 0, 1]: a
[0, 1, 0]: b
[0, 2, 2]: c
[1, 0, 3]: d
- It is an error to set `new_shape` as [3, 7] since this represents a
rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
during graph construction (if both shapes are known) or an OpError during
run time.
- Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
equal in every dimension compared to the original shape [2, 3, 5].
- On the other hand, setting new_shape as [2, 3, 4] is also an error: The
third dimension is smaller than the original shape [2, 3, 5] (and an
`InvalidArgumentError` will be raised).
- If `new_shape` is None, the returned SparseTensor will have a shape
[2, 3, 4], which is the tight bounding box of `sp_input`.
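A minimal sketch of both behaviors described above:
```python
sp = tf.SparseTensor(indices=[[0, 0, 1], [0, 1, 0], [0, 2, 2], [1, 0, 3]],
                     values=["a", "b", "c", "d"], dense_shape=[2, 3, 5])
tight = tf.sparse_reset_shape(sp)                       # dense_shape -> [2, 3, 4]
grown = tf.sparse_reset_shape(sp, new_shape=[2, 3, 6])  # dense_shape -> [2, 3, 6]
```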
Args:
sp_input: The input `SparseTensor`.
new_shape: None or a vector representing the new shape for the returned
`SparseTensor`.
Returns:
A `SparseTensor` with indices and values unchanged from `sp_input`. Its shape
is `new_shape` if that is set. Otherwise it is the tight bounding box of
`sp_input`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If `new_shape` represents a tensor with a different rank from
that of `sp_input` (if shapes are known when graph is constructed).
OpError:
- If `new_shape` has dimension sizes that are too small.
- If shapes are not known during graph construction time, and during run
time it is found out that the ranks do not match.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
in_indices = array_ops.identity(sp_input.indices)
in_values = array_ops.identity(sp_input.values)
in_shape = array_ops.identity(sp_input.dense_shape)
if new_shape is None:
dim_low_bound = math_ops.reduce_max(in_indices, 0)
output_shape_tensor = math_ops.add(dim_low_bound,
array_ops.ones_like(in_shape))
else:
output_shape_tensor = ops.convert_to_tensor(new_shape)
output_shape_tensor.get_shape().assert_has_rank(1)
output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
# For cases when shape is known during graph construction, this catches the
# error before the sparse_tensor.SparseTensor catches it.
output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])
# For cases where shape is not known during graph construction.
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_equal(
array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))],
output_shape_tensor)
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_less_equal(in_shape, output_shape_tensor)],
output_shape_tensor)
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
def sparse_fill_empty_rows(sp_input, default_value, name=None):
"""Fills empty rows in the input 2-D `SparseTensor` with a default value.
This op adds entries with the specified `default_value` at index
`[row, 0]` for any row in the input that does not already have a value.
For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
[0, 1]: a
[0, 3]: b
[1, 0]: default_value
[2, 0]: c
[3, 1]: d
[4, 0]: default_value
Note that the input may have empty columns at the end, with no effect on
this op.
The output `SparseTensor` will be in row-major order and will have the
same shape as the input.
This op also returns an indicator vector such that
empty_row_indicator[i] = True iff row i was an empty row.
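A minimal sketch of the example above (the fill value is arbitrary):
```python
sp = tf.SparseTensor(indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
                     values=["a", "b", "c", "d"], dense_shape=[5, 6])
filled, empty_rows = tf.sparse_fill_empty_rows(sp, default_value="fill")
# empty_rows evaluates to [False, True, False, False, True].
```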
Args:
sp_input: A `SparseTensor` with shape `[N, M]`.
default_value: The value to fill for empty rows, with the same type as
`sp_input.`
name: A name prefix for the returned tensors (optional)
Returns:
sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
rows filled in with `default_value`.
empty_row_indicator: A bool vector of length `N` indicating whether each
input row was empty.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
default_value = ops.convert_to_tensor(
default_value, dtype=sp_input.values.dtype)
num_rows = math_ops.cast(sp_input.dense_shape[0], dtypes.int32)
all_row_indices = math_ops.cast(math_ops.range(num_rows), dtypes.int64)
empty_row_indices, _ = array_ops.setdiff1d(all_row_indices,
sp_input.indices[:, 0])
empty_row_indicator = sparse_to_dense(
empty_row_indices,
array_ops.expand_dims(sp_input.dense_shape[0], -1), True,
False)
empty_row_indices_as_column = array_ops.reshape(empty_row_indices, [-1, 1])
additional_indices = array_ops.concat([
empty_row_indices_as_column,
array_ops.zeros_like(empty_row_indices_as_column)
], 1)
additional_values = array_ops.fill(
array_ops.shape(empty_row_indices), default_value)
all_indices_unordered = array_ops.concat(
[sp_input.indices, additional_indices], 0)
all_values_unordered = array_ops.concat(
[sp_input.values, additional_values], 0)
sp_unordered_output = sparse_tensor.SparseTensor(
all_indices_unordered,
all_values_unordered, sp_input.dense_shape)
sp_ordered_output = sparse_reorder(sp_unordered_output)
return sp_ordered_output, empty_row_indicator
def serialize_sparse(sp_input, name=None):
"""Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional).
Returns:
A string 3-vector (1D `Tensor`), with each column representing the
serialized `SparseTensor`'s indices, values, and shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._serialize_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)
def serialize_many_sparse(sp_input, name=None):
"""Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `3` columns.
Each column represents serialized `SparseTensor`'s indices, values, and
shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._serialize_many_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.
The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`serialize_sparse`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
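A minimal round-trip sketch through `serialize_many_sparse` (the operand
values are arbitrary placeholders):
```python
sp_batch = tf.SparseTensor(indices=[[0, 0], [1, 10]], values=[1.0, 2.0],
                           dense_shape=[2, 50])
serialized = tf.serialize_many_sparse(sp_batch)  # a [2, 3] string matrix
roundtrip = tf.deserialize_many_sparse(serialized, dtype=tf.float32)
```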
Args:
serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
The serialized and packed `SparseTensor` objects.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops._deserialize_many_sparse(
serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
def sparse_tensor_dense_matmul(sp_a,
b,
adjoint_a=False,
adjoint_b=False,
name=None):
# pylint: disable=line-too-long
"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
No validity checking is performed on the indices of A. However, the following
input format is recommended for optimal behavior:
if adjoint_a == false:
A should be sorted in lexicographically increasing order. Use
sparse_reorder if you're not sure.
if adjoint_a == true:
A should be sorted in order of increasing dimension 1 (i.e., "column major"
order instead of "row major" order).
Deciding when to use sparse_tensor_dense_matmul vs. matmul(sp_a=True):
There are a number of questions to ask in the decision process, including:
* Will the SparseTensor A fit in memory if densified?
* Is the column count of the product large (>> 1)?
* Is the density of A larger than approximately 15%?
If the answer to several of these questions is yes, consider
converting the `SparseTensor` to a dense one and using `tf.matmul` with
`sp_a=True`.
This operation tends to perform well when A is more sparse, when the column
size of the product is small (e.g. matrix-vector multiplication), and when
`sp_a.dense_shape` takes on large values.
Below is a rough speed comparison between sparse_tensor_dense_matmul,
labelled 'sparse', and matmul(sp_a=True), labelled 'dense'. For purposes of
the comparison, the time spent converting from a SparseTensor to a dense
Tensor is not included, so it is overly conservative with respect to
the time ratio.
Benchmark system:
CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
GPU: NVidia Tesla k40c
Compiled with:
`-c opt --config=cuda --copt=-mavx`
```
tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
A sparse [m, k] with % nonzero values between 1% and 80%
B dense [k, n]
% nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense)
0.01 1 True 100 100 0.000221166 0.00010154 0.459112
0.01 1 True 100 1000 0.00033858 0.000109275 0.322745
0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385
0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669
0.01 1 False 100 100 0.000208085 0.000107603 0.51711
0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762
0.01 1 False 1000 100 0.000308222 0.00010345 0.335635
0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124
0.01 10 True 100 100 0.000218522 0.000105537 0.482958
0.01 10 True 100 1000 0.000340882 0.000111641 0.327506
0.01 10 True 1000 100 0.000315472 0.000117376 0.372064
0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128
0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354
0.01 10 False 100 1000 0.000330552 0.000112615 0.340687
0.01 10 False 1000 100 0.000341277 0.000114097 0.334324
0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549
0.01 25 True 100 100 0.000207806 0.000105977 0.509981
0.01 25 True 100 1000 0.000322879 0.00012921 0.400181
0.01 25 True 1000 100 0.00038262 0.00014158 0.370035
0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504
0.01 25 False 100 100 0.000209401 0.000104696 0.499979
0.01 25 False 100 1000 0.000321161 0.000130737 0.407076
0.01 25 False 1000 100 0.000377012 0.000136801 0.362856
0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413
0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833
0.2 1 True 100 1000 0.000348674 0.000147475 0.422959
0.2 1 True 1000 100 0.000336908 0.00010122 0.300439
0.2 1 True 1000 1000 0.001022 0.000203274 0.198898
0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746
0.2 1 False 100 1000 0.000356127 0.000146824 0.41228
0.2 1 False 1000 100 0.000322664 0.000100918 0.312764
0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648
0.2 10 True 100 100 0.000211692 0.000109903 0.519165
0.2 10 True 100 1000 0.000372819 0.000164321 0.440753
0.2 10 True 1000 100 0.000338651 0.000144806 0.427596
0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064
0.2 10 False 100 100 0.000215727 0.000110502 0.512231
0.2 10 False 100 1000 0.000375419 0.0001613 0.429653
0.2 10 False 1000 100 0.000336999 0.000145628 0.432132
0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618
0.2 25 True 100 100 0.000218705 0.000129913 0.594009
0.2 25 True 100 1000 0.000394794 0.00029428 0.745402
0.2 25 True 1000 100 0.000404483 0.0002693 0.665788
0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052
0.2 25 False 100 100 0.000221494 0.0001306 0.589632
0.2 25 False 100 1000 0.000396436 0.000297204 0.74969
0.2 25 False 1000 100 0.000409346 0.000270068 0.659754
0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046
0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836
0.5 1 True 100 1000 0.000415328 0.000223073 0.537101
0.5 1 True 1000 100 0.000358324 0.00011269 0.314492
0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851
0.5 1 False 100 100 0.000224196 0.000101423 0.452386
0.5 1 False 100 1000 0.000400987 0.000223286 0.556841
0.5 1 False 1000 100 0.000368825 0.00011224 0.304318
0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563
0.5 10 True 100 100 0.000222125 0.000112308 0.505608
0.5 10 True 100 1000 0.000461088 0.00032357 0.701753
0.5 10 True 1000 100 0.000394624 0.000225497 0.571422
0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801
0.5 10 False 100 100 0.000232083 0.000114978 0.495418
0.5 10 False 100 1000 0.000454574 0.000324632 0.714146
0.5 10 False 1000 100 0.000379097 0.000227768 0.600817
0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638
0.5 25 True 100 100 0.00023429 0.000151703 0.647501
0.5 25 True 100 1000 0.000497462 0.000598873 1.20386
0.5 25 True 1000 100 0.000460778 0.000557038 1.20891
0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845
0.5 25 False 100 100 0.000228981 0.000155334 0.678371
0.5 25 False 100 1000 0.000496139 0.000620789 1.25124
0.5 25 False 1000 100 0.00045473 0.000551528 1.21287
0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927
0.8 1 True 100 100 0.000222037 0.000105301 0.47425
0.8 1 True 100 1000 0.000410804 0.000329327 0.801664
0.8 1 True 1000 100 0.000349735 0.000131225 0.375212
0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633
0.8 1 False 100 100 0.000214079 0.000107486 0.502085
0.8 1 False 100 1000 0.000413746 0.000323244 0.781261
0.8 1 False 1000 100 0.000348983 0.000131983 0.378193
0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282
0.8 10 True 100 100 0.000229159 0.00011825 0.516017
0.8 10 True 100 1000 0.000498845 0.000532618 1.0677
0.8 10 True 1000 100 0.000383126 0.00029935 0.781336
0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689
0.8 10 False 100 100 0.000230783 0.000124958 0.541452
0.8 10 False 100 1000 0.000493393 0.000550654 1.11606
0.8 10 False 1000 100 0.000377167 0.000298581 0.791642
0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024
0.8 25 True 100 100 0.000233496 0.000175241 0.75051
0.8 25 True 100 1000 0.00055654 0.00102658 1.84458
0.8 25 True 1000 100 0.000463814 0.000783267 1.68875
0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132
0.8 25 False 100 100 0.000240243 0.000175047 0.728625
0.8 25 False 100 1000 0.000578102 0.00104499 1.80763
0.8 25 False 1000 100 0.000485113 0.000776849 1.60138
0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992
```
Args:
sp_a: SparseTensor A, of rank 2.
b: A dense Matrix with the same dtype as sp_a.
adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,
this is transpose(conj(A)). Otherwise it's transpose(A).
adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,
this is transpose(conj(B)). Otherwise it's transpose(B).
name: A name prefix for the returned tensors (optional)
Returns:
A dense matrix (pseudo-code in dense np.matrix notation):
A = A.H if adjoint_a else A
B = B.H if adjoint_b else B
return A*B
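A minimal usage sketch (the operands are arbitrary):
```python
sp_a = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                       dense_shape=[2, 3])
b = tf.ones([3, 4])
product = tf.sparse_tensor_dense_matmul(sp_a, b)  # a dense [2, 4] Tensor
```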
"""
# pylint: enable=line-too-long
sp_a = _convert_to_sparse_tensor(sp_a)
with ops.name_scope(name, "SparseTensorDenseMatMul",
[sp_a.indices, sp_a.values, b]) as name:
b = ops.convert_to_tensor(b, name="b")
return gen_sparse_ops._sparse_tensor_dense_mat_mul(
a_indices=sp_a.indices,
a_values=sp_a.values,
a_shape=sp_a.dense_shape,
b=b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
def sparse_softmax(sp_input, name=None):
"""Applies softmax to a batched N-D `SparseTensor`.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
(where `N >= 2`), and with indices sorted in the canonical lexicographic
order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each
innermost logical submatrix with shape `[B, C]`, but with the catch that *the
implicitly zero elements do not participate*. Specifically, the algorithm is
equivalent to:
(1) Applies `tf.nn.softmax()` to a densified view of each innermost
submatrix with shape `[B, C]`, along the size-C dimension;
(2) Masks out the original implicitly-zero locations;
(3) Renormalizes the remaining elements.
Hence, the `SparseTensor` result has exactly the same non-zero indices and
shape.
Example:
```python
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
shape = [2, 2, 2] # 3-D SparseTensor
values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
indices = np.vstack(np.where(values)).astype(np.int64).T
result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
# ...returning a 3-D SparseTensor, equivalent to:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
```
Args:
sp_input: N-D `SparseTensor`, where `N >= 2`.
name: optional name of the operation.
Returns:
output: N-D `SparseTensor` representing the results.
"""
with ops.name_scope(name, "SparseSoftmax",
[sp_input.indices, sp_input.values]) as name:
out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
sp_input.dense_shape)
return sparse_tensor.SparseTensor(
sp_input.indices, out_vals, sp_input.dense_shape)
def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(name, "SparseSparseMaximum", [sp_a.indices, sp_a.values,
sp_b.indices,
sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(name, "SparseSparseMinimum", [sp_a.indices, sp_a.values,
sp_b.indices,
sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
def sparse_transpose(sp_input, perm=None, name=None):
"""Transposes a `SparseTensor`
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[5, 4]` and
`indices` / `values`:
[0, 2]: c
[1, 0]: a
[1, 3]: d
[3, 0]: b
Args:
sp_input: The input `SparseTensor`.
perm: A permutation of the dimensions of `sp_input`.
name: A name prefix for the returned tensors (optional)
Returns:
A transposed `SparseTensor`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
with ops.op_scope([sp_input], name, "SparseTranspose") as name:
if perm is None:
rank = array_ops.rank(sp_input)
perm = (rank - 1) - math_ops.range(0, rank, 1)
indices = sp_input.indices
transposed_indices = array_ops.transpose(
array_ops.gather(array_ops.transpose(indices), perm))
dense_shape = sp_input.dense_shape
transposed_dense_shape = array_ops.gather(dense_shape, perm)
transposed_st = sparse_tensor.SparseTensor(
transposed_indices, sp_input.values,
transposed_dense_shape)
transposed_st = sparse_reorder(transposed_st)
return transposed_st
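# Illustrative usage sketch (not part of the library): transposing the
# `[4, 5]` SparseTensor from the docstring above, assuming the public aliases
# `tf.SparseTensor` and `tf.sparse_transpose` are available:
#
#   sp = tf.SparseTensor(indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
#                        values=["a", "b", "c", "d"], dense_shape=[4, 5])
#   transposed = tf.sparse_transpose(sp)  # dense_shape becomes [5, 4]
#   # indices become [[0, 2], [1, 0], [1, 3], [3, 0]] after the reorder step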
def _add_sparse_to_tensors_map(sp_input, container=None,
shared_name=None, name=None):
"""Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
Args:
sp_input: The input `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string 1-vector (1D `Tensor`), with the single element representing
a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._add_sparse_to_tensors_map(
sp_input.indices, sp_input.values, sp_input.dense_shape,
container=container, shared_name=shared_name, name=name)
def _add_many_sparse_to_tensors_map(sp_input, container=None,
shared_name=None, name=None):
"""Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `1` column.
Each row represents a unique handle to a `SparseTensor` stored by
the `SparseTensorMap` underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops._add_many_sparse_to_tensors_map(
sp_input.indices, sp_input.values, sp_input.dense_shape,
container=container, shared_name=shared_name, name=name)
def _take_many_sparse_from_tensors_map(
sparse_map_op, sparse_handles, rank=None, name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError("sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." %
sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops._take_many_sparse_from_tensors_map(
sparse_handles, dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
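# Illustrative sketch (not part of the library): the intended round trip pairs
# the two private helpers above; names and shapes follow their docstrings:
#
#   handles = _add_many_sparse_to_tensors_map(sp_batch)
#   restored = _take_many_sparse_from_tensors_map(
#       handles.op, handles, rank=sp_batch.get_shape().ndims)
#   # `restored` concatenates the stored rows back along a new leading
#   # minibatch dimension; run sparse_reorder() on it if canonical index
#   # ordering is required afterwards.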
| apache-2.0 |
pypa/setuptools | setuptools/extension.py | 6 | 1684 | import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from .monkey import get_unpatched
def _have_cython():
"""
Return True if Cython can be imported.
"""
cython_impl = 'Cython.Distutils.build_ext'
try:
# from (cython_impl) import build_ext
__import__(cython_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
# for compatibility
have_pyrex = _have_cython
_Extension = get_unpatched(distutils.core.Extension)
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def __init__(self, name, sources, *args, **kw):
# The *args is needed for compatibility as calls may use positional
# arguments. py_limited_api may be set only via keyword.
self.py_limited_api = kw.pop("py_limited_api", False)
_Extension.__init__(self, name, sources, *args, **kw)
def _convert_pyx_sources_to_lang(self):
"""
Replace sources with .pyx extensions to sources with the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
"""
if _have_cython():
# the build has Cython, so allow it to compile the .pyx files
return
lang = self.language or ''
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
sub = functools.partial(re.sub, '.pyx$', target_ext)
self.sources = list(map(sub, self.sources))
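# Illustrative sketch (not part of the package): what the substitution above
# does when Cython is unavailable (names here are invented for the example):
#
#   ext = Extension('pkg.fast', sources=['pkg/fast.pyx'], language='c++')
#   ext._convert_pyx_sources_to_lang()
#   # without Cython installed, ext.sources becomes ['pkg/fast.cpp'];
#   # with Cython installed, the .pyx sources are left for Cython to compile.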
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
| mit |
mrhanky17/irc3 | irc3/_rfc.py | 2 | 39293 | class retcode(int):
name = None
re = None
RPL_TRACELINK = retcode(200)
RPL_TRACELINK.name = "RPL_TRACELINK"
RPL_TRACELINK.re = (
"^:(?P<srv>\S+) 200 (?P<me>\S+) "
"(?P<next_server>\S+)")
RPL_TRACELINK.tpl = (
':{c.srv} 200 {c.nick} '
'{next_server}')
RPL_TRACELINK.params = ['srv', 'me', 'next_server']
RPL_TRACECONNECTING = retcode(201)
RPL_TRACECONNECTING.name = "RPL_TRACECONNECTING"
RPL_TRACECONNECTING.re = (
"^:(?P<srv>\S+) 201 (?P<me>\S+) "
"Try. (?P<class>\S+) "
"(?P<server>\S+)")
RPL_TRACECONNECTING.tpl = (
':{c.srv} 201 {c.nick} '
'Try. {class} {server}')
RPL_TRACECONNECTING.params = ['srv', 'me', 'class', 'server']
RPL_TRACEHANDSHAKE = retcode(202)
RPL_TRACEHANDSHAKE.name = "RPL_TRACEHANDSHAKE"
RPL_TRACEHANDSHAKE.re = (
"^:(?P<srv>\S+) 202 (?P<me>\S+) "
"H.S. (?P<class>\S+) "
"(?P<server>\S+)")
RPL_TRACEHANDSHAKE.tpl = (
':{c.srv} 202 {c.nick} '
'H.S. {class} {server}')
RPL_TRACEHANDSHAKE.params = ['srv', 'me', 'class', 'server']
RPL_TRACEUNKNOWN = retcode(203)
RPL_TRACEUNKNOWN.name = "RPL_TRACEUNKNOWN"
RPL_TRACEUNKNOWN.re = (
"^:(?P<srv>\S+) 203 (?P<me>\S+) "
"\S+ (?P<class>\S+) [(?P<clientip>\S+)]")
RPL_TRACEUNKNOWN.tpl = (
':{c.srv} 203 {c.nick} '
'???? {class} [{clientip}]')
RPL_TRACEUNKNOWN.params = ['srv', 'me', 'class', 'clientip']
RPL_TRACEOPERATOR = retcode(204)
RPL_TRACEOPERATOR.name = "RPL_TRACEOPERATOR"
RPL_TRACEOPERATOR.re = (
"^:(?P<srv>\S+) 204 (?P<me>\S+) "
"Oper (?P<class>\S+) (?P<nick>\S+)")
RPL_TRACEOPERATOR.tpl = (
':{c.srv} 204 {c.nick} '
'Oper {class} {nick}')
RPL_TRACEOPERATOR.params = ['srv', 'me', 'class', 'nick']
RPL_TRACEUSER = retcode(205)
RPL_TRACEUSER.name = "RPL_TRACEUSER"
RPL_TRACEUSER.re = (
"^:(?P<srv>\S+) 205 (?P<me>\S+) "
"User (?P<class>\S+) (?P<nick>\S+)")
RPL_TRACEUSER.tpl = (
':{c.srv} 205 {c.nick} '
'User {class} {nick}')
RPL_TRACEUSER.params = ['srv', 'me', 'class', 'nick']
RPL_TRACESERVER = retcode(206)
RPL_TRACESERVER.name = "RPL_TRACESERVER"
RPL_TRACESERVER.re = (
"^:(?P<srv>\S+) 206 (?P<me>\S+) "
"(?P<mask>\S+)")
RPL_TRACESERVER.tpl = (
':{c.srv} 206 {c.nick} '
'{mask}')
RPL_TRACESERVER.params = ['srv', 'me', 'mask']
RPL_TRACENEWTYPE = retcode(208)
RPL_TRACENEWTYPE.name = "RPL_TRACENEWTYPE"
RPL_TRACENEWTYPE.re = (
"^:(?P<srv>\S+) 208 (?P<me>\S+) "
"(?P<newtype>\S+) 0 (?P<client>\S+)")
RPL_TRACENEWTYPE.tpl = (
':{c.srv} 208 {c.nick} '
'{newtype} 0 {client}')
RPL_TRACENEWTYPE.params = ['srv', 'me', 'newtype', 'client']
RPL_STATSLINKINFO = retcode(211)
RPL_STATSLINKINFO.name = "RPL_STATSLINKINFO"
RPL_STATSLINKINFO.re = (
"^:(?P<srv>\S+) 211 (?P<me>\S+) "
"(?P<linkname>\S+) (?P<sendq>\S+) "
"(?P<sent_messages>\S+) (?P<received_bytes>\S+) (?P<time_open>\S+)")
RPL_STATSLINKINFO.tpl = (
':{c.srv} 211 {c.nick} '
':{linkname} {sendq} {sent_messages} {received_bytes} {time_open}')
RPL_STATSLINKINFO.params = [
'srv',
'me',
'linkname',
'sendq',
'sent_messages',
'received_bytes',
'time_open']
RPL_STATSCOMMANDS = retcode(212)
RPL_STATSCOMMANDS.name = "RPL_STATSCOMMANDS"
RPL_STATSCOMMANDS.re = (
"^:(?P<srv>\S+) 212 (?P<me>\S+) "
"(?P<cmd>\S+) (?P<count>\S+)")
RPL_STATSCOMMANDS.tpl = (
':{c.srv} 212 {c.nick} '
'{cmd} {count}')
RPL_STATSCOMMANDS.params = ['srv', 'me', 'cmd', 'count']
RPL_STATSCLINE = retcode(213)
RPL_STATSCLINE.name = "RPL_STATSCLINE"
RPL_STATSCLINE.re = (
"^:(?P<srv>\S+) 213 (?P<me>\S+) "
"C (?P<host>\S+) . (?P<nick>\S+) (?P<port>\S+) (?P<class>\S+)")
RPL_STATSCLINE.tpl = (
':{c.srv} 213 {c.nick} '
'C {host} * {nick} {port} {class}')
RPL_STATSCLINE.params = ['srv', 'me', 'host', 'nick', 'port', 'class']
RPL_STATSNLINE = retcode(214)
RPL_STATSNLINE.name = "RPL_STATSNLINE"
RPL_STATSNLINE.re = (
"^:(?P<srv>\S+) 214 (?P<me>\S+) "
"N (?P<host>\S+) . (?P<nick>\S+) (?P<port>\S+) (?P<class>\S+)")
RPL_STATSNLINE.tpl = (
':{c.srv} 214 {c.nick} '
'N {host} * {nick} {port} {class}')
RPL_STATSNLINE.params = ['srv', 'me', 'host', 'nick', 'port', 'class']
RPL_STATSILINE = retcode(215)
RPL_STATSILINE.name = "RPL_STATSILINE"
RPL_STATSILINE.re = (
"^:(?P<srv>\S+) 215 (?P<me>\S+) "
"I (?P<host>\S+) . (?P<host1>\S+) (?P<port>\S+) (?P<class>\S+)")
RPL_STATSILINE.tpl = (
':{c.srv} 215 {c.nick} '
'I {host} * {host1} {port} {class}')
RPL_STATSILINE.params = ['srv', 'me', 'host', 'host1', 'port', 'class']
RPL_STATSKLINE = retcode(216)
RPL_STATSKLINE.name = "RPL_STATSKLINE"
RPL_STATSKLINE.re = (
"^:(?P<srv>\S+) 216 (?P<me>\S+) "
"K (?P<host>\S+) . (?P<username>\S+) (?P<port>\S+) (?P<class>\S+)")
RPL_STATSKLINE.tpl = (
':{c.srv} 216 {c.nick} '
'K {host} * {username} {port} {class}')
RPL_STATSKLINE.params = ['srv', 'me', 'host', 'username', 'port', 'class']
RPL_STATSYLINE = retcode(218)
RPL_STATSYLINE.name = "RPL_STATSYLINE"
RPL_STATSYLINE.re = (
"^:(?P<srv>\S+) 218 (?P<me>\S+) "
"frequency> (?P<max_sendq>\S+)")
RPL_STATSYLINE.tpl = (
':{c.srv} 218 {c.nick} '
'frequency> {max_sendq}')
RPL_STATSYLINE.params = ['srv', 'me', 'max_sendq']
RPL_ENDOFSTATS = retcode(219)
RPL_ENDOFSTATS.name = "RPL_ENDOFSTATS"
RPL_ENDOFSTATS.re = (
"^:(?P<srv>\S+) 219 (?P<me>\S+) "
"(?P<stats_letter>\S+) :(?P<data>.*)")
RPL_ENDOFSTATS.tpl = (
':{c.srv} 219 {c.nick} '
'{stats_letter} :End of /STATS report')
RPL_ENDOFSTATS.params = ['srv', 'me', 'stats_letter', 'data']
RPL_UMODEIS = retcode(221)
RPL_UMODEIS.name = "RPL_UMODEIS"
RPL_UMODEIS.re = (
"^:(?P<srv>\S+) 221 (?P<me>\S+) "
"(?P<user_mode_string>\S+)")
RPL_UMODEIS.tpl = (
':{c.srv} 221 {c.nick} '
'{user_mode_string}')
RPL_UMODEIS.params = ['srv', 'me', 'user_mode_string']
RPL_STATSLLINE = retcode(241)
RPL_STATSLLINE.name = "RPL_STATSLLINE"
RPL_STATSLLINE.re = (
"^:(?P<srv>\S+) 241 (?P<me>\S+) "
"L (?P<hostmask>\S+) . (?P<servername>\S+) (?P<maxdepth>\S+)")
RPL_STATSLLINE.tpl = (
':{c.srv} 241 {c.nick} '
'L {hostmask} * {servername} {maxdepth}')
RPL_STATSLLINE.params = ['srv', 'me', 'hostmask', 'servername', 'maxdepth']
RPL_STATSUPTIME = retcode(242)
RPL_STATSUPTIME.name = "RPL_STATSUPTIME"
RPL_STATSUPTIME.re = (
"^:(?P<srv>\S+) 242 (?P<me>\S+) "
":(?P<data>.*)")
RPL_STATSUPTIME.tpl = (
':{c.srv} 242 {c.nick} '
':Server Up{days}days {hours}')
RPL_STATSUPTIME.params = ['srv', 'me', 'data']
RPL_STATSOLINE = retcode(243)
RPL_STATSOLINE.name = "RPL_STATSOLINE"
RPL_STATSOLINE.re = (
"^:(?P<srv>\S+) 243 (?P<me>\S+) "
"O (?P<hostmask>\S+) . (?P<nick>\S+)")
RPL_STATSOLINE.tpl = (
':{c.srv} 243 {c.nick} '
'O {hostmask} * {nick}')
RPL_STATSOLINE.params = ['srv', 'me', 'hostmask', 'nick']
RPL_STATSHLINE = retcode(244)
RPL_STATSHLINE.name = "RPL_STATSHLINE"
RPL_STATSHLINE.re = (
"^:(?P<srv>\S+) 244 (?P<me>\S+) "
"H (?P<hostmask>\S+) . (?P<servername>\S+)")
RPL_STATSHLINE.tpl = (
':{c.srv} 244 {c.nick} '
'H {hostmask} * {servername}')
RPL_STATSHLINE.params = ['srv', 'me', 'hostmask', 'servername']
RPL_LUSERCLIENT = retcode(251)
RPL_LUSERCLIENT.name = "RPL_LUSERCLIENT"
RPL_LUSERCLIENT.re = (
"^:(?P<srv>\S+) 251 (?P<me>\S+) "
":(?P<data>.*)")
RPL_LUSERCLIENT.tpl = (
':{c.srv} 251 {c.nick} '
':There are {x} users and {y} invisible on {z} servers')
RPL_LUSERCLIENT.params = ['srv', 'me', 'data']
RPL_LUSEROP = retcode(252)
RPL_LUSEROP.name = "RPL_LUSEROP"
RPL_LUSEROP.re = (
"^:(?P<srv>\S+) 252 (?P<me>\S+) "
"(?P<x>\S+) :(?P<data>.*)")
RPL_LUSEROP.tpl = (
':{c.srv} 252 {c.nick} '
'{x} :operator(s) online')
RPL_LUSEROP.params = ['srv', 'me', 'x', 'data']
RPL_LUSERUNKNOWN = retcode(253)
RPL_LUSERUNKNOWN.name = "RPL_LUSERUNKNOWN"
RPL_LUSERUNKNOWN.re = (
"^:(?P<srv>\S+) 253 (?P<me>\S+) "
"(?P<x>\S+) :(?P<data>.*)")
RPL_LUSERUNKNOWN.tpl = (
':{c.srv} 253 {c.nick} '
'{x} :unknown connection(s)')
RPL_LUSERUNKNOWN.params = ['srv', 'me', 'x', 'data']
RPL_LUSERCHANNELS = retcode(254)
RPL_LUSERCHANNELS.name = "RPL_LUSERCHANNELS"
RPL_LUSERCHANNELS.re = (
"^:(?P<srv>\S+) 254 (?P<me>\S+) "
"(?P<x>\S+) :(?P<data>.*)")
RPL_LUSERCHANNELS.tpl = (
':{c.srv} 254 {c.nick} '
'{x} :channels formed')
RPL_LUSERCHANNELS.params = ['srv', 'me', 'x', 'data']
RPL_LUSERME = retcode(255)
RPL_LUSERME.name = "RPL_LUSERME"
RPL_LUSERME.re = (
"^:(?P<srv>\S+) 255 (?P<me>\S+) "
":(?P<data>.*)")
RPL_LUSERME.tpl = (
':{c.srv} 255 {c.nick} '
':I have {x} clients and {y}')
RPL_LUSERME.params = ['srv', 'me', 'data']
RPL_ADMINME = retcode(256)
RPL_ADMINME.name = "RPL_ADMINME"
RPL_ADMINME.re = (
"^:(?P<srv>\S+) 256 (?P<me>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
RPL_ADMINME.tpl = (
':{c.srv} 256 {c.nick} '
'{server} :Administrative info')
RPL_ADMINME.params = ['srv', 'me', 'server', 'data']
RPL_ADMINLOC1 = retcode(257)
RPL_ADMINLOC1.name = "RPL_ADMINLOC1"
RPL_ADMINLOC1.re = (
"^:(?P<srv>\S+) 257 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ADMINLOC1.tpl = (
':{c.srv} 257 {c.nick} '
':{admin_info}')
RPL_ADMINLOC1.params = ['srv', 'me', 'data']
RPL_ADMINLOC2 = retcode(258)
RPL_ADMINLOC2.name = "RPL_ADMINLOC2"
RPL_ADMINLOC2.re = (
"^:(?P<srv>\S+) 258 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ADMINLOC2.tpl = (
':{c.srv} 258 {c.nick} '
':{admin_info}')
RPL_ADMINLOC2.params = ['srv', 'me', 'data']
RPL_ADMINEMAIL = retcode(259)
RPL_ADMINEMAIL.name = "RPL_ADMINEMAIL"
RPL_ADMINEMAIL.re = (
"^:(?P<srv>\S+) 259 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ADMINEMAIL.tpl = (
':{c.srv} 259 {c.nick} '
':{admin_info}')
RPL_ADMINEMAIL.params = ['srv', 'me', 'data']
RPL_TRACELOG = retcode(261)
RPL_TRACELOG.name = "RPL_TRACELOG"
RPL_TRACELOG.re = (
"^:(?P<srv>\S+) 261 (?P<me>\S+) "
"File (?P<logfile>\S+) (?P<debug_level>\S+)")
RPL_TRACELOG.tpl = (
':{c.srv} 261 {c.nick} '
'File {logfile} {debug_level}')
RPL_TRACELOG.params = ['srv', 'me', 'logfile', 'debug_level']
RPL_AWAY = retcode(301)
RPL_AWAY.name = "RPL_AWAY"
RPL_AWAY.re = (
"^:(?P<srv>\S+) 301 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
RPL_AWAY.tpl = (
':{c.srv} 301 {c.nick} '
'{nick} :{away_message}')
RPL_AWAY.params = ['srv', 'me', 'nick', 'data']
RPL_USERHOST = retcode(302)
RPL_USERHOST.name = "RPL_USERHOST"
RPL_USERHOST.re = (
"^:(?P<srv>\S+) 302 (?P<me>\S+) "
":(?P<data>.*)")
RPL_USERHOST.tpl = (
':{c.srv} 302 {c.nick} '
':[{reply}{{space}{reply}}]')
RPL_USERHOST.params = ['srv', 'me', 'data']
RPL_ISON = retcode(303)
RPL_ISON.name = "RPL_ISON"
RPL_ISON.re = (
"^:(?P<srv>\S+) 303 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ISON.tpl = (
':{c.srv} 303 {c.nick} '
':{nicknames}')
RPL_ISON.params = ['srv', 'me', 'data']
RPL_UNAWAY = retcode(305)
RPL_UNAWAY.name = "RPL_UNAWAY"
RPL_UNAWAY.re = (
"^:(?P<srv>\S+) 305 (?P<me>\S+) "
":(?P<data>.*)")
RPL_UNAWAY.tpl = (
':{c.srv} 305 {c.nick} '
':You are no longer marked as being away')
RPL_UNAWAY.params = ['srv', 'me', 'data']
RPL_NOWAWAY = retcode(306)
RPL_NOWAWAY.name = "RPL_NOWAWAY"
RPL_NOWAWAY.re = (
"^:(?P<srv>\S+) 306 (?P<me>\S+) "
":(?P<data>.*)")
RPL_NOWAWAY.tpl = (
':{c.srv} 306 {c.nick} '
':You have been marked as being away')
RPL_NOWAWAY.params = ['srv', 'me', 'data']
RPL_WHOISUSER = retcode(311)
RPL_WHOISUSER.name = "RPL_WHOISUSER"
RPL_WHOISUSER.re = (
"^:(?P<srv>\S+) 311 (?P<me>\S+) "
"(?P<nick>\S+) (?P<username>\S+) (?P<host>\S+) (?P<m>\S+) :(?P<data>.*)")
RPL_WHOISUSER.tpl = (
':{c.srv} 311 {c.nick} '
'{nick} {username} {host} {m} :{realname}')
RPL_WHOISUSER.params = ['srv', 'me', 'nick', 'username', 'host', 'm', 'data']
RPL_WHOISSERVER = retcode(312)
RPL_WHOISSERVER.name = "RPL_WHOISSERVER"
RPL_WHOISSERVER.re = (
"^:(?P<srv>\S+) 312 (?P<me>\S+) "
"(?P<nick>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
RPL_WHOISSERVER.tpl = (
':{c.srv} 312 {c.nick} '
'{nick} {server} :{server_info}')
RPL_WHOISSERVER.params = ['srv', 'me', 'nick', 'server', 'data']
RPL_WHOISOPERATOR = retcode(313)
RPL_WHOISOPERATOR.name = "RPL_WHOISOPERATOR"
RPL_WHOISOPERATOR.re = (
"^:(?P<srv>\S+) 313 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
RPL_WHOISOPERATOR.tpl = (
':{c.srv} 313 {c.nick} '
'{nick} :is an IRC operator')
RPL_WHOISOPERATOR.params = ['srv', 'me', 'nick', 'data']
RPL_WHOWASUSER = retcode(314)
RPL_WHOWASUSER.name = "RPL_WHOWASUSER"
RPL_WHOWASUSER.re = (
"^:(?P<srv>\S+) 314 (?P<me>\S+) "
"(?P<nick>\S+) (?P<username>\S+) (?P<host>\S+) . :(?P<data>.*)")
RPL_WHOWASUSER.tpl = (
':{c.srv} 314 {c.nick} '
'{nick} {username} {host} * :{realname}')
RPL_WHOWASUSER.params = ['srv', 'me', 'nick', 'username', 'host', 'data']
RPL_ENDOFWHO = retcode(315)
RPL_ENDOFWHO.name = "RPL_ENDOFWHO"
RPL_ENDOFWHO.re = (
"^:(?P<srv>\S+) 315 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
RPL_ENDOFWHO.tpl = (
':{c.srv} 315 {c.nick} '
'{nick} :End of /WHO list')
RPL_ENDOFWHO.params = ['srv', 'me', 'nick', 'data']
RPL_WHOISIDLE = retcode(317)
RPL_WHOISIDLE.name = "RPL_WHOISIDLE"
RPL_WHOISIDLE.re = (
"^:(?P<srv>\S+) 317 (?P<me>\S+) "
"(?P<nick>\S+) (?P<x>\S+) :(?P<data>.*)")
RPL_WHOISIDLE.tpl = (
':{c.srv} 317 {c.nick} '
'{nick} {x} :seconds idle')
RPL_WHOISIDLE.params = ['srv', 'me', 'nick', 'x', 'data']
RPL_ENDOFWHOIS = retcode(318)
RPL_ENDOFWHOIS.name = "RPL_ENDOFWHOIS"
RPL_ENDOFWHOIS.re = (
"^:(?P<srv>\S+) 318 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
RPL_ENDOFWHOIS.tpl = (
':{c.srv} 318 {c.nick} '
'{nick} :End of /WHOIS list')
RPL_ENDOFWHOIS.params = ['srv', 'me', 'nick', 'data']
RPL_WHOISCHANNELS = retcode(319)
RPL_WHOISCHANNELS.name = "RPL_WHOISCHANNELS"
RPL_WHOISCHANNELS.re = (
"^:(?P<srv>\S+) 319 (?P<me>\S+) "
":(?P<data>.*)")
RPL_WHOISCHANNELS.tpl = (
':{c.srv} 319 {c.nick} '
':{channels}')
RPL_WHOISCHANNELS.params = ['srv', 'me', 'data']
RPL_LISTSTART = retcode(321)
RPL_LISTSTART.name = "RPL_LISTSTART"
RPL_LISTSTART.re = (
"^:(?P<srv>\S+) 321 (?P<me>\S+) "
"Channel :(?P<data>.*)")
RPL_LISTSTART.tpl = (
':{c.srv} 321 {c.nick} '
'Channel :Users Name')
RPL_LISTSTART.params = ['srv', 'me', 'data']
RPL_LIST = retcode(322)
RPL_LIST.name = "RPL_LIST"
RPL_LIST.re = (
"^:(?P<srv>\S+) 322 (?P<me>\S+) "
"(?P<channel>\S+) (?P<visible>\S+) :(?P<data>.*)")
RPL_LIST.tpl = (
':{c.srv} 322 {c.nick} '
'{channel} {visible} :{topic}')
RPL_LIST.params = ['srv', 'me', 'channel', 'visible', 'data']
RPL_LISTEND = retcode(323)
RPL_LISTEND.name = "RPL_LISTEND"
RPL_LISTEND.re = (
"^:(?P<srv>\S+) 323 (?P<me>\S+) "
":(?P<data>.*)")
RPL_LISTEND.tpl = (
':{c.srv} 323 {c.nick} '
':End of /LIST')
RPL_LISTEND.params = ['srv', 'me', 'data']
RPL_CHANNELMODEIS = retcode(324)
RPL_CHANNELMODEIS.name = "RPL_CHANNELMODEIS"
RPL_CHANNELMODEIS.re = (
"^:(?P<srv>\S+) 324 (?P<me>\S+) "
"(?P<channel>\S+) (?P<mode>\S+) (?P<mode_params>\S+)")
RPL_CHANNELMODEIS.tpl = (
':{c.srv} 324 {c.nick} '
'{channel} {mode} {mode_params}')
RPL_CHANNELMODEIS.params = ['srv', 'me', 'channel', 'mode', 'mode_params']
RPL_NOTOPIC = retcode(331)
RPL_NOTOPIC.name = "RPL_NOTOPIC"
RPL_NOTOPIC.re = (
"^:(?P<srv>\S+) 331 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
RPL_NOTOPIC.tpl = (
':{c.srv} 331 {c.nick} '
'{channel} :No topic is set')
RPL_NOTOPIC.params = ['srv', 'me', 'channel', 'data']
RPL_TOPIC = retcode(332)
RPL_TOPIC.name = "RPL_TOPIC"
RPL_TOPIC.re = (
"^:(?P<srv>\S+) 332 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
RPL_TOPIC.tpl = (
':{c.srv} 332 {c.nick} '
'{channel} :{topic}')
RPL_TOPIC.params = ['srv', 'me', 'channel', 'data']
RPL_INVITING = retcode(341)
RPL_INVITING.name = "RPL_INVITING"
RPL_INVITING.re = (
"^:(?P<srv>\S+) 341 (?P<me>\S+) "
"(?P<channel>\S+) (?P<nick>\S+)")
RPL_INVITING.tpl = (
':{c.srv} 341 {c.nick} '
'{channel} {nick}')
RPL_INVITING.params = ['srv', 'me', 'channel', 'nick']
RPL_SUMMONING = retcode(342)
RPL_SUMMONING.name = "RPL_SUMMONING"
RPL_SUMMONING.re = (
"^:(?P<srv>\S+) 342 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
RPL_SUMMONING.tpl = (
':{c.srv} 342 {c.nick} '
'{nick} :Summoning user to IRC')
RPL_SUMMONING.params = ['srv', 'me', 'nick', 'data']
RPL_VERSION = retcode(351)
RPL_VERSION.name = "RPL_VERSION"
RPL_VERSION.re = (
"^:(?P<srv>\S+) 351 (?P<me>\S+) "
"(?P<version>\S+).(?P<debuglevel>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
RPL_VERSION.tpl = (
':{c.srv} 351 {c.nick} '
'{version}.{debuglevel} {server} :{comments}')
RPL_VERSION.params = ['srv', 'me', 'version', 'debuglevel', 'server', 'data']
RPL_WHOREPLY = retcode(352)
RPL_WHOREPLY.name = "RPL_WHOREPLY"
RPL_WHOREPLY.re = (
"^:(?P<srv>\S+) 352 (?P<me>\S+) "
"(?P<channel>\S+) (?P<username>\S+) (?P<host>\S+) "
"(?P<server>\S+) (?P<nick>\S+) (?P<modes>\S+) :(?P<data>.*)")
RPL_WHOREPLY.tpl = (
':{c.srv} 352 {c.nick} '
':{channel} {username} {host} {server} {nick} {modes} '
':{hopcount} {realname}')
RPL_WHOREPLY.params = [
'srv',
'me',
'channel',
'username',
'host',
'server',
'nick',
'modes',
'data']
RPL_NAMREPLY = retcode(353)
RPL_NAMREPLY.name = "RPL_NAMREPLY"
RPL_NAMREPLY.re = (
"^:(?P<srv>\S+) 353 (?P<me>\S+) "
"(?P<m>\S+) (?P<channel>\S+) :(?P<data>.*)")
RPL_NAMREPLY.tpl = (
':{c.srv} 353 {c.nick} '
'{m} {channel} :{nicknames}')
RPL_NAMREPLY.params = ['srv', 'me', 'm', 'channel', 'data']
RPL_LINKS = retcode(364)
RPL_LINKS.name = "RPL_LINKS"
RPL_LINKS.re = (
"^:(?P<srv>\S+) 364 (?P<me>\S+) "
"(?P<mask>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
RPL_LINKS.tpl = (
':{c.srv} 364 {c.nick} '
'{mask} {server} :{hopcount} {server_info}')
RPL_LINKS.params = ['srv', 'me', 'mask', 'server', 'data']
RPL_ENDOFLINKS = retcode(365)
RPL_ENDOFLINKS.name = "RPL_ENDOFLINKS"
RPL_ENDOFLINKS.re = (
"^:(?P<srv>\S+) 365 (?P<me>\S+) "
"(?P<mask>\S+) :(?P<data>.*)")
RPL_ENDOFLINKS.tpl = (
':{c.srv} 365 {c.nick} '
'{mask} :End of /LINKS list')
RPL_ENDOFLINKS.params = ['srv', 'me', 'mask', 'data']
RPL_ENDOFNAMES = retcode(366)
RPL_ENDOFNAMES.name = "RPL_ENDOFNAMES"
RPL_ENDOFNAMES.re = (
"^:(?P<srv>\S+) 366 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
RPL_ENDOFNAMES.tpl = (
':{c.srv} 366 {c.nick} '
'{channel} :End of /NAMES list')
RPL_ENDOFNAMES.params = ['srv', 'me', 'channel', 'data']
RPL_BANLIST = retcode(367)
RPL_BANLIST.name = "RPL_BANLIST"
RPL_BANLIST.re = (
"^:(?P<srv>\S+) 367 (?P<me>\S+) "
"(?P<channel>\S+) (?P<banid>\S+)")
RPL_BANLIST.tpl = (
':{c.srv} 367 {c.nick} '
'{channel} {banid}')
RPL_BANLIST.params = ['srv', 'me', 'channel', 'banid']
RPL_ENDOFBANLIST = retcode(368)
RPL_ENDOFBANLIST.name = "RPL_ENDOFBANLIST"
RPL_ENDOFBANLIST.re = (
"^:(?P<srv>\S+) 368 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
RPL_ENDOFBANLIST.tpl = (
':{c.srv} 368 {c.nick} '
'{channel} :End of channel ban list')
RPL_ENDOFBANLIST.params = ['srv', 'me', 'channel', 'data']
RPL_ENDOFWHOWAS = retcode(369)
RPL_ENDOFWHOWAS.name = "RPL_ENDOFWHOWAS"
RPL_ENDOFWHOWAS.re = (
"^:(?P<srv>\S+) 369 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
RPL_ENDOFWHOWAS.tpl = (
':{c.srv} 369 {c.nick} '
'{nick} :End of WHOWAS')
RPL_ENDOFWHOWAS.params = ['srv', 'me', 'nick', 'data']
RPL_INFO = retcode(371)
RPL_INFO.name = "RPL_INFO"
RPL_INFO.re = (
"^:(?P<srv>\S+) 371 (?P<me>\S+) "
":(?P<data>.*)")
RPL_INFO.tpl = (
':{c.srv} 371 {c.nick} '
':{string}')
RPL_INFO.params = ['srv', 'me', 'data']
RPL_MOTD = retcode(372)
RPL_MOTD.name = "RPL_MOTD"
RPL_MOTD.re = (
"^:(?P<srv>\S+) 372 (?P<me>\S+) "
":(?P<data>.*)")
RPL_MOTD.tpl = (
':{c.srv} 372 {c.nick} '
':- {text}')
RPL_MOTD.params = ['srv', 'me', 'data']
RPL_ENDOFINFO = retcode(374)
RPL_ENDOFINFO.name = "RPL_ENDOFINFO"
RPL_ENDOFINFO.re = (
"^:(?P<srv>\S+) 374 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ENDOFINFO.tpl = (
':{c.srv} 374 {c.nick} '
':End of /INFO list')
RPL_ENDOFINFO.params = ['srv', 'me', 'data']
RPL_MOTDSTART = retcode(375)
RPL_MOTDSTART.name = "RPL_MOTDSTART"
RPL_MOTDSTART.re = (
"^:(?P<srv>\S+) 375 (?P<me>\S+) "
":(?P<data>.*)")
RPL_MOTDSTART.tpl = (
':{c.srv} 375 {c.nick} '
':- {server} Message of the day -')
RPL_MOTDSTART.params = ['srv', 'me', 'data']
RPL_ENDOFMOTD = retcode(376)
RPL_ENDOFMOTD.name = "RPL_ENDOFMOTD"
RPL_ENDOFMOTD.re = (
"^:(?P<srv>\S+) 376 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ENDOFMOTD.tpl = (
':{c.srv} 376 {c.nick} '
':End of /MOTD command')
RPL_ENDOFMOTD.params = ['srv', 'me', 'data']
RPL_YOUREOPER = retcode(381)
RPL_YOUREOPER.name = "RPL_YOUREOPER"
RPL_YOUREOPER.re = (
"^:(?P<srv>\S+) 381 (?P<me>\S+) "
":(?P<data>.*)")
RPL_YOUREOPER.tpl = (
':{c.srv} 381 {c.nick} '
':You are now an IRC operator')
RPL_YOUREOPER.params = ['srv', 'me', 'data']
RPL_REHASHING = retcode(382)
RPL_REHASHING.name = "RPL_REHASHING"
RPL_REHASHING.re = (
"^:(?P<srv>\S+) 382 (?P<me>\S+) "
"(?P<config_file>\S+) :(?P<data>.*)")
RPL_REHASHING.tpl = (
':{c.srv} 382 {c.nick} '
'{config_file} :Rehashing')
RPL_REHASHING.params = ['srv', 'me', 'config_file', 'data']
RPL_TIME = retcode(391)
RPL_TIME.name = "RPL_TIME"
RPL_TIME.re = (
"^:(?P<srv>\S+) 391 (?P<me>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
RPL_TIME.tpl = (
':{c.srv} 391 {c.nick} '
"{server} :{string_showing_server's_local_time}")
RPL_TIME.params = ['srv', 'me', 'server', 'data']
RPL_USERSSTART = retcode(392)
RPL_USERSSTART.name = "RPL_USERSSTART"
RPL_USERSSTART.re = (
"^:(?P<srv>\S+) 392 (?P<me>\S+) "
":(?P<data>.*)")
RPL_USERSSTART.tpl = (
':{c.srv} 392 {c.nick} '
':UserID Terminal Host')
RPL_USERSSTART.params = ['srv', 'me', 'data']
RPL_USERS = retcode(393)
RPL_USERS.name = "RPL_USERS"
RPL_USERS.re = (
"^:(?P<srv>\S+) 393 (?P<me>\S+) "
":(?P<data>.*)")
RPL_USERS.tpl = (
':{c.srv} 393 {c.nick} '
'{x} {y} {z}')
RPL_USERS.params = ['srv', 'me', 'data']
RPL_ENDOFUSERS = retcode(394)
RPL_ENDOFUSERS.name = "RPL_ENDOFUSERS"
RPL_ENDOFUSERS.re = (
"^:(?P<srv>\S+) 394 (?P<me>\S+) "
":(?P<data>.*)")
RPL_ENDOFUSERS.tpl = (
':{c.srv} 394 {c.nick} '
':End of users')
RPL_ENDOFUSERS.params = ['srv', 'me', 'data']
RPL_NOUSERS = retcode(395)
RPL_NOUSERS.name = "RPL_NOUSERS"
RPL_NOUSERS.re = (
"^:(?P<srv>\S+) 395 (?P<me>\S+) "
":(?P<data>.*)")
RPL_NOUSERS.tpl = (
':{c.srv} 395 {c.nick} '
':Nobody logged in')
RPL_NOUSERS.params = ['srv', 'me', 'data']
ERR_NOSUCHNICK = retcode(401)
ERR_NOSUCHNICK.name = "ERR_NOSUCHNICK"
ERR_NOSUCHNICK.re = (
"^:(?P<srv>\S+) 401 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
ERR_NOSUCHNICK.tpl = (
':{c.srv} 401 {c.nick} '
'{nick} :No such nick/channel')
ERR_NOSUCHNICK.params = ['srv', 'me', 'nick', 'data']
ERR_NOSUCHSERVER = retcode(402)
ERR_NOSUCHSERVER.name = "ERR_NOSUCHSERVER"
ERR_NOSUCHSERVER.re = (
"^:(?P<srv>\S+) 402 (?P<me>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
ERR_NOSUCHSERVER.tpl = (
':{c.srv} 402 {c.nick} '
'{server} :No such server')
ERR_NOSUCHSERVER.params = ['srv', 'me', 'server', 'data']
ERR_NOSUCHCHANNEL = retcode(403)
ERR_NOSUCHCHANNEL.name = "ERR_NOSUCHCHANNEL"
ERR_NOSUCHCHANNEL.re = (
"^:(?P<srv>\S+) 403 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_NOSUCHCHANNEL.tpl = (
':{c.srv} 403 {c.nick} '
'{channel} :No such channel')
ERR_NOSUCHCHANNEL.params = ['srv', 'me', 'channel', 'data']
ERR_CANNOTSENDTOCHAN = retcode(404)
ERR_CANNOTSENDTOCHAN.name = "ERR_CANNOTSENDTOCHAN"
ERR_CANNOTSENDTOCHAN.re = (
"^:(?P<srv>\S+) 404 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_CANNOTSENDTOCHAN.tpl = (
':{c.srv} 404 {c.nick} '
'{channel} :Cannot send to channel')
ERR_CANNOTSENDTOCHAN.params = ['srv', 'me', 'channel', 'data']
ERR_TOOMANYCHANNELS = retcode(405)
ERR_TOOMANYCHANNELS.name = "ERR_TOOMANYCHANNELS"
ERR_TOOMANYCHANNELS.re = (
"^:(?P<srv>\S+) 405 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_TOOMANYCHANNELS.tpl = (
':{c.srv} 405 {c.nick} '
'{channel} :You have joined too many channels')
ERR_TOOMANYCHANNELS.params = ['srv', 'me', 'channel', 'data']
ERR_WASNOSUCHNICK = retcode(406)
ERR_WASNOSUCHNICK.name = "ERR_WASNOSUCHNICK"
ERR_WASNOSUCHNICK.re = (
"^:(?P<srv>\S+) 406 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
ERR_WASNOSUCHNICK.tpl = (
':{c.srv} 406 {c.nick} '
'{nick} :There was no such nickname')
ERR_WASNOSUCHNICK.params = ['srv', 'me', 'nick', 'data']
ERR_TOOMANYTARGETS = retcode(407)
ERR_TOOMANYTARGETS.name = "ERR_TOOMANYTARGETS"
ERR_TOOMANYTARGETS.re = (
"^:(?P<srv>\S+) 407 (?P<me>\S+) "
"(?P<target>\S+) :(?P<data>.*)")
ERR_TOOMANYTARGETS.tpl = (
':{c.srv} 407 {c.nick} '
'{target} :Duplicate recipients. No message delivered')
ERR_TOOMANYTARGETS.params = ['srv', 'me', 'target', 'data']
ERR_NOORIGIN = retcode(409)
ERR_NOORIGIN.name = "ERR_NOORIGIN"
ERR_NOORIGIN.re = (
"^:(?P<srv>\S+) 409 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOORIGIN.tpl = (
':{c.srv} 409 {c.nick} '
':No origin specified')
ERR_NOORIGIN.params = ['srv', 'me', 'data']
ERR_NORECIPIENT = retcode(411)
ERR_NORECIPIENT.name = "ERR_NORECIPIENT"
ERR_NORECIPIENT.re = (
"^:(?P<srv>\S+) 411 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NORECIPIENT.tpl = (
':{c.srv} 411 {c.nick} '
':No recipient given ({cmd})')
ERR_NORECIPIENT.params = ['srv', 'me', 'data']
ERR_NOTEXTTOSEND = retcode(412)
ERR_NOTEXTTOSEND.name = "ERR_NOTEXTTOSEND"
ERR_NOTEXTTOSEND.re = (
"^:(?P<srv>\S+) 412 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOTEXTTOSEND.tpl = (
':{c.srv} 412 {c.nick} '
':No text to send')
ERR_NOTEXTTOSEND.params = ['srv', 'me', 'data']
ERR_NOTOPLEVEL = retcode(413)
ERR_NOTOPLEVEL.name = "ERR_NOTOPLEVEL"
ERR_NOTOPLEVEL.re = (
"^:(?P<srv>\S+) 413 (?P<me>\S+) "
"(?P<mask>\S+) :(?P<data>.*)")
ERR_NOTOPLEVEL.tpl = (
':{c.srv} 413 {c.nick} '
'{mask} :No toplevel domain specified')
ERR_NOTOPLEVEL.params = ['srv', 'me', 'mask', 'data']
ERR_WILDTOPLEVEL = retcode(414)
ERR_WILDTOPLEVEL.name = "ERR_WILDTOPLEVEL"
ERR_WILDTOPLEVEL.re = (
"^:(?P<srv>\S+) 414 (?P<me>\S+) "
"(?P<mask>\S+) :(?P<data>.*)")
ERR_WILDTOPLEVEL.tpl = (
':{c.srv} 414 {c.nick} '
'{mask} :Wildcard in toplevel domain')
ERR_WILDTOPLEVEL.params = ['srv', 'me', 'mask', 'data']
ERR_UNKNOWNCOMMAND = retcode(421)
ERR_UNKNOWNCOMMAND.name = "ERR_UNKNOWNCOMMAND"
ERR_UNKNOWNCOMMAND.re = (
"^:(?P<srv>\S+) 421 (?P<me>\S+) "
"(?P<cmd>\S+) :(?P<data>.*)")
ERR_UNKNOWNCOMMAND.tpl = (
':{c.srv} 421 {c.nick} '
'{cmd} :Unknown command')
ERR_UNKNOWNCOMMAND.params = ['srv', 'me', 'cmd', 'data']
ERR_NOMOTD = retcode(422)
ERR_NOMOTD.name = "ERR_NOMOTD"
ERR_NOMOTD.re = (
"^:(?P<srv>\S+) 422 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOMOTD.tpl = (
':{c.srv} 422 {c.nick} '
':MOTD File is missing')
ERR_NOMOTD.params = ['srv', 'me', 'data']
ERR_NOADMININFO = retcode(423)
ERR_NOADMININFO.name = "ERR_NOADMININFO"
ERR_NOADMININFO.re = (
"^:(?P<srv>\S+) 423 (?P<me>\S+) "
"(?P<server>\S+) :(?P<data>.*)")
ERR_NOADMININFO.tpl = (
':{c.srv} 423 {c.nick} '
'{server} :No administrative info available')
ERR_NOADMININFO.params = ['srv', 'me', 'server', 'data']
ERR_NONICKNAMEGIVEN = retcode(431)
ERR_NONICKNAMEGIVEN.name = "ERR_NONICKNAMEGIVEN"
ERR_NONICKNAMEGIVEN.re = (
"^:(?P<srv>\S+) 431 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NONICKNAMEGIVEN.tpl = (
':{c.srv} 431 {c.nick} '
':No nickname given')
ERR_NONICKNAMEGIVEN.params = ['srv', 'me', 'data']
ERR_ERRONEUSNICKNAME = retcode(432)
ERR_ERRONEUSNICKNAME.name = "ERR_ERRONEUSNICKNAME"
ERR_ERRONEUSNICKNAME.re = (
"^:(?P<srv>\S+) 432 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
ERR_ERRONEUSNICKNAME.tpl = (
':{c.srv} 432 {c.nick} '
'{nick} :Erroneus nickname')
ERR_ERRONEUSNICKNAME.params = ['srv', 'me', 'nick', 'data']
ERR_NICKNAMEINUSE = retcode(433)
ERR_NICKNAMEINUSE.name = "ERR_NICKNAMEINUSE"
ERR_NICKNAMEINUSE.re = (
"^:(?P<srv>\S+) 433 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
ERR_NICKNAMEINUSE.tpl = (
':{c.srv} 433 {c.nick} '
'{nick} :Nickname is already in use')
ERR_NICKNAMEINUSE.params = ['srv', 'me', 'nick', 'data']
ERR_NICKCOLLISION = retcode(436)
ERR_NICKCOLLISION.name = "ERR_NICKCOLLISION"
ERR_NICKCOLLISION.re = (
"^:(?P<srv>\S+) 436 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
ERR_NICKCOLLISION.tpl = (
':{c.srv} 436 {c.nick} '
'{nick} :Nickname collision KILL')
ERR_NICKCOLLISION.params = ['srv', 'me', 'nick', 'data']
ERR_USERNOTINCHANNEL = retcode(441)
ERR_USERNOTINCHANNEL.name = "ERR_USERNOTINCHANNEL"
ERR_USERNOTINCHANNEL.re = (
"^:(?P<srv>\S+) 441 (?P<me>\S+) "
"(?P<nick>\S+) (?P<channel>\S+) :(?P<data>.*)")
ERR_USERNOTINCHANNEL.tpl = (
':{c.srv} 441 {c.nick} '
"{nick} {channel} :They aren't on that channel")
ERR_USERNOTINCHANNEL.params = ['srv', 'me', 'nick', 'channel', 'data']
ERR_NOTONCHANNEL = retcode(442)
ERR_NOTONCHANNEL.name = "ERR_NOTONCHANNEL"
ERR_NOTONCHANNEL.re = (
"^:(?P<srv>\S+) 442 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_NOTONCHANNEL.tpl = (
':{c.srv} 442 {c.nick} '
"{channel} :You're not on that channel")
ERR_NOTONCHANNEL.params = ['srv', 'me', 'channel', 'data']
ERR_USERONCHANNEL = retcode(443)
ERR_USERONCHANNEL.name = "ERR_USERONCHANNEL"
ERR_USERONCHANNEL.re = (
"^:(?P<srv>\S+) 443 (?P<me>\S+) "
"(?P<nick>\S+) (?P<channel>\S+) :(?P<data>.*)")
ERR_USERONCHANNEL.tpl = (
':{c.srv} 443 {c.nick} '
'{nick} {channel} :is already on channel')
ERR_USERONCHANNEL.params = ['srv', 'me', 'nick', 'channel', 'data']
ERR_NOLOGIN = retcode(444)
ERR_NOLOGIN.name = "ERR_NOLOGIN"
ERR_NOLOGIN.re = (
"^:(?P<srv>\S+) 444 (?P<me>\S+) "
"(?P<nick>\S+) :(?P<data>.*)")
ERR_NOLOGIN.tpl = (
':{c.srv} 444 {c.nick} '
'{nick} :User not logged in')
ERR_NOLOGIN.params = ['srv', 'me', 'nick', 'data']
ERR_SUMMONDISABLED = retcode(445)
ERR_SUMMONDISABLED.name = "ERR_SUMMONDISABLED"
ERR_SUMMONDISABLED.re = (
"^:(?P<srv>\S+) 445 (?P<me>\S+) "
":(?P<data>.*)")
ERR_SUMMONDISABLED.tpl = (
':{c.srv} 445 {c.nick} '
':SUMMON has been disabled')
ERR_SUMMONDISABLED.params = ['srv', 'me', 'data']
ERR_USERSDISABLED = retcode(446)
ERR_USERSDISABLED.name = "ERR_USERSDISABLED"
ERR_USERSDISABLED.re = (
"^:(?P<srv>\S+) 446 (?P<me>\S+) "
":(?P<data>.*)")
ERR_USERSDISABLED.tpl = (
':{c.srv} 446 {c.nick} '
':USERS has been disabled')
ERR_USERSDISABLED.params = ['srv', 'me', 'data']
ERR_NOTREGISTERED = retcode(451)
ERR_NOTREGISTERED.name = "ERR_NOTREGISTERED"
ERR_NOTREGISTERED.re = (
"^:(?P<srv>\S+) 451 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOTREGISTERED.tpl = (
':{c.srv} 451 {c.nick} '
':You have not registered')
ERR_NOTREGISTERED.params = ['srv', 'me', 'data']
ERR_NEEDMOREPARAMS = retcode(461)
ERR_NEEDMOREPARAMS.name = "ERR_NEEDMOREPARAMS"
ERR_NEEDMOREPARAMS.re = (
"^:(?P<srv>\S+) 461 (?P<me>\S+) "
"(?P<cmd>\S+) :(?P<data>.*)")
ERR_NEEDMOREPARAMS.tpl = (
':{c.srv} 461 {c.nick} '
'{cmd} :Not enough parameters')
ERR_NEEDMOREPARAMS.params = ['srv', 'me', 'cmd', 'data']
ERR_ALREADYREGISTRED = retcode(462)
ERR_ALREADYREGISTRED.name = "ERR_ALREADYREGISTRED"
ERR_ALREADYREGISTRED.re = (
"^:(?P<srv>\S+) 462 (?P<me>\S+) "
":(?P<data>.*)")
ERR_ALREADYREGISTRED.tpl = (
':{c.srv} 462 {c.nick} '
':You may not reregister')
ERR_ALREADYREGISTRED.params = ['srv', 'me', 'data']
ERR_NOPERMFORHOST = retcode(463)
ERR_NOPERMFORHOST.name = "ERR_NOPERMFORHOST"
ERR_NOPERMFORHOST.re = (
"^:(?P<srv>\S+) 463 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOPERMFORHOST.tpl = (
':{c.srv} 463 {c.nick} '
":Your host isn't among the privileged")
ERR_NOPERMFORHOST.params = ['srv', 'me', 'data']
ERR_PASSWDMISMATCH = retcode(464)
ERR_PASSWDMISMATCH.name = "ERR_PASSWDMISMATCH"
ERR_PASSWDMISMATCH.re = (
"^:(?P<srv>\S+) 464 (?P<me>\S+) "
":(?P<data>.*)")
ERR_PASSWDMISMATCH.tpl = (
':{c.srv} 464 {c.nick} '
':Password incorrect')
ERR_PASSWDMISMATCH.params = ['srv', 'me', 'data']
ERR_YOUREBANNEDCREEP = retcode(465)
ERR_YOUREBANNEDCREEP.name = "ERR_YOUREBANNEDCREEP"
ERR_YOUREBANNEDCREEP.re = (
"^:(?P<srv>\S+) 465 (?P<me>\S+) "
":(?P<data>.*)")
ERR_YOUREBANNEDCREEP.tpl = (
':{c.srv} 465 {c.nick} '
':You are banned from this server')
ERR_YOUREBANNEDCREEP.params = ['srv', 'me', 'data']
ERR_KEYSET = retcode(467)
ERR_KEYSET.name = "ERR_KEYSET"
ERR_KEYSET.re = (
"^:(?P<srv>\S+) 467 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_KEYSET.tpl = (
':{c.srv} 467 {c.nick} '
'{channel} :Channel key already set')
ERR_KEYSET.params = ['srv', 'me', 'channel', 'data']
ERR_CHANNELISFULL = retcode(471)
ERR_CHANNELISFULL.name = "ERR_CHANNELISFULL"
ERR_CHANNELISFULL.re = (
"^:(?P<srv>\S+) 471 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_CHANNELISFULL.tpl = (
':{c.srv} 471 {c.nick} '
'{channel} :Cannot join channel (+l)')
ERR_CHANNELISFULL.params = ['srv', 'me', 'channel', 'data']
ERR_UNKNOWNMODE = retcode(472)
ERR_UNKNOWNMODE.name = "ERR_UNKNOWNMODE"
ERR_UNKNOWNMODE.re = (
"^:(?P<srv>\S+) 472 (?P<me>\S+) "
"(?P<char>\S+) :(?P<data>.*)")
ERR_UNKNOWNMODE.tpl = (
':{c.srv} 472 {c.nick} '
'{char} :is unknown mode char to me')
ERR_UNKNOWNMODE.params = ['srv', 'me', 'char', 'data']
ERR_INVITEONLYCHAN = retcode(473)
ERR_INVITEONLYCHAN.name = "ERR_INVITEONLYCHAN"
ERR_INVITEONLYCHAN.re = (
"^:(?P<srv>\S+) 473 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_INVITEONLYCHAN.tpl = (
':{c.srv} 473 {c.nick} '
'{channel} :Cannot join channel (+i)')
ERR_INVITEONLYCHAN.params = ['srv', 'me', 'channel', 'data']
ERR_BANNEDFROMCHAN = retcode(474)
ERR_BANNEDFROMCHAN.name = "ERR_BANNEDFROMCHAN"
ERR_BANNEDFROMCHAN.re = (
"^:(?P<srv>\S+) 474 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_BANNEDFROMCHAN.tpl = (
':{c.srv} 474 {c.nick} '
'{channel} :Cannot join channel (+b)')
ERR_BANNEDFROMCHAN.params = ['srv', 'me', 'channel', 'data']
ERR_BADCHANNELKEY = retcode(475)
ERR_BADCHANNELKEY.name = "ERR_BADCHANNELKEY"
ERR_BADCHANNELKEY.re = (
"^:(?P<srv>\S+) 475 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_BADCHANNELKEY.tpl = (
':{c.srv} 475 {c.nick} '
'{channel} :Cannot join channel (+k)')
ERR_BADCHANNELKEY.params = ['srv', 'me', 'channel', 'data']
ERR_NOPRIVILEGES = retcode(481)
ERR_NOPRIVILEGES.name = "ERR_NOPRIVILEGES"
ERR_NOPRIVILEGES.re = (
"^:(?P<srv>\S+) 481 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOPRIVILEGES.tpl = (
':{c.srv} 481 {c.nick} '
":Permission Denied- You're not an IRC operator")
ERR_NOPRIVILEGES.params = ['srv', 'me', 'data']
ERR_CHANOPRIVSNEEDED = retcode(482)
ERR_CHANOPRIVSNEEDED.name = "ERR_CHANOPRIVSNEEDED"
ERR_CHANOPRIVSNEEDED.re = (
"^:(?P<srv>\S+) 482 (?P<me>\S+) "
"(?P<channel>\S+) :(?P<data>.*)")
ERR_CHANOPRIVSNEEDED.tpl = (
':{c.srv} 482 {c.nick} '
"{channel} :You're not channel operator")
ERR_CHANOPRIVSNEEDED.params = ['srv', 'me', 'channel', 'data']
ERR_CANTKILLSERVER = retcode(483)
ERR_CANTKILLSERVER.name = "ERR_CANTKILLSERVER"
ERR_CANTKILLSERVER.re = (
"^:(?P<srv>\S+) 483 (?P<me>\S+) "
":(?P<data>.*)")
ERR_CANTKILLSERVER.tpl = (
':{c.srv} 483 {c.nick} '
':You cant kill a server!')
ERR_CANTKILLSERVER.params = ['srv', 'me', 'data']
ERR_NOOPERHOST = retcode(491)
ERR_NOOPERHOST.name = "ERR_NOOPERHOST"
ERR_NOOPERHOST.re = (
"^:(?P<srv>\S+) 491 (?P<me>\S+) "
":(?P<data>.*)")
ERR_NOOPERHOST.tpl = (
':{c.srv} 491 {c.nick} '
':No O-lines for your host')
ERR_NOOPERHOST.params = ['srv', 'me', 'data']
ERR_UMODEUNKNOWNFLAG = retcode(501)
ERR_UMODEUNKNOWNFLAG.name = "ERR_UMODEUNKNOWNFLAG"
ERR_UMODEUNKNOWNFLAG.re = (
"^:(?P<srv>\S+) 501 (?P<me>\S+) "
":(?P<data>.*)")
ERR_UMODEUNKNOWNFLAG.tpl = (
':{c.srv} 501 {c.nick} '
':Unknown MODE flag')
ERR_UMODEUNKNOWNFLAG.params = ['srv', 'me', 'data']
ERR_USERSDONTMATCH = retcode(502)
ERR_USERSDONTMATCH.name = "ERR_USERSDONTMATCH"
ERR_USERSDONTMATCH.re = (
"^:(?P<srv>\S+) 502 (?P<me>\S+) "
":(?P<data>.*)")
ERR_USERSDONTMATCH.tpl = (
':{c.srv} 502 {c.nick} '
':Cant change mode for other users')
ERR_USERSDONTMATCH.params = ['srv', 'me', 'data']
RETCODES = {
200: RPL_TRACELINK,
201: RPL_TRACECONNECTING,
202: RPL_TRACEHANDSHAKE,
203: RPL_TRACEUNKNOWN,
204: RPL_TRACEOPERATOR,
205: RPL_TRACEUSER,
206: RPL_TRACESERVER,
208: RPL_TRACENEWTYPE,
211: RPL_STATSLINKINFO,
212: RPL_STATSCOMMANDS,
213: RPL_STATSCLINE,
214: RPL_STATSNLINE,
215: RPL_STATSILINE,
216: RPL_STATSKLINE,
218: RPL_STATSYLINE,
219: RPL_ENDOFSTATS,
221: RPL_UMODEIS,
241: RPL_STATSLLINE,
242: RPL_STATSUPTIME,
243: RPL_STATSOLINE,
244: RPL_STATSHLINE,
251: RPL_LUSERCLIENT,
252: RPL_LUSEROP,
253: RPL_LUSERUNKNOWN,
254: RPL_LUSERCHANNELS,
255: RPL_LUSERME,
256: RPL_ADMINME,
257: RPL_ADMINLOC1,
258: RPL_ADMINLOC2,
259: RPL_ADMINEMAIL,
261: RPL_TRACELOG,
301: RPL_AWAY,
302: RPL_USERHOST,
303: RPL_ISON,
305: RPL_UNAWAY,
306: RPL_NOWAWAY,
311: RPL_WHOISUSER,
312: RPL_WHOISSERVER,
313: RPL_WHOISOPERATOR,
314: RPL_WHOWASUSER,
315: RPL_ENDOFWHO,
317: RPL_WHOISIDLE,
318: RPL_ENDOFWHOIS,
319: RPL_WHOISCHANNELS,
321: RPL_LISTSTART,
322: RPL_LIST,
323: RPL_LISTEND,
324: RPL_CHANNELMODEIS,
331: RPL_NOTOPIC,
332: RPL_TOPIC,
341: RPL_INVITING,
342: RPL_SUMMONING,
351: RPL_VERSION,
352: RPL_WHOREPLY,
353: RPL_NAMREPLY,
364: RPL_LINKS,
365: RPL_ENDOFLINKS,
366: RPL_ENDOFNAMES,
367: RPL_BANLIST,
368: RPL_ENDOFBANLIST,
369: RPL_ENDOFWHOWAS,
371: RPL_INFO,
372: RPL_MOTD,
374: RPL_ENDOFINFO,
375: RPL_MOTDSTART,
376: RPL_ENDOFMOTD,
381: RPL_YOUREOPER,
382: RPL_REHASHING,
391: RPL_TIME,
392: RPL_USERSSTART,
393: RPL_USERS,
394: RPL_ENDOFUSERS,
395: RPL_NOUSERS,
401: ERR_NOSUCHNICK,
402: ERR_NOSUCHSERVER,
403: ERR_NOSUCHCHANNEL,
404: ERR_CANNOTSENDTOCHAN,
405: ERR_TOOMANYCHANNELS,
406: ERR_WASNOSUCHNICK,
407: ERR_TOOMANYTARGETS,
409: ERR_NOORIGIN,
411: ERR_NORECIPIENT,
412: ERR_NOTEXTTOSEND,
413: ERR_NOTOPLEVEL,
414: ERR_WILDTOPLEVEL,
421: ERR_UNKNOWNCOMMAND,
422: ERR_NOMOTD,
423: ERR_NOADMININFO,
431: ERR_NONICKNAMEGIVEN,
432: ERR_ERRONEUSNICKNAME,
433: ERR_NICKNAMEINUSE,
436: ERR_NICKCOLLISION,
441: ERR_USERNOTINCHANNEL,
442: ERR_NOTONCHANNEL,
443: ERR_USERONCHANNEL,
444: ERR_NOLOGIN,
445: ERR_SUMMONDISABLED,
446: ERR_USERSDISABLED,
451: ERR_NOTREGISTERED,
461: ERR_NEEDMOREPARAMS,
462: ERR_ALREADYREGISTRED,
463: ERR_NOPERMFORHOST,
464: ERR_PASSWDMISMATCH,
465: ERR_YOUREBANNEDCREEP,
467: ERR_KEYSET,
471: ERR_CHANNELISFULL,
472: ERR_UNKNOWNMODE,
473: ERR_INVITEONLYCHAN,
474: ERR_BANNEDFROMCHAN,
475: ERR_BADCHANNELKEY,
481: ERR_NOPRIVILEGES,
482: ERR_CHANOPRIVSNEEDED,
483: ERR_CANTKILLSERVER,
491: ERR_NOOPERHOST,
501: ERR_UMODEUNKNOWNFLAG,
502: ERR_USERSDONTMATCH,
}
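# Illustrative sketch (not part of the module): matching a raw server line
# against one of the retcode regexps defined above:
#
#   import re
#   line = ':irc.example.net 332 mynick #chan :Welcome to #chan'
#   match = re.match(RPL_TOPIC.re, line)
#   # match.groupdict() == {'srv': 'irc.example.net', 'me': 'mynick',
#   #                       'channel': '#chan', 'data': 'Welcome to #chan'}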
| mit |
ky822/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
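# Illustrative note (not part of the module): because __get__ touches the
# delegate before returning the wrapper, accessing the decorated method on an
# instance whose delegate lacks it raises AttributeError immediately, which is
# exactly what makes the hasattr() checks in the doctest above return False:
#
#   MetaEst(HasNoPredict()).predict   # raises AttributeError
#   MetaEst(HasPredict()).predict     # returns a callable wrapper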
| bsd-3-clause |
Euphoria-OS-Legacy/android_external_skia | platform_tools/android/tests/ordered_set_tests.py | 145 | 2880 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test OrderedSet.
"""
import sys
import test_variables
import unittest
sys.path.append(test_variables.GYP_GEN_DIR)
from vars_dict_lib import OrderedSet
def create_dummy_var(i):
return 'dummy_var' + str(i)
class OrderedSetTest(unittest.TestCase):
def setUp(self):
self.__set = OrderedSet()
def test_methods(self):
"""Test methods on OrderedSet.
"""
RANGE = 10
for i in range(RANGE):
dummy_var = create_dummy_var(i)
# Add to the list. This should succeed.
self.__set.add(dummy_var)
self.assertEqual(len(self.__set), i+1)
self.assertTrue(dummy_var in self.__set)
self.assertEqual(self.__set[i], dummy_var)
# Now attempt to add it again. This should fail.
self.__set.add(dummy_var)
self.assertEqual(len(self.__set), i+1)
self.assertEqual(self.__set[i], dummy_var)
# Test iterator.
counter = 0
for set_member in self.__set:
self.assertEqual(create_dummy_var(counter), set_member)
counter += 1
self.assertEqual(counter, len(self.__set))
# Now test removal.
for i in range(RANGE):
dummy_var = create_dummy_var(i)
self.__set.remove(dummy_var)
self.assertEqual(len(self.__set), RANGE-i-1)
self.assertFalse(dummy_var in self.__set)
# Test reset(), for a range of ranges.
for subrange in range(RANGE):
for i in range(subrange):
self.__set.add(create_dummy_var(i))
self.assertEqual(len(self.__set), subrange)
self.__set.reset()
self.assertEqual(len(self.__set), 0)
def test_set(self):
"""Test OrderedSet.set().
"""
# Create a set with dummy values.
my_set = OrderedSet()
RANGE = 10
for i in range(RANGE):
my_set.add(create_dummy_var(i))
my_len = len(my_set)
self.assertEqual(my_len, RANGE)
# Copy it to another set.
other_set = OrderedSet()
self.assertEqual(len(other_set), 0)
other_set.set(my_set)
# Both sets should contain the same values, in the same order.
iterator = iter(my_set)
for item in other_set:
self.assertTrue(item == iterator.next())
with self.assertRaises(StopIteration):
iterator.next()
self.assertEqual(my_len, len(other_set))
# But the sets are different. Changing one will not affect the other.
self.assertFalse(other_set is my_set)
other_var = 'something_else'
other_set.add(other_var)
self.assertEqual(my_len + 1, len(other_set))
self.assertEqual(my_len, len(my_set))
self.assertNotIn(other_var, my_set)
def main():
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(OrderedSetTest)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
| bsd-3-clause |
ehashman/oh-mainline | vendor/packages/docutils/test/test_transforms/test_target_notes.py | 19 | 2301 | #! /usr/bin/env python
# $Id: test_target_notes.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for `docutils.transforms.references.TargetNotes` (via
`docutils.transforms.universal.LastReaderPending`).
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.references import PropagateTargets, \
AnonymousHyperlinks, IndirectHyperlinks, ExternalTargets, \
InternalTargets, DanglingReferences, Footnotes
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['tables_of_contents'] = ((PropagateTargets, AnonymousHyperlinks,
IndirectHyperlinks,
ExternalTargets, InternalTargets,
DanglingReferences,
), [
["""\
.. _target: http://exammple.org
A reference to a target_.
.. target-notes::
""",
"""\
<document source="test data">
<target ids="target" names="target" refuri="http://exammple.org">
<paragraph>
A reference to a \n\
<reference name="target" refuri="http://exammple.org">
target
\n\
<footnote_reference auto="1" ids="id2" refid="id1">
.
<footnote auto="1" ids="id1" names="TARGET_NOTE:\\ id1">
<paragraph>
<reference refuri="http://exammple.org">
http://exammple.org
"""],
["""\
.. _target: http://exammple.org
A reference to a target_.
.. target-notes::
   :class: custom
""",
"""\
<document source="test data">
<target ids="target" names="target" refuri="http://exammple.org">
<paragraph>
A reference to a \n\
<reference name="target" refuri="http://exammple.org">
target
<inline classes="custom">
\n\
<footnote_reference auto="1" classes="custom" ids="id2" refid="id1">
.
<footnote auto="1" ids="id1" names="TARGET_NOTE:\\ id1">
<paragraph>
<reference refuri="http://exammple.org">
http://exammple.org
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
sonaht/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_shell.py | 65 | 6605 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, 2016 Ritesh Khadgaray <khadgaray () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vm_shell
short_description: Execute a process in VM
description:
- Start a program in a VM without the need for network connection
version_added: 2.1
author: "Ritesh Khadgaray (@ritzk)"
notes:
- Tested on vSphere 5.5
- Only the first match against vm_id is used, even if there are multiple matches
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter:
description:
- The datacenter hosting the VM
- Will help speed up search
required: False
default: None
cluster:
description:
- The cluster hosting the VM
- Will help speed up search
required: False
default: None
vm_id:
description:
- The identification for the VM
required: True
vm_id_type:
description:
- The identification tag for the VM
default: vm_name
choices:
- 'uuid'
- 'dns_name'
- 'inventory_path'
- 'vm_name'
required: False
vm_username:
description:
- The user to connect to the VM.
required: False
default: None
vm_password:
description:
- The password used to login to the VM.
required: False
default: None
vm_shell:
description:
- The absolute path to the program to start. On Linux this is executed via bash.
required: True
vm_shell_args:
description:
- The argument to the program.
required: False
default: None
vm_shell_env:
description:
- Comma-separated list of environment variables, specified in the guest OS notation
required: False
default: None
vm_shell_cwd:
description:
- The current working directory of the application from which it will be run
required: False
default: None
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: shell execution
local_action:
module: vmware_vm_shell
hostname: myVSphere
username: myUsername
password: mySecret
datacenter: myDatacenter
vm_id: NameOfVM
vm_username: root
vm_password: superSecret
vm_shell: /bin/echo
vm_shell_args: " $var >> myFile "
vm_shell_env:
- "PATH=/bin"
- "VAR=test"
vm_shell_cwd: "/tmp"
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
def execute_command(content, vm, vm_username, vm_password, program_path, args="", env=None, cwd=None):
creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, envVariables=env, programPath=program_path, workingDirectory=cwd)
cmdpid = content.guestOperationsManager.processManager.StartProgramInGuest(vm=vm, auth=creds, spec=cmdspec)
return cmdpid
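# Illustrative note (not part of the module, API names per pyVmomi docs):
# StartProgramInGuest returns the guest PID of the launched process. A caller
# could poll it, e.g. with something like
#   content.guestOperationsManager.processManager.ListProcessesInGuest(
#       vm=vm, auth=creds, pids=[cmdpid])
# to check for completion; this module simply reports the PID back to Ansible.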
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(datacenter=dict(default=None, type='str'),
cluster=dict(default=None, type='str'),
vm_id=dict(required=True, type='str'),
vm_id_type=dict(default='vm_name', type='str', choices=['inventory_path', 'uuid', 'dns_name', 'vm_name']),
vm_username=dict(required=False, type='str'),
vm_password=dict(required=False, type='str', no_log=True),
vm_shell=dict(required=True, type='str'),
vm_shell_args=dict(default=" ", type='str'),
vm_shell_env=dict(default=None, type='list'),
vm_shell_cwd=dict(default=None, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(changed=False, msg='pyvmomi is required for this module')
try:
p = module.params
datacenter_name = p['datacenter']
cluster_name = p['cluster']
content = connect_to_api(module)
datacenter = None
if datacenter_name:
datacenter = find_datacenter_by_name(content, datacenter_name)
if not datacenter:
module.fail_json(changed=False, msg="datacenter not found")
cluster = None
if cluster_name:
cluster = find_cluster_by_name(content, cluster_name, datacenter)
if not cluster:
module.fail_json(changed=False, msg="cluster not found")
vm = find_vm_by_id(content, p['vm_id'], p['vm_id_type'], datacenter, cluster)
if not vm:
module.fail_json(msg='VM not found')
msg = execute_command(content, vm, p['vm_username'], p['vm_password'],
p['vm_shell'], p['vm_shell_args'], p['vm_shell_env'], p['vm_shell_cwd'])
module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=msg)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(changed=False, msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(changed=False, msg=method_fault.msg)
except Exception as e:
module.fail_json(changed=False, msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
theseyi/WhereHows | wherehows-etl/src/main/resources/jython/requests/packages/urllib3/util/url.py | 713 | 5879 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
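# Illustrative behavior sketch (not part of the original module); the results
# below follow from the parsing logic above:
#
#   parse_url('http://user:pwd@host.example:8080/path?q=1#frag')
#   # -> Url(scheme='http', auth='user:pwd', host='host.example', port=8080,
#   #        path='/path', query='q=1', fragment='frag')
#
#   parse_url('[2001:db8::1]:443/index')
#   # -> Url(scheme=None, auth=None, host='[2001:db8::1]', port=443,
#   #        path='/index', query=None, fragment=None)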
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
| apache-2.0 |
GoogleCloudPlatform/professional-services | tools/hive-bigquery/hive_to_bigquery/mysql_component.py | 2 | 11109 | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to handle MySQL related utilities."""
import logging
import pymysql
from hive_to_bigquery import custom_exceptions
from hive_to_bigquery.database_component import DatabaseComponent
from hive_to_bigquery.properties_reader import PropertiesReader
logger = logging.getLogger('Hive2BigQuery')
class MySQLComponent(DatabaseComponent):
"""MySQL component to handle functions related to it.
Has utilities which perform MySQL operations using the pymysql
connection, such as creating table, dropping a table, executing a query,
executing a transaction etc.
Attributes:
host (str): Hostname of the Cloud SQL instance.
user (str): Username to be used.
password (str): Password to be used.
database (str): Database to be connected.
port (int): Port to be used.
connection (pymysql.connections.Connection): Connection to Cloud SQL
instance.
"""
def __init__(self, **kwargs):
logger.debug("Initializing Cloud SQL Component")
super(MySQLComponent, self).__init__(**kwargs)
def __str__(self):
return "MySQL - Host {0} username {1} database {2} port {3}".format(
self.host, self.user, self.database, self.port)
def get_connection(self):
"""Connects to the MySQL database.
Returns:
pymysql.connections.Connection: pymysql connection object.
"""
logger.debug("Getting MySQL Connection")
try:
logger.debug(self)
connection = pymysql.connect(host=self.host,
user=self.user,
password=self.password,
database=self.database,
port=self.port)
return connection
except pymysql.err.DatabaseError as error:
raise custom_exceptions.ConnectionError from error
def get_cursor(self):
"""Gets the cursor object.
Returns:
pymysql.cursors.Cursor: pymysql cursor object.
"""
logger.debug("Getting cursor")
cursor = self.connection.cursor()
return cursor
def execute_transaction(self, query):
"""Executes a transaction and commits to the database.
Args:
query (str): Transaction query to be executed.
"""
try:
cursor = self.get_cursor()
cursor.execute(query)
self.connection.commit()
except pymysql.err.OperationalError as error:
self.connection.rollback()
logger.error("Failed to commit transaction {} to Cloud SQL "
"table".format(query))
raise custom_exceptions.MySQLExecutionError from error
def execute_query(self, query):
"""Executes query and returns the results.
Args:
query (str): Query to be executed.
Returns:
List: Results of the query.
"""
cursor = self.get_cursor()
try:
cursor.execute(query)
return cursor.fetchall()
except pymysql.err.OperationalError as error:
logger.error(
"Failed in querying Cloud SQL table - {}".format(query))
raise custom_exceptions.MySQLExecutionError from error
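    # Illustrative usage sketch (not part of the original module). The host,
    # credentials and SQL below are assumptions, and the keyword arguments are
    # assumed to be stored as attributes by the DatabaseComponent base class:
    #
    #   component = MySQLComponent(host='127.0.0.1', user='hod',
    #                              password='secret', database='tracking',
    #                              port=3306)
    #   rows = component.execute_query("SHOW TABLES")   # returns fetchall()
    #   component.execute_transaction(
    #       "INSERT INTO jobs (name) VALUES ('job-1')")  # commits, rolls back on error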
def check_table_exists(self, table_name):
"""Checks whether the provided MySQL table exists.
Args:
table_name (str): MySQL table name.
"""
results = self.execute_query("SHOW TABLES")
for name in results:
if table_name == name[0]:
return True
return False
def drop_table(self, table_name):
"""Drops tracking table.
Args:
table_name (str): MySQL table name.
"""
cursor = self.get_cursor()
if self.check_table_exists(table_name):
try:
cursor.execute("DROP TABLE {}".format(table_name))
logger.debug("Dropped table %s", table_name)
except pymysql.err.DatabaseError as error:
logger.error("Failed dropping table %s", table_name)
raise custom_exceptions.MySQLExecutionError from error
def drop_table_if_empty(self, table_name):
"""Drops tracking table if empty.
Args:
table_name (str): MySQL table name.
"""
if self.check_table_exists(table_name):
results = self.execute_query(
"SELECT COUNT(*) FROM {}".format(table_name))
n_rows = results[0][0]
if n_rows == 0:
self.drop_table(table_name)
logger.info(
"Dropped the empty tracking table {}".format(table_name))
def check_tracking_table_exists(self, hive_table_model):
"""Checks whether the tracking table exists.
Checks whether the tracking table exists from the previous migration
run (if any) and updates the attributes (is_first_run,
tracking_table_name, is_inc_col_present, inc_col, inc_col_type) of the
HiveTableModel instance.
Args:
hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
details.
"""
results = self.execute_query(
"SELECT tracking_table_name,inc_col_present,inc_col_name,"
"inc_col_type from {} WHERE hive_database='{}' AND "
"hive_table='{}' AND bq_table='{}'".format(
PropertiesReader.get('tracking_metatable_name'),
hive_table_model.db_name, hive_table_model.table_name,
hive_table_model.bq_table_name))
if results:
hive_table_model.is_first_run = False
hive_table_model.tracking_table_name = results[0][0]
hive_table_model.inc_col = results[0][2]
hive_table_model.inc_col_type = results[0][3]
if hive_table_model.inc_col == 'None':
hive_table_model.inc_col = None
hive_table_model.inc_col_type = None
if hive_table_model.is_first_run:
logger.debug("Tracking table does not exist")
else:
logger.debug("Tracking table %s found",
hive_table_model.tracking_table_name)
def update_tracking_meta_table(self, hive_table_model, mode):
"""Updates the tracking metatable with details of the Hive table."""
if mode == "INSERT":
query = "INSERT INTO {} (hive_database,hive_table,bq_table," \
"tracking_table_name,inc_col_present,inc_col_name," \
"inc_col_type) VALUES('{}','{}','{}','{}',{},'{}'," \
"'{}')".format(
PropertiesReader.get('tracking_metatable_name'),
hive_table_model.db_name, hive_table_model.table_name,
hive_table_model.bq_table_name,
hive_table_model.tracking_table_name,
hive_table_model.is_inc_col_present, hive_table_model.inc_col,
hive_table_model.inc_col_type)
if mode == "DELETE":
query = "DELETE FROM {} WHERE hive_database='{}' AND " \
"hive_table='{}' AND bq_table='{}'".format(
PropertiesReader.get('tracking_metatable_name'),
hive_table_model.db_name, hive_table_model.table_name,
hive_table_model.bq_table_name)
self.execute_query(query)
def create_tracking_table(self, hive_table_model):
"""Creates tracking table in CloudSQL instance.
Args:
hive_table_model (:class:`HiveTableModel`): Wrapper to Hive table
details.
"""
self.update_tracking_meta_table(hive_table_model, "INSERT")
logger.info("Tracking meta table {} is updated".format(
PropertiesReader.get('tracking_metatable_name')))
if hive_table_model.is_inc_col_present:
query = """CREATE TABLE IF NOT EXISTS {} (
id INT COMMENT 'Integer counter to identify the migration run
in which a data file has been detected',
table_name VARCHAR(255) COMMENT 'Hive stage table name',
inc_col_min VARCHAR(255) COMMENT 'Minimum value of the
incremental column',
inc_col_max VARCHAR(255) COMMENT 'Maximum value of the
incremental column',
clause VARCHAR(255) COMMENT 'Clause used while loading data
into staging table',
file_path VARCHAR(255) COMMENT 'HDFS file path',
gcs_copy_status VARCHAR(10) COMMENT 'Status of Hadoop distcp
operation to copy the file to GCS',
gcs_file_path VARCHAR(255) COMMENT 'Path of the file copied
into GCS',
bq_job_id VARCHAR(255) COMMENT 'BigQuery load job ID',
bq_job_retries TINYINT COMMENT 'Number of retries of BigQuery
load job',
bq_job_status VARCHAR(10) COMMENT 'Status of BigQuery load job'
)""".format(hive_table_model.tracking_table_name)
else:
query = """CREATE TABLE IF NOT EXISTS {} (
table_name VARCHAR(255) COMMENT 'Hive stage table name',
clause VARCHAR(255) COMMENT 'Clause used while loading data
into staging table',
file_path VARCHAR(255) COMMENT 'HDFS file path',
gcs_copy_status VARCHAR(10) COMMENT 'Status of Hadoop distcp
operation to copy the file to GCS',
gcs_file_path VARCHAR(255) COMMENT 'Path of the file copied
into GCS',
bq_job_id VARCHAR(255) COMMENT 'BigQuery load job ID',
bq_job_retries TINYINT COMMENT 'Number of retries of BigQuery
load job',
bq_job_status VARCHAR(10) COMMENT 'Status of BigQuery load job'
)""".format(hive_table_model.tracking_table_name)
self.execute_query(query)
logger.info("Tracking table {} is created".format(
hive_table_model.tracking_table_name))
| apache-2.0 |
chouseknecht/ansible | test/units/modules/network/f5/test_bigiq_application_fastl4_udp.py | 21 | 7808 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigiq_application_fastl4_udp import ModuleParameters
from library.modules.bigiq_application_fastl4_udp import ModuleManager
from library.modules.bigiq_application_fastl4_udp import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigiq_application_fastl4_udp import ModuleParameters
from ansible.modules.network.f5.bigiq_application_fastl4_udp import ModuleManager
from ansible.modules.network.f5.bigiq_application_fastl4_udp import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='my description',
service_environment='bar',
servers=[
dict(
address='1.2.3.4',
port=8080
),
dict(
address='5.6.7.8',
port=8000
)
],
inbound_virtual=dict(
address='2.2.2.2',
netmask='255.255.255.255',
port=80
)
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.config_set_name == 'foo'
assert p.sub_path == 'foo'
assert p.http_profile == 'profile_http'
assert p.service_environment == 'bar'
assert len(p.servers) == 2
assert 'address' in p.servers[0]
assert 'port' in p.servers[0]
assert 'address' in p.servers[1]
assert 'port' in p.servers[1]
assert p.servers[0]['address'] == '1.2.3.4'
assert p.servers[0]['port'] == 8080
assert p.servers[1]['address'] == '5.6.7.8'
assert p.servers[1]['port'] == 8000
assert 'address' in p.inbound_virtual
assert 'netmask' in p.inbound_virtual
assert 'port' in p.inbound_virtual
assert p.inbound_virtual['address'] == '2.2.2.2'
assert p.inbound_virtual['netmask'] == '255.255.255.255'
assert p.inbound_virtual['port'] == 80
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
try:
self.p1 = patch('library.modules.bigiq_application_fastl4_udp.bigiq_version')
self.p2 = patch('library.modules.bigiq_application_fastl4_udp.ModuleParameters.template_reference')
self.p3 = patch('library.modules.bigiq_application_fastl4_udp.ModuleParameters.default_device_reference')
self.m1 = self.p1.start()
self.m2 = self.p2.start()
self.m3 = self.p3.start()
self.m1.return_value = '6.1.0'
self.m2.return_value = Mock(return_value='https://localhost/mgmt/foobar1')
self.m3.return_value = Mock(return_value='https://localhost/mgmt/foobar3')
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigiq_application_fastl4_udp.bigiq_version')
self.p2 = patch('ansible.modules.network.f5.bigiq_application_fastl4_udp.ModuleParameters.template_reference')
self.p3 = patch('ansible.modules.network.f5.bigiq_application_fastl4_udp.ModuleParameters.default_device_reference')
self.m1 = self.p1.start()
self.m2 = self.p2.start()
self.m3 = self.p3.start()
self.m1.return_value = '6.1.0'
self.m2.return_value = Mock(return_value='https://localhost/mgmt/foobar1')
self.m3.return_value = Mock(return_value='https://localhost/mgmt/foobar3')
def tearDown(self):
self.patcher1.stop()
self.p1.stop()
self.p2.stop()
self.p3.stop()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='my description',
service_environment='bar',
servers=[
dict(
address='1.2.3.4',
port=8080
),
dict(
address='5.6.7.8',
port=8000
)
],
inbound_virtual=dict(
address='2.2.2.2',
netmask='255.255.255.255',
port=80
),
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.check_bigiq_version = Mock(return_value=True)
mm.has_no_service_environment = Mock(return_value=False)
mm.wait_for_apply_template_task = Mock(return_value=True)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(side_effect=[False, True])
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'my description'
def test_bigiq_version_raises(self):
set_module_args(dict(
name='foo',
description='my description',
service_environment='bar',
servers=[
dict(
address='1.2.3.4',
port=8080
),
dict(
address='5.6.7.8',
port=8000
)
],
inbound_virtual=dict(
address='2.2.2.2',
netmask='255.255.255.255',
port=80
),
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
msg = 'Module supports only BIGIQ version 6.0.x or lower.'
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
| gpl-3.0 |
DLR-SC/DataFinder | src/datafinder/gui/user/common/widget/property/editors/list_editor.py | 1 | 14838 | #
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Dialog for editing list property values.
"""
from PyQt4 import QtGui, QtCore
from PyQt4.Qt import Qt
from datafinder.core.configuration.properties import constants
from datafinder.core.configuration.properties import property_type
from datafinder.gui.gen.user.list_property_dialog_ui import Ui_listPropertyDialog
from datafinder.gui.user.common.util import extractPyObject, determineDisplayRepresentation
__version__ = "$Revision-Id:$"
class ListEditor(QtGui.QLineEdit):
"""
    This widget is a specialized line editor which allows
the manipulation of list data.
"""
_SUPPORTED_PROPERTY_TYPES = [
constants.STRING_TYPE, constants.DATETIME_TYPE, constants.NUMBER_TYPE, constants.BOOLEAN_TYPE]
def __init__(self, restrictions, editorFactory, initData=list(), parent=None):
"""
@param restrictions: List-specific restrictions.
see: L{<property_type.ListType>datafinder.core.configuration.properties.property_type.ListType}
@type restrictions: C{dict}
@param editorFactory: Factory for creation of value editors.
@type editorFactory: C{EditorFactory}
@param initData: Initial list data.
@type initData: C{list} of C{object}
@param parent: Parent widget of the dialog.
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QLineEdit.__init__(self, parent)
self._editorFactory = editorFactory
self.value = initData
self._allowedPropertyTypes = restrictions.get(constants.ALLOWED_SUB_TYPES, self._SUPPORTED_PROPERTY_TYPES)
self._removeUnsupportedPropertyTypes()
self._editButton = QtGui.QPushButton("...", self)
self._editButton.setMaximumSize(QtCore.QSize(20, 20))
self.setReadOnly(True)
self.setStyleSheet("QLineEdit { padding-right: 0px; } ")
self.setText(determineDisplayRepresentation(initData))
self._showEditorSlot()
self.connect(self._editButton, QtCore.SIGNAL("clicked()"), self._showEditorSlot)
def _removeUnsupportedPropertyTypes(self):
removes = list()
for propertyTypeName in self._allowedPropertyTypes:
if not propertyTypeName in self._SUPPORTED_PROPERTY_TYPES:
removes.append(propertyTypeName)
for propertyTypeName in removes:
self._allowedPropertyTypes.remove(propertyTypeName)
def resizeEvent(self, _):
""" Ensures that the edit button is in the right corner of the line editor. """
size = self._editButton.maximumSize()
self._editButton.move(self.rect().right() - size.width(),
(self.rect().bottom() + 1 - size.height()) / 2)
def _showEditorSlot(self):
""" Slot which shows the list editor. """
listPropertyEditor = _ListPropertyDialog(self._allowedPropertyTypes, self.value, self._editorFactory, self)
listPropertyEditor.exec_()
self.setText(determineDisplayRepresentation(self.value))
self.setFocus(Qt.OtherFocusReason)
def text(self):
""" Overwrites the text behavior. """
return self.value
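# Illustrative usage sketch (not part of the original module). The factory and
# restriction values are assumptions built from the constants referenced above:
#
#   restrictions = {constants.ALLOWED_SUB_TYPES: [constants.STRING_TYPE,
#                                                 constants.NUMBER_TYPE]}
#   editor = ListEditor(restrictions, editorFactory, initData=[u"a", 1])
#   # ... the user edits the list through the "..." button dialog ...
#   editedList = editor.text()   # returns the Python list, not a string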
class _ListPropertyDialog(QtGui.QDialog, Ui_listPropertyDialog):
"""
    This dialog shows the content of a list property and supports editing the property.
"""
def __init__(self, allowedPropertyTypes, propertyValues, editorFactory, parent=None):
"""
Constructor.
@param allowedPropertyTypes: Names of available property types.
@type allowedPropertyTypes: C{list} of C{unicode}
@param propertyValues: Initial list data.
@type propertyValues: C{list} of C{object}
@param editorFactory: Factory for creation of value editors.
@type editorFactory: L{EditorFactory<datafinder.gui.user.common.widget.property.editors.factory.Editorfactory>}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget}
"""
QtGui.QDialog.__init__(self, parent)
Ui_listPropertyDialog.__init__(self)
self.setupUi(self)
self._initState = propertyValues
self._allowedPropertyTypes = allowedPropertyTypes
self._editorFactory = editorFactory
self._initializeSignalConnections()
self._initializeEditButtonsEnabledState()
self._initializeTable(propertyValues)
def _initializeSignalConnections(self):
self.connect(self.tableWidget, QtCore.SIGNAL("itemSelectionChanged()"), self.itemSelectionChangedSlot)
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self.addSlot)
self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self.editSlot)
self.connect(self.deleteButton, QtCore.SIGNAL("clicked()"), self.deleteSlot)
self.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), self.accepted)
self.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), self.rejected)
def _initializeEditButtonsEnabledState(self):
if not self._allowedPropertyTypes:
self.addButton.setEnabled(False)
self._setEnableStateOfItemEditingButtons(False)
def _setEnableStateOfItemEditingButtons(self, isEnabled):
self.deleteButton.setEnabled(isEnabled)
self.editButton.setEnabled(isEnabled)
def _initializeTable(self, propertyValues):
"""
Adds the property values into the table model.
@param propertyValues: Property values which should be displayed in the editor.
@type propertyValues: C{list} of C{object}
"""
self.tableWidget.setItemDelegate(
_ListPropertyItemDelegate(self._allowedPropertyTypes, self._editorFactory, self))
self.tableWidget.setColumnWidth(1, 150)
for row, value in enumerate(propertyValues):
propertyType = property_type.determinePropertyTypeConstant(value)
isEditingSupported = self._isEditingSupported(propertyType)
self._addPropertyItem(row, value, propertyType, isEditingSupported)
self.emit(QtCore.SIGNAL("layoutChanged()"))
def _isEditingSupported(self, propertyType):
if propertyType in self._allowedPropertyTypes:
return True
else:
return False
def _addPropertyItem(self, row, value, propertyType, isEditingSupported):
self.tableWidget.insertRow(row)
self.tableWidget.setRowHeight(row, 20)
self.tableWidget.setItem(row, 0 , QtGui.QTableWidgetItem(propertyType))
self.tableWidget.setItem(row, 1, _TableWidgetItem(value, isEditingSupported))
def addSlot(self):
""" This slot is called when a new item should be inserted. """
self.tableWidget.insertRow(self.tableWidget.model().rowCount())
self.tableWidget.setRowHeight(self.tableWidget.model().rowCount() - 1, 20)
self.tableWidget.setItem(self.tableWidget.rowCount() - 1, 0, QtGui.QTableWidgetItem(""))
self.tableWidget.setItem(self.tableWidget.rowCount() - 1, 1, _TableWidgetItem())
self.tableWidget.setFocus()
self.tableWidget.editItem(self.tableWidget.item(self.tableWidget.rowCount() - 1, 0))
def editSlot(self):
""" This slot is called when the edit button is pressed. """
item = self.tableWidget.currentItem()
self.tableWidget.editItem(item)
def deleteSlot(self):
""" Slot is called when the delete button is pressed. """
index = self.tableWidget.selectionModel().currentIndex()
self.tableWidget.model().removeRow(index.row())
if self.tableWidget.rowCount() == 0:
self._setEnableStateOfItemEditingButtons(False)
def itemSelectionChangedSlot(self):
""" De-activates buttons for properties which cannot be properly edited. """
if self.tableWidget.selectedItems():
item = self.tableWidget.selectedItems()[0]
if item.column() == 0: # Only items of the value column contain the editing information
item = self.tableWidget.item(item.row(), 1)
if item.isEditingSupported:
self._setEnableStateOfItemEditingButtons(True)
else:
self._setEnableStateOfItemEditingButtons(False)
def accepted(self):
""" This slot is called when the user clicks OK. It returns the edited list. """
properties = list()
for i in range(self.tableWidget.model().rowCount()):
item = self.tableWidget.item(i, 1)
if not item.value is None:
properties.append(item.value)
self.parent().value = properties
QtGui.QDialog.accept(self)
def rejected(self):
"""
This slot is called when the user cancels the dialog. It returns the
list that was passed to dialog as initData.
"""
self.parent().value = self._initState
QtGui.QDialog.reject(self)
class _ListPropertyItemDelegate(QtGui.QItemDelegate):
"""
Delegate for the property modification.
"""
def __init__(self, propertyTypeNames, editorFactory, parent=None):
"""
Constructor.
@param propertyTypeNames: Names of available property types.
@type propertyTypeNames: C{list} of C{unicode}
@param editorFactory: Factory for creation of value editors.
@type editorFactory: L{EditorFactory<datafinder.gui.user.common.widget.property.editors.factory.Editorfactory>}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget}
"""
QtGui.QItemDelegate.__init__(self, parent)
self._factory = editorFactory
self._propertyTypes = [QtCore.QString(unicode(propType)) for propType in propertyTypeNames]
def createEditor(self, parent, _, index):
"""
@see: L{createEditor<PyQt4.QtGui.QItemDelegate.createEditor>}
"""
typeIndex = index.model().index(index.row(), 0)
valueType = index.model().data(typeIndex, QtCore.Qt.DisplayRole).toString()
if index.column() == 0:
editor = QtGui.QComboBox(parent)
editor.addItems(self._propertyTypes)
if valueType in self._propertyTypes:
editor.setCurrentIndex(self._propertyTypes.index(valueType))
elif index.column() == 1:
editor = self._factory.createEditor(parent, valueType)
if not editor.isEnabled():
return None
return editor
def setModelData(self, editor, model, index):
"""
@see: QtGui.QItemDelegate#setModelData
"""
returnValue = self._factory.getValueFromEditor(editor)
model.setData(index, QtCore.QVariant(returnValue))
def setEditorData(self, editor, index):
"""
@see: L{setEditorData<PyQt4.QtGui.QItemDelegate.setEditorData>}
"""
if index.column() == 1:
value = self.parent().tableWidget.item(index.row(), 1).value
self._factory.setEditorValue(editor, value)
else:
QtGui.QItemDelegate.setEditorData(self, editor, index)
class _TableWidgetItem(QtGui.QTableWidgetItem):
""" Specific implementation of C{QTableWidgetItem}. """
def __init__(self, value=None, isEditingSupported=True):
"""
@param value: Value which is represented by this item.
@type value: C{object}
@param isEditingSupported: Flag which indicates whether the value can be edited or not.
@type isEditingSupported: C{bool}
"""
QtGui.QTableWidgetItem.__init__(self)
self._value = value
self._isEditingSupported = isEditingSupported
@property
def isEditingSupported(self):
""" Read-only access to the isEditingSupported flag."""
return self._isEditingSupported
@property
def value(self):
""" Read-only access to the value."""
return self._value
def data(self, role):
""" Ensures that the values are correctly rendered. """
if role == Qt.DisplayRole:
return QtCore.QVariant(determineDisplayRepresentation(self._value))
else:
return QtGui.QTableWidgetItem(self).data(role)
def setData(self, _, value):
""" Converts value given as QVariant to a Python object. """
value = extractPyObject(value)
self._value = value
| bsd-3-clause |
laperry1/android_external_chromium_org | chrome/browser/resources/chromeos/chromevox/tools/generate_test_messages.py | 62 | 1237 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Generates test_messages.js from an extension message json file.'''
import optparse
import sys
def Die(message):
  '''Prints an error message and exits the program.'''
print >>sys.stderr, message
sys.exit(1)
# Template for the test_messages.js.
_JS_TEMPLATE = '''// GENERATED FROM %(in_file)s
goog.provide('cvox.TestMessages');
cvox.TestMessages = %(json)s;
'''
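# Illustrative input/output sketch (hypothetical file names and content, not
# part of the original script): given a messages.json containing
#   {"MSG_HELLO": {"message": "Hello"}}
# invoking
#   generate_test_messages.py -o test_messages.js messages.json
# writes test_messages.js using the template above, i.e. the JSON object is
# inlined after "cvox.TestMessages =".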
def main():
parser = optparse.OptionParser(description=__doc__)
parser.add_option('-o', '--output_file', action='store',
metavar='SPEC',
help=('Where to output the generated deps file.'))
options, args = parser.parse_args()
if options.output_file is None:
Die('Output file not specified')
if len(args) != 1:
Die('Exactly one input file must be specified')
  in_file_name = args[0]
with open(in_file_name) as in_file:
json = in_file.read().strip()
with open(options.output_file, 'w') as out_file:
out_file.write(_JS_TEMPLATE % {'in_file': in_file_name, 'json': json})
if __name__ == '__main__':
main()
| bsd-3-clause |
mano3m/CouchPotatoServer | libs/sqlalchemy/engine/ddl.py | 35 | 7176 | # engine/ddl.py
# Copyright (C) 2009-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle CREATE/DROP workflow."""
from sqlalchemy import engine, schema
from sqlalchemy.sql import util as sql_util
class DDLBase(schema.SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables and set(tables) or None
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or \
not self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_create_sequence(self, sequence):
return self.dialect.supports_sequences and \
(
(not self.dialect.sequences_optional or
not sequence.optional) and
(
not self.checkfirst or
not self.dialect.has_sequence(
self.connection,
sequence.name,
schema=sequence.schema)
)
)
def visit_metadata(self, metadata):
if self.tables:
tables = self.tables
else:
tables = metadata.tables.values()
collection = [t for t in sql_util.sort_tables(tables)
if self._can_create_table(t)]
seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)]
metadata.dispatch.before_create(metadata, self.connection,
tables=collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for seq in seq_coll:
self.traverse_single(seq, create_ok=True)
for table in collection:
self.traverse_single(table, create_ok=True)
metadata.dispatch.after_create(metadata, self.connection,
tables=collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_table(self, table, create_ok=False):
if not create_ok and not self._can_create_table(table):
return
table.dispatch.before_create(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(schema.CreateTable(table))
if hasattr(table, 'indexes'):
for index in table.indexes:
self.traverse_single(index)
table.dispatch.after_create(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
self.connection.execute(schema.CreateSequence(sequence))
def visit_index(self, index):
self.connection.execute(schema.CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables:
tables = self.tables
else:
tables = metadata.tables.values()
collection = [t for t in reversed(sql_util.sort_tables(tables))
if self._can_drop_table(t)]
seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)]
metadata.dispatch.before_drop(metadata, self.connection,
tables=collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for table in collection:
self.traverse_single(table, drop_ok=True)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=True)
metadata.dispatch.after_drop(metadata, self.connection,
tables=collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_drop_sequence(self, sequence):
return self.dialect.supports_sequences and \
((not self.dialect.sequences_optional or
not sequence.optional) and
(not self.checkfirst or
self.dialect.has_sequence(
self.connection,
sequence.name,
schema=sequence.schema))
)
def visit_index(self, index):
self.connection.execute(schema.DropIndex(index))
def visit_table(self, table, drop_ok=False):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(schema.DropTable(table))
table.dispatch.after_drop(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(schema.DropSequence(sequence))
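# Illustrative note (not part of the original module): these visitors are
# normally driven by the engine rather than instantiated directly. Assuming a
# configured Engine `engine` and a MetaData `metadata`, something like
#
#   metadata.create_all(engine)   # runs SchemaGenerator over sorted tables
#   metadata.drop_all(engine)     # runs SchemaDropper in reverse dependency order
#
# ends up invoking visit_metadata() on these classes.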
| gpl-3.0 |
chripell/pyasicam | view.py | 1 | 8948 | #!/usr/bin/python3
import datetime
import os
import pyasicam as pc
import sys
import numpy as np
import cairo
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GdkPixbuf, GLib, Gdk, Gio, GObject
def gamma_stretch(im, gamma):
if im.dtype != np.float:
im = im.astype(np.float)
im /= 255.0
im = im ** gamma
return im * 255.0
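# Usage sketch (illustrative, not part of the original script): gamma < 1
# brightens mid-tones and gamma > 1 darkens them for an 8-bit frame.
# For example, a pixel value of 64 with gamma=0.5 maps to
# 255 * (64/255) ** 0.5 ~= 128.
#
#   brightened = gamma_stretch(frame, 0.5)
#   darkened = gamma_stretch(frame, 2.0)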
class Camera(pc.Camera):
def __init__(self, i):
super().__init__(i)
self.mean = 1
self.im_num = 0
self.im_mean = None
self.done = 0
self.OpenCamera()
self.InitCamera()
self.set_exposure_ms(1000)
caps = self.GetCameraProperty()
self.SetROIFormat(caps.MaxWidth, caps.MaxHeight, 1, pc.IMG_Y8)
# Trick to make ASI120MC work for short exposures.
# if self.GetCameraProperty().IsUSB3Camera == 0:
# self.SetControlValue(pc.BANDWIDTHOVERLOAD, 20, False)
def capture(self):
self.StartExposure(False)
def get_image(self):
st = self.GetExpStatus()
if st == pc.EXP_FAILED:
# raise RuntimeError("Exposure failed")
print("Exposure failed")
self.capture()
return None
if st == pc.EXP_IDLE:
raise RuntimeError("Exposure not started")
if self.GetExpStatus() == pc.EXP_WORKING:
return None
img = self.GetDataAfterExp()
self.capture()
if self.mean <= 1:
return img
self.im_num += 1
if self.im_num == 1:
self.im_mean = img.astype(np.float)
else:
self.im_mean += img
if self.im_num >= self.mean:
self.im_num = 0
return self.im_mean / self.mean
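    # Illustrative polling sketch (not part of the original script; camera
    # index and sleep interval are assumptions):
    #
    #   cam = Camera(0)
    #   cam.mean = 4            # average 4 consecutive frames
    #   cam.capture()
    #   frame = None
    #   while frame is None:    # None until the exposure (and stack) completes
    #       time.sleep(0.1)     # assumes `import time`
    #       frame = cam.get_image()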
def set_exposure_ms(self, ms):
self.SetControlValue(pc.EXPOSURE, int(ms*1000), False)
def get_exposure_ms(self):
return self.GetControlValue(pc.EXPOSURE)[0] / 1000.0
def set_gain(self, gain):
self.SetControlValue(pc.GAIN, gain, False)
def get_gain(self):
return self.GetControlValue(pc.GAIN)[0]
class Histo:
def __init__(self):
self.data = None
self.stretch = 0
self.stretch_from = 0
self.stretch_to = 127
self.bins = [2*i - 0.5 for i in range(129)]
def get(self):
self.histo = Gtk.DrawingArea()
self.histo.connect("draw", self.draw)
self.histo.set_property("height-request", 100)
return self.histo
def draw(self, w, cr):
if self.data is None:
return
width = w.get_allocated_width()
height = w.get_allocated_height()
cr.set_source_rgb(0.7, 0.1, 0.1)
cr.move_to(0, 0)
cr.line_to(width, 0)
cr.line_to(width, height)
cr.line_to(0, height)
cr.line_to(0, 0)
cr.stroke()
xscale = width / 127.0
yscale = float(height) / np.max(self.data)
if self.stretch_from >= 0:
cr.set_source_rgb(0.9, 0.6, 0.6)
cr.rectangle(self.stretch_from * xscale, 0,
(self.stretch_to - self.stretch_from) * xscale,
height)
cr.fill()
cr.set_source_rgb(0.1, 0.1, 0.1)
cr.new_path()
cr.move_to(0, height - 0)
cr.line_to(0, height - self.data[0] * yscale)
for i in range(1, 128):
cr.line_to(i * xscale, height - self.data[i] * yscale)
cr.line_to(width, height - 0)
cr.close_path()
cr.fill()
def apply(self, im):
size = im.shape[0]
if im.shape[1] > size:
size = im.shape[1]
n = 1
while size > 256:
size /= 2
n *= 2
self.data = np.histogram(im[::n, ::n], bins=self.bins)[0]
if self.stretch > 0 and self.stretch < 100:
cs = np.cumsum(self.data)/np.sum(self.data) * 100
self.stretch_from = len(cs[cs <= self.stretch])
self.stretch_to = len(cs[cs <= 100 - self.stretch])
s_to = self.stretch_to / 127.0 * 255.0
s_from = self.stretch_from / 127.0 * 255.0
scale = 255.0 / (s_to - s_from)
im = np.clip((im - s_from) * scale, 0, 255)
else:
self.stretch_from = 0
self.stretch_to = 127
self.histo.queue_draw()
return im
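# Worked example for Histo.apply (illustrative numbers, not part of the
# original script): with stretch = 5, stretch_from/stretch_to become the bins
# holding the 5th and 95th percentiles of the 128-bin histogram. If they come
# out as 10 and 110, input values below 10/127*255 ~= 20 clip to 0 and values
# above 110/127*255 ~= 221 clip to 255, with a linear ramp in between.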
class Mainwindow(Gtk.Window):
def __init__(self, cam, *args, **kwargs):
Gtk.Window.__init__(
self, default_width=800, default_height=600,
title="PYASICAM", *args, **kwargs)
self.cam = cam
self.surface = None
self.gamma = 1.0
cam.capture()
scrolledImage = Gtk.ScrolledWindow()
self.image = Gtk.DrawingArea()
self.image.connect("draw", self.draw)
self.image.connect("configure-event", self.configure)
scrolledImage.add(self.image)
mainHBox = Gtk.HBox()
mainHBox.pack_start(scrolledImage, True, True, 0)
controlsBox = Gtk.VBox()
mainHBox.pack_start(controlsBox, False, False, 0)
self.add_controls(controlsBox)
self.add(mainHBox)
self.connect("delete-event", Gtk.main_quit)
self.periodic = GLib.timeout_add(100, self.get_image)
self.show_all()
def process_image(self, im):
im = self.histo.apply(im)
if self.gamma != 1.0:
im = gamma_stretch(im, self.gamma)
self.publish_image(im)
def get_image(self):
im = self.cam.get_image()
if im is not None:
self.im = im
self.process_image(im)
self.periodic = GLib.timeout_add(100, self.get_image)
def publish_image(self, im):
if im.dtype != np.uint8:
im = im.astype(np.uint8)
im32 = np.dstack((im, im, im, im))
self.surface = cairo.ImageSurface.create_for_data(
im32, cairo.FORMAT_RGB24, im.shape[1], im.shape[0])
self.image.set_size_request(im.shape[1], im.shape[0])
self.image.queue_draw()
def draw(self, w, cr):
if not self.surface:
return
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def configure(self, w, ev):
if not self.surface:
return
self.image.queue_draw()
def create_text_control(self, text, ini, cb):
box = Gtk.HBox()
label = Gtk.Label()
label.set_markup(text)
label.set_justify(Gtk.Justification.RIGHT)
box.pack_start(label, False, False, 0)
entry = Gtk.Entry()
entry.set_text(ini)
entry.connect("activate", cb)
box.pack_start(entry, True, False, 0)
return box
def add_controls(self, box):
self.histo = Histo()
box.pack_start(self.histo.get(), False, False, 0)
exp_ms = self.create_text_control(
"Exposure (ms):",
"%.2f" % self.cam.get_exposure_ms(),
self.set_exposure_ms)
box.pack_start(exp_ms, False, False, 0)
gain = self.create_text_control(
"Gain:",
"%d" % self.cam.get_gain(),
self.set_gain)
box.pack_start(gain, False, False, 0)
mean = self.create_text_control(
"Mean:",
"%d" % self.cam.mean,
self.set_mean)
box.pack_start(mean, False, False, 0)
stretch = self.create_text_control(
"Stretch:",
"%d" % self.histo.stretch,
self.set_stretch)
box.pack_start(stretch, False, False, 0)
gamma = self.create_text_control(
"Gamma:",
"%d" % self.gamma,
self.set_gamma)
box.pack_start(gamma, False, False, 0)
def set_exposure_ms(self, e):
try:
self.cam.set_exposure_ms(float(e.get_text()))
except:
pass
e.set_text("%.2f" % self.cam.get_exposure_ms())
def set_gain(self, e):
try:
self.cam.set_gain(int(e.get_text()))
except:
pass
e.set_text("%d" % self.cam.get_gain())
def set_mean(self, e):
try:
self.cam.mean = int(e.get_text())
except:
pass
e.set_text("%d" % self.cam.mean)
def set_stretch(self, e):
try:
self.histo.stretch = int(e.get_text())
except:
pass
e.set_text("%d" % self.histo.stretch)
def set_gamma(self, e):
try:
self.gamma = float(e.get_text())
except:
pass
e.set_text("%f" % self.gamma)
if len(sys.argv) < 2:
print("Usage: %s [list/camera no.]" % sys.argv[0])
sys.exit(1)
n = pc.GetNumOfConnectedCameras()
if sys.argv[1] == "list":
for i in range(n):
c = pc.Camera(i)
prop = c.GetCameraProperty()
print("%d: %s" % (i, prop.Name.decode("utf-8")))
sys.exit(0)
cam = Camera(int(sys.argv[1]))
window = Mainwindow(cam)
Gtk.main()
| gpl-3.0 |
lynus/hadoop-over-rdma | src/contrib/hod/hodlib/Hod/hod.py | 182 | 29420 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# -*- python -*-
import sys, os, getpass, pprint, re, cPickle, random, shutil, time, errno
import hodlib.Common.logger
from hodlib.ServiceRegistry.serviceRegistry import svcrgy
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.util import to_http_url, get_exception_string
from hodlib.Common.util import get_exception_error_string
from hodlib.Common.util import hodInterrupt, HodInterruptException
from hodlib.Common.util import HOD_INTERRUPTED_CODE
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Hod.hadoop import hadoopCluster, hadoopScript
CLUSTER_DATA_FILE = 'clusters'
INVALID_STATE_FILE_MSGS = \
[
"Requested operation cannot be performed. Cannot read %s: " + \
"Permission denied.",
"Requested operation cannot be performed. " + \
"Cannot write to %s: Permission denied.",
"Requested operation cannot be performed. " + \
"Cannot read/write to %s: Permission denied.",
"Cannot update %s: Permission denied. " + \
"Cluster is deallocated, but info and list " + \
"operations might show incorrect information.",
]
class hodState:
def __init__(self, store):
self.__store = store
self.__stateFile = None
self.__init_store()
self.__STORE_EXT = ".state"
def __init_store(self):
if not os.path.exists(self.__store):
os.mkdir(self.__store)
def __set_state_file(self, id=None):
if id:
self.__stateFile = os.path.join(self.__store, "%s%s" % (id,
self.__STORE_EXT))
else:
for item in os.listdir(self.__store):
if item.endswith(self.__STORE_EXT):
self.__stateFile = os.path.join(self.__store, item)
def get_state_file(self):
return self.__stateFile
def checkStateFile(self, id=None, modes=(os.R_OK,)):
    # does the state file exist, and is it readable/writable/both?
self.__set_state_file(id)
# return true if file doesn't exist, because HOD CAN create
# state file and so WILL have permissions to read and/or write
try:
os.stat(self.__stateFile)
except OSError, err:
if err.errno == errno.ENOENT: # error 2 (no such file)
return True
# file exists
ret = True
for mode in modes:
ret = ret and os.access(self.__stateFile, mode)
return ret
def read(self, id=None):
info = {}
self.__set_state_file(id)
if self.__stateFile:
if os.path.isfile(self.__stateFile):
stateFile = open(self.__stateFile, 'r')
try:
info = cPickle.load(stateFile)
except EOFError:
pass
stateFile.close()
return info
def write(self, id, info):
self.__set_state_file(id)
if not os.path.exists(self.__stateFile):
self.clear(id)
stateFile = open(self.__stateFile, 'w')
cPickle.dump(info, stateFile)
stateFile.close()
def clear(self, id=None):
self.__set_state_file(id)
if self.__stateFile and os.path.exists(self.__stateFile):
os.remove(self.__stateFile)
else:
for item in os.listdir(self.__store):
if item.endswith(self.__STORE_EXT):
os.remove(item)
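# Illustrative usage sketch (not part of the original module; the directory
# and cluster id below are assumptions):
#
#   state = hodState('/home/user/.hod/clusters')
#   state.write('cluster-42', {'jobid': '42.torque', 'min': 3, 'max': 3})
#   info = state.read('cluster-42')                    # -> the dict above
#   ok = state.checkStateFile('cluster-42', (os.R_OK, os.W_OK))
#   state.clear('cluster-42')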
class hodRunner:
def __init__(self, cfg, log=None, cluster=None):
self.__hodhelp = hodHelp()
self.__ops = self.__hodhelp.ops
self.__cfg = cfg
self.__npd = self.__cfg['nodepooldesc']
self.__opCode = 0
self.__user = getpass.getuser()
self.__registry = None
self.__baseLogger = None
    # Allow passing in a log object to help testing - a stub can be passed in
if log is None:
self.__setup_logger()
else:
self.__log = log
self.__userState = hodState(self.__cfg['hod']['user_state'])
self.__clusterState = None
self.__clusterStateInfo = { 'env' : None, 'hdfs' : None, 'mapred' : None }
    # Allow passing in a cluster object to help testing - a stub can be passed in
if cluster is None:
self.__cluster = hadoopCluster(self.__cfg, self.__log)
else:
self.__cluster = cluster
def __setup_logger(self):
self.__baseLogger = hodlib.Common.logger.hodLog('hod')
self.__log = self.__baseLogger.add_logger(self.__user )
if self.__cfg['hod']['stream']:
self.__baseLogger.add_stream(level=self.__cfg['hod']['debug'],
addToLoggerNames=(self.__user ,))
if self.__cfg['hod'].has_key('syslog-address'):
self.__baseLogger.add_syslog(self.__cfg['hod']['syslog-address'],
level=self.__cfg['hod']['debug'],
addToLoggerNames=(self.__user ,))
def get_logger(self):
return self.__log
def __setup_cluster_logger(self, directory):
self.__baseLogger.add_file(logDirectory=directory, level=4,
backupCount=self.__cfg['hod']['log-rollover-count'],
addToLoggerNames=(self.__user ,))
def __setup_cluster_state(self, directory):
self.__clusterState = hodState(directory)
def __norm_cluster_dir(self, directory):
directory = os.path.expanduser(directory)
if not os.path.isabs(directory):
directory = os.path.join(self.__cfg['hod']['original-dir'], directory)
directory = os.path.abspath(directory)
return directory
def __setup_service_registry(self):
cfg = self.__cfg['hod'].copy()
cfg['debug'] = 0
self.__registry = svcrgy(cfg, self.__log)
self.__registry.start()
self.__log.debug(self.__registry.getXMLRPCAddr())
self.__cfg['hod']['xrs-address'] = self.__registry.getXMLRPCAddr()
self.__cfg['ringmaster']['svcrgy-addr'] = self.__cfg['hod']['xrs-address']
def __set_cluster_state_info(self, env, hdfs, mapred, ring, jobid, min, max):
self.__clusterStateInfo['env'] = env
self.__clusterStateInfo['hdfs'] = "http://%s" % hdfs
self.__clusterStateInfo['mapred'] = "http://%s" % mapred
self.__clusterStateInfo['ring'] = ring
self.__clusterStateInfo['jobid'] = jobid
self.__clusterStateInfo['min'] = min
self.__clusterStateInfo['max'] = max
def __set_user_state_info(self, info):
userState = self.__userState.read(CLUSTER_DATA_FILE)
for key in info.keys():
userState[key] = info[key]
self.__userState.write(CLUSTER_DATA_FILE, userState)
def __remove_cluster(self, clusterDir):
clusterInfo = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterInfo:
del(clusterInfo[clusterDir])
self.__userState.write(CLUSTER_DATA_FILE, clusterInfo)
def __cleanup(self):
if self.__registry: self.__registry.stop()
def __check_operation(self, operation):
opList = operation.split()
if not opList[0] in self.__ops:
self.__log.critical("Invalid hod operation specified: %s" % operation)
self._op_help(None)
self.__opCode = 2
return opList
def __adjustMasterFailureCountConfig(self, nodeCount):
# This method adjusts the ringmaster.max-master-failures variable
    # to a value that is bounded by a function of the number of
# nodes.
maxFailures = self.__cfg['ringmaster']['max-master-failures']
# Count number of masters required - depends on which services
# are external
masters = 0
if not self.__cfg['gridservice-hdfs']['external']:
masters += 1
if not self.__cfg['gridservice-mapred']['external']:
masters += 1
    # So, if there are n nodes and m masters, we want at least
    # all masters to come up. Therefore, at least m nodes should be
    # good, which means a maximum of n-m nodes can fail.
maxFailedNodes = nodeCount - masters
# The configured max number of failures is now bounded by this
# number.
self.__cfg['ringmaster']['max-master-failures'] = \
min(maxFailures, maxFailedNodes)
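  # Worked example (illustrative numbers): with nodeCount = 5 and both HDFS
  # and Map/Reduce masters internal (masters = 2), at most 5 - 2 = 3 nodes may
  # fail, so a configured max-master-failures of 5 is lowered to min(5, 3) = 3.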
def _op_allocate(self, args):
operation = "allocate"
argLength = len(args)
min = 0
max = 0
errorFlag = False
errorMsgs = []
if argLength == 3:
nodes = args[2]
clusterDir = self.__norm_cluster_dir(args[1])
if not os.path.exists(clusterDir):
try:
os.makedirs(clusterDir)
except OSError, err:
errorFlag = True
errorMsgs.append("Could not create cluster directory. %s" \
% (str(err)))
elif not os.path.isdir(clusterDir):
errorFlag = True
errorMsgs.append( \
"Invalid cluster directory (--hod.clusterdir or -d) : " + \
clusterDir + " : Not a directory")
if int(nodes) < 3 :
errorFlag = True
errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \
"Must be >= 3. Given nodes: %s" % nodes)
if errorFlag:
for msg in errorMsgs:
self.__log.critical(msg)
self.__opCode = 3
return
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, \
(os.R_OK, os.W_OK)):
self.__log.critical(INVALID_STATE_FILE_MSGS[2] % \
self.__userState.get_state_file())
self.__opCode = 1
return
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterList.keys():
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
# Check if the job is not running. Only then can we safely
# allocate another cluster. Otherwise the user would need
# to deallocate and free up resources himself.
if clusterInfo.has_key('jobid') and \
self.__cluster.is_cluster_deallocated(clusterInfo['jobid']):
self.__log.warn("Found a dead cluster at cluster directory '%s'. Deallocating it to allocate a new one." % (clusterDir))
self.__remove_cluster(clusterDir)
self.__clusterState.clear()
else:
self.__log.critical("Found a previously allocated cluster at cluster directory '%s'. HOD cannot determine if this cluster can be automatically deallocated. Deallocate the cluster if it is unused." % (clusterDir))
self.__opCode = 12
return
self.__setup_cluster_logger(clusterDir)
(status, message) = self.__cluster.is_valid_account()
      if status != 0:
if message:
for line in message:
self.__log.critical("verify-account output: %s" % line)
self.__log.critical("Cluster cannot be allocated because account verification failed. " \
+ "verify-account returned exit code: %s." % status)
self.__opCode = 4
return
else:
self.__log.debug("verify-account returned zero exit code.")
if message:
self.__log.debug("verify-account output: %s" % message)
if re.match('\d+-\d+', nodes):
(min, max) = nodes.split("-")
min = int(min)
max = int(max)
else:
try:
nodes = int(nodes)
min = nodes
max = nodes
except ValueError:
print self.__hodhelp.help(operation)
self.__log.critical(
"%s operation requires a pos_int value for n(nodecount)." %
operation)
self.__opCode = 3
else:
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
self.__opCode = self.__cluster.check_cluster(clusterInfo)
if self.__opCode == 0 or self.__opCode == 15:
self.__setup_service_registry()
if hodInterrupt.isSet():
self.__cleanup()
raise HodInterruptException()
self.__log.debug("Service Registry started.")
self.__adjustMasterFailureCountConfig(nodes)
try:
allocateStatus = self.__cluster.allocate(clusterDir, min, max)
except HodInterruptException, h:
self.__cleanup()
raise h
# Allocation has gone through.
# Don't care about interrupts any more
try:
if allocateStatus == 0:
self.__set_cluster_state_info(os.environ,
self.__cluster.hdfsInfo,
self.__cluster.mapredInfo,
self.__cluster.ringmasterXRS,
self.__cluster.jobId,
min, max)
self.__setup_cluster_state(clusterDir)
self.__clusterState.write(self.__cluster.jobId,
self.__clusterStateInfo)
# Do we need to check for interrupts here ??
self.__set_user_state_info(
{ clusterDir : self.__cluster.jobId, } )
self.__opCode = allocateStatus
except Exception, e:
# Some unknown problem.
self.__cleanup()
self.__cluster.deallocate(clusterDir, self.__clusterStateInfo)
self.__opCode = 1
raise Exception(e)
elif self.__opCode == 12:
self.__log.critical("Cluster %s already allocated." % clusterDir)
elif self.__opCode == 10:
self.__log.critical("dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
elif self.__opCode == 13:
self.__log.warn("hdfs dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
elif self.__opCode == 14:
self.__log.warn("mapred dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
if self.__opCode > 0 and self.__opCode != 15:
self.__log.critical("Cannot allocate cluster %s" % clusterDir)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires two arguments. " % operation
+ "A cluster directory and a nodecount.")
self.__opCode = 3
def _is_cluster_allocated(self, clusterDir):
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo != {}:
return True
return False
def _op_deallocate(self, args):
operation = "deallocate"
argLength = len(args)
if argLength == 2:
clusterDir = self.__norm_cluster_dir(args[1])
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True)
else:
self.__opCode = \
self.__cluster.deallocate(clusterDir, clusterInfo)
# irrespective of whether deallocate failed or not,
# remove the cluster state.
self.__clusterState.clear()
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[3] % \
self.__userState.get_state_file())
self.__opCode = 1
return
self.__remove_cluster(clusterDir)
else:
self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires one argument. " % operation
+ "A cluster path.")
self.__opCode = 3
def _op_list(self, args):
operation = 'list'
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
for path in clusterList.keys():
if not os.path.isdir(path):
self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path))
continue
self.__setup_cluster_state(path)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
# something wrong with the cluster directory.
self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path))
continue
clusterStatus = self.__cluster.check_cluster(clusterInfo)
if clusterStatus == 12:
self.__log.info("alive\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 10:
self.__log.info("dead\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 13:
self.__log.info("hdfs dead\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 14:
self.__log.info("mapred dead\t%s\t%s" % (clusterList[path], path))
def _op_info(self, args):
operation = 'info'
argLength = len(args)
if argLength == 2:
clusterDir = self.__norm_cluster_dir(args[1])
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
# something wrong with the cluster directory.
self.__handle_invalid_cluster_directory(clusterDir)
else:
clusterStatus = self.__cluster.check_cluster(clusterInfo)
if clusterStatus == 12:
self.__print_cluster_info(clusterInfo)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
elif clusterStatus == 10:
self.__log.critical("%s cluster is dead" % clusterDir)
elif clusterStatus == 13:
self.__log.warn("%s cluster hdfs is dead" % clusterDir)
elif clusterStatus == 14:
self.__log.warn("%s cluster mapred is dead" % clusterDir)
if clusterStatus != 12:
if clusterStatus == 15:
self.__log.critical("Cluster %s not allocated." % clusterDir)
else:
self.__print_cluster_info(clusterInfo)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
self.__opCode = clusterStatus
else:
self.__handle_invalid_cluster_directory(clusterDir)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires one argument. " % operation
+ "A cluster path.")
self.__opCode = 3
def __handle_invalid_cluster_directory(self, clusterDir, cleanUp=False):
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \
self.__userState.get_state_file())
self.__opCode = 1
return
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterList.keys():
# previously allocated cluster.
self.__log.critical("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (clusterList[clusterDir], clusterDir))
if cleanUp:
self.__cluster.delete_job(clusterList[clusterDir])
self.__log.critical("Freeing resources allocated to the cluster.")
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[1] % \
self.__userState.get_state_file())
self.__opCode = 1
return
self.__remove_cluster(clusterDir)
self.__opCode = 3
else:
if not os.path.exists(clusterDir):
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : No such directory")
elif not os.path.isdir(clusterDir):
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : Not a directory")
else:
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : Not tied to any allocated cluster.")
self.__opCode = 15
def __print_cluster_info(self, clusterInfo):
keys = clusterInfo.keys()
_dict = {
'jobid' : 'Cluster Id', 'min' : 'Nodecount',
'hdfs' : 'HDFS UI at' , 'mapred' : 'Mapred UI at'
}
for key in _dict.keys():
if clusterInfo.has_key(key):
self.__log.info("%s %s" % (_dict[key], clusterInfo[key]))
if clusterInfo.has_key('ring'):
self.__log.debug("%s\t%s" % ('Ringmaster at ', clusterInfo['ring']))
if self.__cfg['hod']['debug'] == 4:
for var in clusterInfo['env'].keys():
self.__log.debug("%s = %s" % (var, clusterInfo['env'][var]))
def _op_help(self, arg):
if arg is None or len(arg) != 2:
print "hod commands:\n"
for op in self.__ops:
print self.__hodhelp.help(op)
else:
if arg[1] not in self.__ops:
print self.__hodhelp.help('help')
self.__log.critical("Help requested for invalid operation : %s"%arg[1])
self.__opCode = 3
else: print self.__hodhelp.help(arg[1])
def operation(self):
operation = self.__cfg['hod']['operation']
try:
opList = self.__check_operation(operation)
if self.__opCode == 0:
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \
self.__userState.get_state_file())
self.__opCode = 1
return self.__opCode
getattr(self, "_op_%s" % opList[0])(opList)
except HodInterruptException, h:
self.__log.critical("op: %s failed because of a process interrupt." \
% operation)
self.__opCode = HOD_INTERRUPTED_CODE
except:
self.__log.critical("op: %s failed: %s" % (operation,
get_exception_error_string()))
self.__log.debug(get_exception_string())
self.__cleanup()
self.__log.debug("return code: %s" % self.__opCode)
return self.__opCode
def script(self):
errorFlag = False
errorMsgs = []
scriptRet = 0 # return from the script, if run
script = self.__cfg['hod']['script']
nodes = self.__cfg['hod']['nodecount']
clusterDir = self.__cfg['hod']['clusterdir']
if not os.path.exists(script):
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : No such file")
elif not os.path.isfile(script):
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : Not a file.")
else:
isExecutable = os.access(script, os.X_OK)
if not isExecutable:
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : Not an executable.")
if not os.path.exists(clusterDir):
try:
os.makedirs(clusterDir)
except OSError, err:
errorFlag = True
errorMsgs.append("Could not create cluster directory. %s" % (str(err)))
elif not os.path.isdir(clusterDir):
errorFlag = True
errorMsgs.append( \
"Invalid cluster directory (--hod.clusterdir or -d) : " + \
clusterDir + " : Not a directory")
if int(self.__cfg['hod']['nodecount']) < 3 :
errorFlag = True
errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \
"Must be >= 3. Given nodes: %s" % nodes)
if errorFlag:
for msg in errorMsgs:
self.__log.critical(msg)
self.handle_script_exit_code(scriptRet, clusterDir)
sys.exit(3)
try:
self._op_allocate(('allocate', clusterDir, str(nodes)))
if self.__opCode == 0:
if self.__cfg['hod'].has_key('script-wait-time'):
time.sleep(self.__cfg['hod']['script-wait-time'])
self.__log.debug('Slept for %d seconds. Now going to run the script' % self.__cfg['hod']['script-wait-time'])
if hodInterrupt.isSet():
self.__log.debug('Hod interrupted - not executing script')
else:
scriptRunner = hadoopScript(clusterDir,
self.__cfg['hod']['original-dir'])
self.__opCode = scriptRunner.run(script)
scriptRet = self.__opCode
self.__log.info("Exit code from running the script: %d" % self.__opCode)
else:
self.__log.critical("Error %d in allocating the cluster. Cannot run the script." % self.__opCode)
if hodInterrupt.isSet():
# Got interrupt while executing script. Unsetting it for deallocating
hodInterrupt.setFlag(False)
if self._is_cluster_allocated(clusterDir):
self._op_deallocate(('deallocate', clusterDir))
except HodInterruptException, h:
self.__log.critical("Script failed because of a process interrupt.")
self.__opCode = HOD_INTERRUPTED_CODE
except:
self.__log.critical("script: %s failed: %s" % (script,
get_exception_error_string()))
self.__log.debug(get_exception_string())
self.__cleanup()
self.handle_script_exit_code(scriptRet, clusterDir)
return self.__opCode
def handle_script_exit_code(self, scriptRet, clusterDir):
# We want to give importance to a failed script's exit code, and write out exit code to a file separately
# so users can easily get it if required. This way they can differentiate between the script's exit code
# and hod's exit code.
if os.path.exists(clusterDir):
exit_code_file_name = (os.path.join(clusterDir, 'script.exitcode'))
if scriptRet != 0:
exit_code_file = open(exit_code_file_name, 'w')
print >>exit_code_file, scriptRet
exit_code_file.close()
self.__opCode = scriptRet
else:
#ensure script exit code file is not there:
if (os.path.exists(exit_code_file_name)):
os.remove(exit_code_file_name)
class hodHelp:
def __init__(self):
self.ops = ['allocate', 'deallocate', 'info', 'list','script', 'help']
self.usage_strings = \
{
'allocate' : 'hod allocate -d <clusterdir> -n <nodecount> [OPTIONS]',
'deallocate' : 'hod deallocate -d <clusterdir> [OPTIONS]',
'list' : 'hod list [OPTIONS]',
'info' : 'hod info -d <clusterdir> [OPTIONS]',
'script' :
'hod script -d <clusterdir> -n <nodecount> -s <script> [OPTIONS]',
'help' : 'hod help <OPERATION>',
}
self.description_strings = \
{
'allocate' : "Allocates a cluster of n nodes using the specified \n" + \
" cluster directory to store cluster state \n" + \
" information. The Hadoop site XML is also stored \n" + \
" in this location.\n",
'deallocate' : "Deallocates a cluster using the specified \n" + \
" cluster directory. This operation is also \n" + \
" required to clean up a dead cluster.\n",
'list' : "List all clusters currently allocated by a user, \n" + \
" along with limited status information and the \n" + \
" cluster ID.\n",
'info' : "Provide detailed information on an allocated cluster.\n",
'script' : "Allocates a cluster of n nodes with the given \n" +\
" cluster directory, runs the specified script \n" + \
" using the allocated cluster, and then \n" + \
" deallocates the cluster.\n",
'help' : "Print help for the operation and exit.\n" + \
"Available operations : %s.\n" % self.ops,
}
def usage(self, op):
return "Usage : " + self.usage_strings[op] + "\n" + \
"For full description: hod help " + op + ".\n"
def help(self, op=None):
if op is None:
return "hod <operation> [ARGS] [OPTIONS]\n" + \
"Available operations : %s\n" % self.ops + \
"For help on a particular operation : hod help <operation>.\n" + \
"For all options : hod help options."
else:
return "Usage : " + self.usage_strings[op] + "\n" + \
"Description : " + self.description_strings[op] + \
"For all options : hod help options.\n"
| apache-2.0 |
philippjfr/bokeh | bokeh/sphinxext/bokeh_github.py | 10 | 4909 | ''' Simplify linking to Bokeh Github resources.
This module provides four new roles that can be used to easily link
to various resources in the Bokeh Github repository:
``:bokeh-commit:`` : link to a specific commit
``:bokeh-issue:`` : link to an issue
``:bokeh-pull:`` : link to a pull request
``:bokeh-tree:`` : (versioned) link to a source tree URL
Examples
--------
The following code::
The repo history shows that :bokeh-commit:`bf19bcb` was made in
:bokeh-pull:`1698`, which closed :bokeh-issue:`1694`. This included
updating all of the files in the :bokeh-tree:`examples` subdirectory.
yields the output:
The repo history shows that :bokeh-commit:`bf19bcb` was made in
:bokeh-pull:`1698`, which closed :bokeh-issue:`1694`. This included
updating all of the files in the :bokeh-tree:`examples` subdirectory.
'''
from __future__ import absolute_import
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
BOKEH_GH = "https://github.com/bokeh/bokeh"
def bokeh_commit(name, rawtext, text, lineno, inliner, options=None, content=None):
''' Link to a Bokeh Github commit.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
'''
app = inliner.document.settings.env.app
node = _make_gh_link_node(app, rawtext, 'commit', 'commit ', 'commit', text, options)
return [node], []
def bokeh_issue(name, rawtext, text, lineno, inliner, options=None, content=None):
''' Link to a Bokeh Github issue.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
'''
app = inliner.document.settings.env.app
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'Github issue number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = _make_gh_link_node(app, rawtext, 'issue', '#', 'issues', str(issue_num), options)
return [node], []
def bokeh_pull(name, rawtext, text, lineno, inliner, options=None, content=None):
''' Link to a Bokeh Github pull request.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
'''
app = inliner.document.settings.env.app
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'Github pull request number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = _make_gh_link_node(app, rawtext, 'pull', 'pull request ', 'pull', str(issue_num), options)
return [node], []
def bokeh_tree(name, rawtext, text, lineno, inliner, options=None, content=None):
''' Link to a URL in the Bokeh GitHub tree, pointing to appropriate tags
for releases, or to master otherwise.
The link text is simply the URL path supplied, so typical usage might
look like:
.. code-block:: none
All of the examples are located in the :bokeh-tree:`examples`
subdirectory of your Bokeh checkout.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
'''
app = inliner.document.settings.env.app
tag = app.env.config['version']
if '-' in tag:
tag = 'master'
url = "%s/tree/%s/%s" % (BOKEH_GH, tag, text)
options = options or {}
set_classes(options)
node = nodes.reference(
rawtext, text, refuri=url, **options)
return [node], []
def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None):
''' Return a link to a Bokeh Github resource.
Args:
app (Sphinx app) : current app
rawtext (str) : text being replaced with link node.
role (str) : role name
kind (str) : resource type (issue, pull, etc.)
api_type (str) : type for api link
id : (str) : id of the resource to link to
options (dict) : options dictionary passed to role function
'''
url = "%s/%s/%s" % (BOKEH_GH, api_type, id)
options = options or {}
set_classes(options)
node = nodes.reference(
rawtext, kind + utils.unescape(id), refuri=url, **options)
return node
def setup(app):
app.add_role('bokeh-commit', bokeh_commit)
app.add_role('bokeh-issue', bokeh_issue)
app.add_role('bokeh-pull', bokeh_pull)
app.add_role('bokeh-tree', bokeh_tree)
| bsd-3-clause |
delighted/phantomjs | src/qt/qtwebkit/Source/ThirdParty/gtest/scripts/fuse_gtest_files.py | 314 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include <gtest/...>'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*<(gtest/.+)>')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include <gtest/...>' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include <gtest/gtest-spi.h>'. This file is not
# #included by <gtest/gtest.h>, so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include <gtest/foo.h>' where foo is not gtest-spi.
# We treat it as '#include <gtest/gtest.h>', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include <gtest/gtest.h> more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include <%s>\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| bsd-3-clause |
livioferrante/my-final-project | .mywaflib/waflib/extras/boost.py | 1 | 13891 | #!/usr/bin/env python
# encoding: utf-8
#
# partially based on boost.py written by Gernot Vormayr
# written by Ruediger Sonderfeld <ruediger@c-plusplus.de>, 2008
# modified by Bjoern Michaelsen, 2008
# modified by Luca Fossati, 2008
# rewritten for waf 1.5.1, Thomas Nagy, 2008
# rewritten for waf 1.6.2, Sylvain Rouquette, 2011
'''
This is an extra tool, not bundled with the default waf binary.
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,boost
or, if you have waf >= 1.6.2
$ ./waf update --files=boost
When using this tool, the wscript will look like:
def options(opt):
opt.load('compiler_cxx boost')
def configure(conf):
conf.load('compiler_cxx boost')
conf.check_boost(lib='system filesystem')
def build(bld):
bld(source='main.cpp', target='app', use='BOOST')
Options are generated, in order to specify the location of boost includes/libraries.
The `check_boost` configuration function allows to specify the used boost libraries.
It can also provide default arguments to the --boost-mt command-line arguments.
Everything will be packaged together in a BOOST component that you can use.
When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
- you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
Errors: C4530
- boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC
So before calling `conf.check_boost` you might want to disable it by adding
conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
Errors:
- boost might also be compiled with /MT, which links the runtime statically.
If you have problems with redefined symbols,
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
Passing `--boost-linkage_autodetect` might help ensuring having a correct linkage in some basic cases.
'''
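# Illustrative sketch (not part of the original tool): the same options can also be
# supplied as keyword arguments to check_boost instead of on the command line.
# The chosen libraries and the 'main.cpp'/'app' target are placeholders.
#
# def options(opt):
#     opt.load('compiler_cxx boost')
#
# def configure(conf):
#     conf.load('compiler_cxx boost')
#     conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']  # tame MSVC auto-linking
#     conf.check_boost(lib='system filesystem', stlib='thread', mt=True)
#
# def build(bld):
#     bld(source='main.cpp', target='app', use='BOOST')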
import sys
import re
from waflib import Utils, Logs, Errors
from waflib.Configure import conf
from waflib.TaskGen import feature, after_method
BOOST_LIBS = ['/usr/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu',
'/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib']
BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include']
BOOST_VERSION_FILE = 'boost/version.hpp'
BOOST_VERSION_CODE = '''
#include <iostream>
#include <boost/version.hpp>
int main() { std::cout << BOOST_LIB_VERSION << std::endl; }
'''
BOOST_ERROR_CODE = '''
#include <boost/system/error_code.hpp>
int main() { boost::system::error_code c; }
'''
BOOST_THREAD_CODE = '''
#include <boost/thread.hpp>
int main() { boost::thread t; }
'''
# toolsets from {boost_dir}/tools/build/v2/tools/common.jam
PLATFORM = Utils.unversioned_sys_platform()
detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il'
detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang'
detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc'
BOOST_TOOLSETS = {
'borland': 'bcb',
'clang': detect_clang,
'como': 'como',
'cw': 'cw',
'darwin': 'xgcc',
'edg': 'edg',
'g++': detect_mingw,
'gcc': detect_mingw,
'icpc': detect_intel,
'intel': detect_intel,
'kcc': 'kcc',
'kylix': 'bck',
'mipspro': 'mp',
'mingw': 'mgw',
'msvc': 'vc',
'qcc': 'qcc',
'sun': 'sw',
'sunc++': 'sw',
'tru64cxx': 'tru',
'vacpp': 'xlc'
}
def options(opt):
opt.add_option('--boost-includes', type='string',
default='', dest='boost_includes',
help='''path to the boost includes root (~boost root)
e.g. /path/to/boost_1_47_0''')
opt.add_option('--boost-libs', type='string',
default='', dest='boost_libs',
help='''path to the directory where the boost libs are
e.g. /path/to/boost_1_47_0/stage/lib''')
opt.add_option('--boost-mt', action='store_true',
default=False, dest='boost_mt',
help='select multi-threaded libraries')
opt.add_option('--boost-abi', type='string', default='', dest='boost_abi',
help='''select libraries with tags (gd for debug, static is automatically added),
see doc Boost, Getting Started, chapter 6.1''')
opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect',
help="auto-detect boost linkage options (don't get used to it / might break other stuff)")
opt.add_option('--boost-toolset', type='string',
default='', dest='boost_toolset',
help='force a toolset e.g. msvc, vc90, \
gcc, mingw, mgw45 (default: auto)')
py_version = '%d%d' % (sys.version_info[0], sys.version_info[1])
opt.add_option('--boost-python', type='string',
default=py_version, dest='boost_python',
help='select the lib python with this version \
(default: %s)' % py_version)
@conf
def __boost_get_version_file(self, d):
if not d:
return None
dnode = self.root.find_dir(d)
if dnode:
return dnode.find_node(BOOST_VERSION_FILE)
return None
@conf
def boost_get_version(self, d):
"""silently retrieve the boost version number"""
node = self.__boost_get_version_file(d)
if node:
try:
txt = node.read()
except (OSError, IOError):
Logs.error("Could not read the file %r" % node.abspath())
else:
re_but = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"', re.M)
m = re_but.search(txt)
if m:
return m.group(1)
return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True)
@conf
def boost_get_includes(self, *k, **kw):
includes = k and k[0] or kw.get('includes', None)
if includes and self.__boost_get_version_file(includes):
return includes
for d in self.environ.get('INCLUDE', '').split(';') + BOOST_INCLUDES:
if self.__boost_get_version_file(d):
return d
if includes:
self.end_msg('headers not found in %s' % includes)
self.fatal('The configuration failed')
else:
self.end_msg('headers not found, please provide a --boost-includes argument (see help)')
self.fatal('The configuration failed')
@conf
def boost_get_toolset(self, cc):
toolset = cc
if not cc:
build_platform = Utils.unversioned_sys_platform()
if build_platform in BOOST_TOOLSETS:
cc = build_platform
else:
cc = self.env.CXX_NAME
if cc in BOOST_TOOLSETS:
toolset = BOOST_TOOLSETS[cc]
return isinstance(toolset, str) and toolset or toolset(self.env)
@conf
def __boost_get_libs_path(self, *k, **kw):
''' return the lib path and all the files in it '''
if 'files' in kw:
return self.root.find_dir('.'), Utils.to_list(kw['files'])
libs = k and k[0] or kw.get('libs', None)
if libs:
path = self.root.find_dir(libs)
files = path.ant_glob('*boost_*')
if not libs or not files:
for d in self.environ.get('LIB', '').split(';') + BOOST_LIBS:
if not d:
continue
path = self.root.find_dir(d)
if path:
files = path.ant_glob('*boost_*')
if files:
break
path = self.root.find_dir(d + '64')
if path:
files = path.ant_glob('*boost_*')
if files:
break
if not path:
if libs:
self.end_msg('libs not found in %s' % libs)
self.fatal('The configuration failed')
else:
self.end_msg('libs not found, please provide a --boost-libs argument (see help)')
self.fatal('The configuration failed')
self.to_log('Found the boost path in %r with the libraries:' % path)
for x in files:
self.to_log(' %r' % x)
return path, files
@conf
def boost_get_libs(self, *k, **kw):
'''
return the lib path and the required libs
according to the parameters
'''
path, files = self.__boost_get_libs_path(**kw)
files = sorted(files, key=lambda f: (len(f.name), f.name), reverse=True)
toolset = self.boost_get_toolset(kw.get('toolset', ''))
toolset_pat = '(-%s[0-9]{0,3})' % toolset
version = '-%s' % self.env.BOOST_VERSION
def find_lib(re_lib, files):
for file in files:
if re_lib.search(file.name):
self.to_log('Found boost lib %s' % file)
return file
return None
def format_lib_name(name):
if name.startswith('lib') and self.env.CC_NAME != 'msvc':
name = name[3:]
return name[:name.rfind('.')]
def match_libs(lib_names, is_static):
libs = []
lib_names = Utils.to_list(lib_names)
if not lib_names:
return libs
t = []
if kw.get('mt', False):
t.append('-mt')
if kw.get('abi', None):
t.append('%s%s' % (is_static and '-s' or '-', kw['abi']))
elif is_static:
t.append('-s')
tags_pat = t and ''.join(t) or ''
ext = is_static and self.env.cxxstlib_PATTERN or self.env.cxxshlib_PATTERN
ext = ext.partition('%s')[2] # remove '%s' or 'lib%s' from PATTERN
for lib in lib_names:
if lib == 'python':
# for instance, with python='27',
# accepts '-py27', '-py2', '27' and '2'
# but will reject '-py3', '-py26', '26' and '3'
tags = '({0})?((-py{2})|(-py{1}(?=[^0-9]))|({2})|({1}(?=[^0-9]))|(?=[^0-9])(?!-py))'.format(tags_pat, kw['python'][0], kw['python'])
else:
tags = tags_pat
# Trying libraries, from most strict match to least one
for pattern in ['boost_%s%s%s%s%s$' % (lib, toolset_pat, tags, version, ext),
'boost_%s%s%s%s$' % (lib, tags, version, ext),
# Give up trying to find the right version
'boost_%s%s%s%s$' % (lib, toolset_pat, tags, ext),
'boost_%s%s%s$' % (lib, tags, ext),
'boost_%s%s$' % (lib, ext),
'boost_%s' % lib]:
self.to_log('Trying pattern %s' % pattern)
file = find_lib(re.compile(pattern), files)
if file:
libs.append(format_lib_name(file.name))
break
else:
self.end_msg('lib %s not found in %s' % (lib, path.abspath()))
self.fatal('The configuration failed')
return libs
return path.abspath(), match_libs(kw.get('lib', None), False), match_libs(kw.get('stlib', None), True)
@conf
def check_boost(self, *k, **kw):
"""
Initialize boost libraries to be used.
Keywords: you can pass the same parameters as with the command line (without "--boost-").
Note that the command line has the priority, and should preferably be used.
"""
if not self.env['CXX']:
self.fatal('load a c++ compiler first, conf.load("compiler_cxx")')
params = {
'lib': k and k[0] or kw.get('lib', None),
'stlib': kw.get('stlib', None)
}
for key, value in self.options.__dict__.items():
if not key.startswith('boost_'):
continue
key = key[len('boost_'):]
params[key] = value and value or kw.get(key, '')
var = kw.get('uselib_store', 'BOOST')
self.start_msg('Checking boost includes')
self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
self.env.BOOST_VERSION = self.boost_get_version(inc)
self.end_msg(self.env.BOOST_VERSION)
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var])
if not params['lib'] and not params['stlib']:
return
if 'static' in kw or 'static' in params:
Logs.warn('boost: static parameter is deprecated, use stlib instead.')
self.start_msg('Checking boost libs')
path, libs, stlibs = self.boost_get_libs(**params)
self.env['LIBPATH_%s' % var] = [path]
self.env['STLIBPATH_%s' % var] = [path]
self.env['LIB_%s' % var] = libs
self.env['STLIB_%s' % var] = stlibs
self.end_msg('ok')
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % path)
Logs.pprint('CYAN', ' shared libs : %s' % libs)
Logs.pprint('CYAN', ' static libs : %s' % stlibs)
def try_link():
if (params['lib'] and 'system' in params['lib']) or \
params['stlib'] and 'system' in params['stlib']:
self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False)
if (params['lib'] and 'thread' in params['lib']) or \
params['stlib'] and 'thread' in params['stlib']:
self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False)
if params.get('linkage_autodetect', False):
self.start_msg("Attempting to detect boost linkage flags")
toolset = self.boost_get_toolset(kw.get('toolset', ''))
if toolset in ('vc',):
# disable auto-linking feature, causing error LNK1181
# because the code wants to be linked against
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
# if no dlls are present, we guess the .lib files are not stubs
has_dlls = False
for x in Utils.listdir(path):
if x.endswith(self.env.cxxshlib_PATTERN % ''):
has_dlls = True
break
if not has_dlls:
self.env['STLIBPATH_%s' % var] = [path]
self.env['STLIB_%s' % var] = libs
del self.env['LIB_%s' % var]
del self.env['LIBPATH_%s' % var]
# we attempt to play with some known-to-work CXXFLAGS combinations
for cxxflags in (['/MD', '/EHsc'], []):
self.env.stash()
self.env["CXXFLAGS_%s" % var] += cxxflags
try:
try_link()
self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var]))
exc = None
break
except Errors.ConfigurationError as e:
self.env.revert()
exc = e
if exc is not None:
self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc)
self.fatal('The configuration failed')
else:
self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain")
self.fatal('The configuration failed')
else:
self.start_msg('Checking for boost linkage')
try:
try_link()
except Errors.ConfigurationError as e:
self.end_msg("Could not link against boost libraries using supplied options")
self.fatal('The configuration failed')
self.end_msg('ok')
@feature('cxx')
@after_method('apply_link')
def install_boost(self):
if install_boost.done or not Utils.is_win32 or not self.bld.cmd.startswith('install'):
return
install_boost.done = True
inst_to = getattr(self, 'install_path', '${BINDIR}')
for lib in self.env.LIB_BOOST:
try:
file = self.bld.find_file(self.env.cxxshlib_PATTERN % lib, self.env.LIBPATH_BOOST)
self.bld.install_files(inst_to, self.bld.root.find_node(file))
except:
continue
install_boost.done = False
| bsd-3-clause |
diwer/sublimeconfig | Packages/BracketHighlighter/bh_rules.py | 2 | 7452 | import ure
import bh_plugin
from bh_logging import debug, log
def exclude_bracket(enabled, filter_type, language_list, language):
"""
Exclude or include brackets based on filter lists.
"""
exclude = True
if enabled:
# Black list languages
if filter_type == 'blacklist':
exclude = False
if language is not None:
for item in language_list:
if language == item.lower():
exclude = True
break
# White list languages
elif filter_type == 'whitelist':
if language is not None:
for item in language_list:
if language == item.lower():
exclude = False
break
return exclude
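# Illustrative examples (not in the original module) of how the two filter types
# behave; the language names are placeholders:
#   exclude_bracket(True, 'blacklist', ['python'], 'python')  -> True  (excluded)
#   exclude_bracket(True, 'whitelist', ['python'], 'c++')     -> True  (excluded)
#   exclude_bracket(True, 'whitelist', ['python'], 'python')  -> False (kept)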
def is_valid_definition(params, language):
"""
Ensure bracket definition should be and can be loaded.
"""
return (
not exclude_bracket(
params.get("enabled", True),
params.get("language_filter", "blacklist"),
params.get("language_list", []),
language
) and
params["open"] is not None and params["close"] is not None
)
class BracketDefinition(object):
"""
Normal bracket definition.
"""
def __init__(self, bracket):
"""
Setup the bracket object by reading the passed in dictionary.
"""
self.name = bracket["name"]
self.style = bracket.get("style", "default")
self.compare = bracket.get("compare")
sub_search = bracket.get("find_in_sub_search", "false")
self.find_in_sub_search_only = sub_search == "only"
self.find_in_sub_search = sub_search == "true" or self.find_in_sub_search_only
self.post_match = bracket.get("post_match")
self.validate = bracket.get("validate")
self.scope_exclude_exceptions = bracket.get("scope_exclude_exceptions", [])
self.scope_exclude = bracket.get("scope_exclude", [])
self.ignore_string_escape = bracket.get("ignore_string_escape", False)
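# Illustrative sketch of a bracket rule dictionary as consumed above (assumed
# shape, not copied from the shipped settings; patterns and scopes are placeholders):
#
# example_rule = {
#     "name": "curly",
#     "open": r"(\{)",
#     "close": r"(\})",
#     "style": "curly",
#     "enabled": True,
#     "language_filter": "blacklist",
#     "language_list": [],
#     "scope_exclude": ["string", "comment"],
# }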
class ScopeDefinition(object):
"""
Scope bracket definition.
"""
def __init__(self, bracket):
"""
Setup the bracket object by reading the passed in dictionary.
"""
self.style = bracket.get("style", "default")
self.open = ure.compile("\\A" + bracket.get("open", "."), ure.MULTILINE | ure.IGNORECASE)
self.close = ure.compile(bracket.get("close", ".") + "\\Z", ure.MULTILINE | ure.IGNORECASE)
self.name = bracket["name"]
sub_search = bracket.get("sub_bracket_search", "false")
self.sub_search_only = sub_search == "only"
self.sub_search = self.sub_search_only is True or sub_search == "true"
self.compare = bracket.get("compare")
self.post_match = bracket.get("post_match")
self.validate = bracket.get("validate")
self.scopes = bracket["scopes"]
class SearchRules(object):
def __init__(self, brackets, scopes, string_escape_mode, outside_adj):
self.bracket_rules = brackets
self.scope_rules = scopes
self.enabled = False
self.string_escape_mode = string_escape_mode
self.outside_adj = outside_adj
def load_rules(self, language, modules):
self.enabled = False
self.brackets = []
self.scopes = []
self.check_compare = False
self.check_validate = False
self.check_post_match = False
self.parse_bracket_definition(language, modules)
self.parse_scope_definition(language, modules)
if len(self.scopes) or len(self.brackets):
self.enabled = True
def parse_bracket_definition(self, language, loaded_modules):
"""
Parse the bracket definition
"""
names = []
subnames = []
find_regex = []
sub_find_regex = []
for params in self.bracket_rules:
if is_valid_definition(params, language):
try:
bh_plugin.load_modules(params, loaded_modules)
entry = BracketDefinition(params)
if not self.check_compare and entry.compare is not None:
self.check_compare = True
if not self.check_validate and entry.validate is not None:
self.check_validate = True
if not self.check_post_match and entry.post_match is not None:
self.check_post_match = True
self.brackets.append(entry)
if not entry.find_in_sub_search_only:
find_regex.append(params["open"])
find_regex.append(params["close"])
names.append(params["name"])
else:
find_regex.append(r"([^\s\S])")
find_regex.append(r"([^\s\S])")
if entry.find_in_sub_search:
sub_find_regex.append(params["open"])
sub_find_regex.append(params["close"])
subnames.append(params["name"])
else:
sub_find_regex.append(r"([^\s\S])")
sub_find_regex.append(r"([^\s\S])")
except Exception as e:
log(e)
if len(self.brackets):
self.brackets = tuple(self.brackets)
debug(
"Bracket Pattern: (%s)\n" % ','.join(names) +
" (Opening|Closing): (?:%s)\n" % '|'.join(find_regex)
)
debug(
"SubBracket Pattern: (%s)\n" % ','.join(subnames) +
" (Opening|Closing): (?:%s)\n" % '|'.join(sub_find_regex)
)
self.sub_pattern = ure.compile("(?:%s)" % '|'.join(sub_find_regex), ure.MULTILINE | ure.IGNORECASE)
self.pattern = ure.compile("(?:%s)" % '|'.join(find_regex), ure.MULTILINE | ure.IGNORECASE)
def parse_scope_definition(self, language, loaded_modules):
"""
Parse the scope definition
"""
scopes = {}
scope_count = 0
for params in self.scope_rules:
if is_valid_definition(params, language):
try:
bh_plugin.load_modules(params, loaded_modules)
entry = ScopeDefinition(params)
if not self.check_compare and entry.compare is not None:
self.check_compare = True
if not self.check_validate and entry.validate is not None:
self.check_validate = True
if not self.check_post_match and entry.post_match is not None:
self.check_post_match = True
for x in entry.scopes:
if x not in scopes:
scopes[x] = scope_count
scope_count += 1
self.scopes.append({"name": x, "brackets": [entry]})
else:
self.scopes[scopes[x]]["brackets"].append(entry)
debug("Scope Regex (%s)\n Opening: %s\n Closing: %s\n" % (entry.name, entry.open.pattern, entry.close.pattern))
except Exception as e:
log(e)
| mit |
elitak/pexpect | tests/platform_tests/test.py | 3 | 2094 | #!/usr/bin/env python
import sys, signal, os, time, errno, pty
def signal_handler (signum, frame):
print 'Signal handler called with signal:', signum
print 'signal.SIGCHLD=', signal.SIGCHLD
# First thing we do is set up a handler for SIGCHLD.
signal.signal (signal.SIGCHLD, signal.SIG_IGN)
print 'PART 1 -- Test signal handling with empty pipe.'
# Create a child process for us to kill.
try:
pid, fd = pty.fork()
except Exception, e:
print str(e)
if pid == 0:
# os.write (sys.stdout.fileno(), 'This is a test.\n This is a test.')
time.sleep(10000)
print 'Sending SIGKILL to child pid:', pid
os.kill (pid, signal.SIGKILL)
# SIGCHLD should interrupt sleep.
# Note that this is a race.
# It is possible that the signal handler will get called
# before we try to sleep, but this has not happened yet.
# But in that case we can only tell by order of printed output.
print 'Entering sleep...'
try:
time.sleep(10)
except:
print 'sleep was interrupted by signal.'
# Just for fun let's see if the process is alive.
try:
os.kill(pid, 0)
print 'Child is alive. This is ambiguous because it may be a Zombie.'
except OSError, e:
print 'Child appears to be dead.'
print 'PART 2 -- Test signal handling with full pipe.'
# Create a child process for us to kill.
try:
pid, fd = pty.fork()
except Exception, e:
print str(e)
if pid == 0:
os.write (sys.stdout.fileno(), 'This is a test.\n This is a test.')
time.sleep(10000)
print 'Sending SIGKILL to child pid:', pid
os.kill (pid, signal.SIGKILL)
# SIGCHLD should interrupt sleep.
# Note that this is a race.
# It is possible that the signal handler will get called
# before we try to sleep, but this has not happened yet.
# But in that case we can only tell by order of printed output.
print 'Entering sleep...'
try:
time.sleep(10)
except:
print 'sleep was interrupted by signal.'
# Just for fun let's see if the process is alive.
try:
os.kill(pid, 0)
print 'Child is alive. This is ambiguous because it may be a Zombie.'
except OSError, e:
print 'Child appears to be dead.'
| mit |
neonmaccca/theHarvester | discovery/googleCSE.py | 23 | 3546 | import string
import httplib
import sys
import myparser
import re
import time
class search_googleCSE:
def __init__(self, word, limit, start):
self.word = word
self.files = "pdf"
self.results = ""
self.totalresults = ""
self.server = "www.googleapis.com"
self.hostname = "www.googleapis.com"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity = "10"
self.limit = limit
self.counter = 1
self.api_key = ""
self.cse_id = ""
self.lowRange = start
self.highRange = start+100
def do_search(self):
h = httplib.HTTPS(self.server)
h.putrequest('GET', "/customsearch/v1?key=" + self.api_key +"&highRange=" + str(self.highRange) + "&lowRange=" + str(self.lowRange) + "&cx=" +self.cse_id +
"&start=" + str(self.counter) + "&q=%40\"" + self.word + "\"")
h.putheader('Host', self.server)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults += self.results
def do_search_files(self, files):
h = httplib.HTTPS(self.server)
h.putrequest('GET', "/customsearch/v1?key=" + self.api_key +"&highRange=" + str(self.highRange) + "&lowRange=" + str(self.lowRange) + "&cx=" +self.cse_id +
"&start=" + str(self.counter) + "&q=filetype:" + files +"%20site:" + self.word)
h.putheader('Host', self.server)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults += self.results
def check_next(self):
renext = re.compile('> Next <')
nextres = renext.findall(self.results)
if nextres != []:
nexty = "1"
else:
nexty = "0"
return nexty
def get_emails(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.hostnames()
def get_files(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.fileurls(self.files)
def process(self):
tracker=self.counter + self.lowRange
while tracker <= self.limit:
self.do_search()
#time.sleep(1)
ESC=chr(27)
sys.stdout.write(ESC + '[2K' + ESC+'[G')
sys.stdout.write("\r\t" + "Searching " + str(self.counter+self.lowRange) + " results ..." )
sys.stdout.flush()
#print "\tSearching " + str(self.counter+self.lowRange) + " results...\t\t\t\t\t\r"
if self.counter == 101:
self.counter = 1
self.lowRange +=100
self.highRange +=100
else:
self.counter += 10
tracker=self.counter + self.lowRange
def store_results(self):
filename = "debug_results.txt"
file = open(filename, 'w')
file.write(self.totalresults)
def process_files(self, files):
while self.counter <= self.limit:
self.do_search_files(files)
time.sleep(1)
self.counter += 100
print "\tSearching " + str(self.counter) + " results..."
| gpl-2.0 |
bruderstein/PythonScript | PythonLib/full/encodings/iso8859_15.py | 272 | 13212 | """ Python Character Mapping Codec iso8859_15 generated from 'MAPPINGS/ISO8859/8859-15.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-15',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\u20ac' # 0xA4 -> EURO SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
'\xa7' # 0xA7 -> SECTION SIGN
'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
jaap-karssenberg/zim-desktop-wiki | tests/operations.py | 1 | 4550 |
# Copyright 2017 Jaap Karssenberg <jaap.karssenberg@gmail.com>
import tests
from gi.repository import Gtk
from zim.notebook.operations import *
class MockProgressDialog(object):
def __init__(self, parent, operation):
# self.heading.set_text(operation.msg)
operation.connect('step', self.on_iteration_step)
operation.connect('finished', self.on_iteration_finished)
self.steps = []
self.finished = False
def on_iteration_step(self, o, progress):
self.steps.append(progress)
if isinstance(progress, tuple) and len(progress) == 3:
i, total, msg = progress
# self.bar.set_fraction(i/total)
# self.bar.set_text('%i / %i' % (i, total))
else:
msg = progress
# self.bar.pulse()
# if isinstance(msg, basestring):
# self.label.set_text(msg)
def on_iteration_finished(self, o):
self.finished = True
# self.response(...) # break run() loop
class MockNotebook(object):
def __init__(self):
self._operation_check = NOOP
self.value = None
@notebook_state
def test(self, value):
self.value = value
def raw(self, value):
self.value = value
def mock_iterator(notebook):
for i in 0, 1, 2:
notebook.test('Test %i' % i)
yield i
def mock_gtk_iter(notebook):
for i in 0, 1, 2:
notebook.test('Test %i' % i)
yield i
Gtk.main_quit()
class TestNotebookOperation(tests.TestCase):
## TODO add signal monitor to check step and finished emitted
def testIterator(self):
nb = MockNotebook()
nb.test('Foo')
self.assertEqual(nb.value, 'Foo')
# Test iterator
op = NotebookOperation(nb, 'My Op', mock_iterator(nb))
self.assertFalse(op.is_running())
nb.test('Bar')
self.assertEqual(nb.value, 'Bar')
i = None
for i, x in enumerate(op):
self.assertTrue(op.is_running())
self.assertEqual(nb.value, 'Test %i' % i)
self.assertRaises(NotebookOperationOngoing, nb.test, 'Foo')
self.assertEqual(i, 2)
self.assertFalse(op.is_running())
self.assertFalse(op.cancelled)
nb.test('Baz')
self.assertEqual(nb.value, 'Baz')
# Test cancel
op = NotebookOperation(nb, 'My Op', mock_iterator(nb))
i = None
for i, x in enumerate(op):
self.assertTrue(op.is_running())
op.cancel()
self.assertEqual(i, 0)
self.assertFalse(op.is_running())
self.assertTrue(op.cancelled)
def testIdle(self):
nb = MockNotebook()
op = NotebookOperation(nb, 'My Op', mock_gtk_iter(nb))
op.run_on_idle()
Gtk.main()
self.assertFalse(op.is_running())
self.assertEqual(nb.value, 'Test %i' % 2)
def testContext(self):
nb = MockNotebook()
def test():
with NotebookState(nb):
nb.raw('Foo')
test()
self.assertEqual(nb.value, 'Foo')
op = NotebookOperation(nb, 'My Op', mock_iterator(nb))
for i, x in enumerate(op):
self.assertRaises(NotebookOperationOngoing, test)
def testSignals(self):
nb = MockNotebook()
op = NotebookOperation(nb, 'My Op', mock_iterator(nb))
dialog = MockProgressDialog(None, op)
i = None
for i, x in enumerate(op):
pass
self.assertEqual(i, 2)
self.assertEqual(len(dialog.steps), 3)
self.assertTrue(dialog.finished)
op = NotebookOperation(nb, 'My Op', mock_iterator(nb))
dialog = MockProgressDialog(None, op)
i = None
for i, x in enumerate(op):
self.assertTrue(op.is_running())
op.cancel()
self.assertEqual(i, 0)
self.assertEqual(len(dialog.steps), 1)
self.assertTrue(dialog.finished)
import threading
def mock_thread_main(notebook, lock):
with lock:
for i in 0, 1, 2:
notebook.test('Test %i' % i)
class TestSimpleAsyncOperation(tests.TestCase):
def runTest(self):
nb = MockNotebook()
lock = threading.Lock()
lock.acquire()
thread = threading.Thread(target=mock_thread_main, args=(nb, lock))
thread.start()
# using lock to ensure thread doesn't finish before iteration seen
result = []
def post():
result.append('foo')
op = SimpleAsyncOperation(nb, 'my op', thread, post)
for i, x in enumerate(op):
self.assertTrue(op.is_running())
if i == 1:
lock.release()
self.assertTrue(i >= 1)
self.assertFalse(op.is_running())
self.assertFalse(op.cancelled)
self.assertEqual(nb.value, 'Test 2')
self.assertEqual(result, ['foo'])
# now with cancel - result can vary depending on who goes first
lock = threading.Lock()
lock.acquire()
thread = threading.Thread(target=mock_thread_main, args=(nb, lock))
thread.start()
op = SimpleAsyncOperation(nb, 'my op', thread, post)
for i, x in enumerate(op):
if i == 1:
lock.release()
op.cancel()
self.assertFalse(op.is_running())
self.assertTrue(op.cancelled)
| gpl-2.0 |
mpetyx/heroku-buildpack-couchbase-geo-django | vendor/pip-1.3.1/pip/vcs/subversion.py | 63 | 10620 | import os
import re
from pip.backwardcompat import urlparse
from pip import InstallationError
from pip.index import Link
from pip.util import rmtree, display_path, call_subprocess
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
bundle_file = 'svn-checkout.txt'
guide = ('# This was an svn checkout; to make it a checkout again run:\n'
'svn checkout --force -r %(rev)s %(url)s .\n')
def get_info(self, location):
"""Returns (url, revision), where both are strings"""
assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
output = call_subprocess(
[self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
match = _svn_url_re.search(output)
if not match:
logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return None, None
url = match.group(1).strip()
match = _svn_revision_re.search(output)
if not match:
logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return url, None
return url, match.group(1)
def parse_vcs_bundle_file(self, content):
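        """Parse the svn bundle file content and return (url, rev), or (None, None) if it cannot be parsed."""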
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^-r\s*([^ ])?', line)
if not match:
return None, None
rev = match.group(1)
rest = line[match.end():].strip().split(None, 1)[0]
return rest, rev
return None, None
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev = self.get_url_rev()
rev_options = get_rev_options(url, rev)
logger.notify('Exporting svn repository %s to %s' % (url, location))
logger.indent += 2
try:
if os.path.exists(location):
# Subversion doesn't like to check out over an existing directory
# --force fixes this, but was only added in svn 1.5
rmtree(location)
call_subprocess(
[self.cmd, 'export'] + rev_options + [url, location],
filter_stdout=self._filter, show_stdout=False)
finally:
logger.indent -= 2
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'switch'] + rev_options + [url, dest])
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'update'] + rev_options + [dest])
def obtain(self, dest):
url, rev = self.get_url_rev()
rev_options = get_rev_options(url, rev)
if rev:
rev_display = ' (to revision %s)' % rev
else:
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options + [url, dest])
def get_location(self, dist, dependency_links):
for url in dependency_links:
egg_fragment = Link(url).egg_fragment
if not egg_fragment:
continue
if '-' in egg_fragment:
## FIXME: will this work when a package has - in the name?
key = '-'.join(egg_fragment.split('-')[:-1]).lower()
else:
key = egg_fragment
if key == dist.key:
return url.split('#', 1)[0]
return None
def get_revision(self, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if self.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(self.dirname)
entries_fn = os.path.join(base, self.dirname, 'entries')
if not os.path.exists(entries_fn):
## FIXME: should we warn?
continue
dirurl, localrev = self._get_svn_url_rev(base)
if base == location:
base_url = dirurl + '/' # save the root url
elif not dirurl or not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_url_rev(self):
# hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
url, rev = super(Subversion, self).get_url_rev()
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev
def get_url(self, location):
# In cases where the source is in a subdirectory, not alongside setup.py
# we have to look up in the location until we find a real setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without finding setup.py
logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
% orig_location)
return None
return self._get_svn_url_rev(location)[0]
def _get_svn_url_rev(self, location):
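        """Read svn metadata for ``location`` (the .svn/entries file, or ``svn info --xml`` for svn >= 1.7) and return (url, revision)."""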
f = open(os.path.join(location, self.dirname, 'entries'))
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
url = data[0][3]
revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError('Badly formatted data: %r' % data)
url = match.group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
else:
try:
# subversion >= 1.7
xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False)
url = _svn_info_xml_url_re.search(xml).group(1)
revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
except InstallationError:
url, revs = None, []
if revs:
rev = max(revs)
else:
rev = 0
return url, rev
def get_tag_revs(self, svn_tag_url):
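        """Run ``svn ls -v`` on the tags URL and return a list of (tag, revision) tuples."""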
stdout = call_subprocess(
[self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
results = []
for line in stdout.splitlines():
parts = line.split()
rev = int(parts[0])
tag = parts[-1].strip('/')
results.append((tag, rev))
return results
def find_tag_match(self, rev, tag_revs):
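        """Return the tag whose revision is the smallest one greater than ``rev``, or None if no tag is newer."""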
best_match_rev = None
best_tag = None
for tag, tag_rev in tag_revs:
if (tag_rev > rev and
(best_match_rev is None or best_match_rev > tag_rev)):
# FIXME: Is best_match > tag_rev really possible?
# or is it a sign something is wacky?
best_match_rev = tag_rev
best_tag = tag
return best_tag
def get_src_requirement(self, dist, location, find_tags=False):
repo = self.get_url(location)
if repo is None:
return None
parts = repo.split('/')
## FIXME: why not project name?
egg_project_name = dist.egg_name().split('-', 1)[0]
rev = self.get_revision(location)
if parts[-2] in ('tags', 'tag'):
# It's a tag, perfect!
full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
elif parts[-2] in ('branches', 'branch'):
# It's a branch :(
full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
elif parts[-1] == 'trunk':
# Trunk :-/
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
if find_tags:
tag_url = '/'.join(parts[:-1]) + '/tags'
tag_revs = self.get_tag_revs(tag_url)
match = self.find_tag_match(rev, tag_revs)
if match:
logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
repo = '%s/%s' % (tag_url, match)
full_egg_name = '%s-%s' % (egg_project_name, match)
else:
# Don't know what it is
logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
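    """Build svn command-line options: an optional ``-r`` revision plus ``--username``/``--password`` parsed from the URL."""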
if rev:
rev_options = ['-r', rev]
else:
rev_options = []
r = urlparse.urlsplit(url)
if hasattr(r, 'username'):
# >= Python-2.5
username, password = r.username, r.password
else:
netloc = r[1]
if '@' in netloc:
auth = netloc.split('@')[0]
if ':' in auth:
username, password = auth.split(':', 1)
else:
username, password = auth, None
else:
username, password = None, None
if username:
rev_options += ['--username', username]
if password:
rev_options += ['--password', password]
return rev_options
vcs.register(Subversion)
| mit |
dgoedkoop/QGIS | python/plugins/processing/algs/qgis/SingleSidedBuffer.py | 12 | 4922 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SingleSidedBuffer.py
--------------------
Date : August 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'August 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsGeometry,
QgsWkbTypes,
QgsProcessing,
QgsProcessingParameterDistance,
QgsProcessingParameterNumber,
QgsProcessingParameterEnum,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class SingleSidedBuffer(QgisFeatureBasedAlgorithm):
DISTANCE = 'DISTANCE'
SIDE = 'SIDE'
SEGMENTS = 'SEGMENTS'
JOIN_STYLE = 'JOIN_STYLE'
MITER_LIMIT = 'MITER_LIMIT'
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
self.distance = None
self.segments = None
self.join_style = None
self.side = None
self.miter_limit = None
self.sides = [self.tr('Left'),
'Right']
self.join_styles = [self.tr('Round'),
'Miter',
'Bevel']
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterDistance(self.DISTANCE,
self.tr('Distance'), parentParameterName='INPUT',
defaultValue=10.0))
self.addParameter(QgsProcessingParameterEnum(
self.SIDE,
self.tr('Side'),
options=self.sides))
self.addParameter(QgsProcessingParameterNumber(self.SEGMENTS,
self.tr('Segments'), QgsProcessingParameterNumber.Integer,
minValue=1, defaultValue=8))
self.addParameter(QgsProcessingParameterEnum(
self.JOIN_STYLE,
self.tr('Join style'),
options=self.join_styles))
self.addParameter(QgsProcessingParameterNumber(self.MITER_LIMIT,
self.tr('Miter limit'), QgsProcessingParameterNumber.Double,
minValue=1, defaultValue=2))
def name(self):
return 'singlesidedbuffer'
def displayName(self):
return self.tr('Single sided buffer')
def outputName(self):
return self.tr('Buffer')
def inputLayerTypes(self):
return [QgsProcessing.TypeVectorLine]
def outputType(self):
return QgsProcessing.TypeVectorPolygon
def outputWkbType(self, input_wkb_type):
return QgsWkbTypes.Polygon
def prepareAlgorithm(self, parameters, context, feedback):
self.distance = self.parameterAsDouble(parameters, self.DISTANCE, context)
self.segments = self.parameterAsInt(parameters, self.SEGMENTS, context)
self.join_style = self.parameterAsEnum(parameters, self.JOIN_STYLE, context) + 1
if self.parameterAsEnum(parameters, self.SIDE, context) == 0:
self.side = QgsGeometry.SideLeft
else:
self.side = QgsGeometry.SideRight
self.miter_limit = self.parameterAsDouble(parameters, self.MITER_LIMIT, context)
return True
def processFeature(self, feature, context, feedback):
input_geometry = feature.geometry()
if input_geometry:
output_geometry = input_geometry.singleSidedBuffer(self.distance, self.segments,
self.side, self.join_style, self.miter_limit)
if not output_geometry:
raise QgsProcessingException(
self.tr('Error calculating single sided buffer'))
feature.setGeometry(output_geometry)
return [feature]
| gpl-2.0 |
therandomcode/WikiWriter | lib/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
| apache-2.0 |
10clouds/edx-platform | lms/djangoapps/discussion_api/tests/utils.py | 4 | 13936 | """
Discussion API test utilities
"""
import json
import re
import httpretty
def _get_thread_callback(thread_data):
"""
Get a callback function that will return POST/PUT data overridden by
response_overrides.
"""
def callback(request, _uri, headers):
"""
Simulate the thread creation or update endpoint by returning the provided
data along with the data from response_overrides and dummy values for any
additional required fields.
"""
response_data = make_minimal_cs_thread(thread_data)
for key, val_list in request.parsed_body.items():
val = val_list[0]
if key in ["anonymous", "anonymous_to_peers", "closed", "pinned"]:
response_data[key] = val == "True"
else:
response_data[key] = val
return (200, headers, json.dumps(response_data))
return callback
def _get_comment_callback(comment_data, thread_id, parent_id):
"""
Get a callback function that will return a comment containing the given data
plus necessary dummy data, overridden by the content of the POST/PUT
request.
"""
def callback(request, _uri, headers):
"""
Simulate the comment creation or update endpoint as described above.
"""
response_data = make_minimal_cs_comment(comment_data)
# thread_id and parent_id are not included in request payload but
# are returned by the comments service
response_data["thread_id"] = thread_id
response_data["parent_id"] = parent_id
for key, val_list in request.parsed_body.items():
val = val_list[0]
if key in ["anonymous", "anonymous_to_peers", "endorsed"]:
response_data[key] = val == "True"
else:
response_data[key] = val
return (200, headers, json.dumps(response_data))
return callback
class CommentsServiceMockMixin(object):
"""Mixin with utility methods for mocking the comments service"""
def register_get_threads_response(self, threads, page, num_pages):
"""Register a mock response for GET on the CS thread list endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads",
body=json.dumps({
"collection": threads,
"page": page,
"num_pages": num_pages,
"thread_count": len(threads),
}),
status=200
)
def register_get_threads_search_response(self, threads, rewrite, num_pages=1):
"""Register a mock response for GET on the CS thread search endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/search/threads",
body=json.dumps({
"collection": threads,
"page": 1,
"num_pages": num_pages,
"corrected_text": rewrite,
"thread_count": len(threads),
}),
status=200
)
def register_post_thread_response(self, thread_data):
"""Register a mock response for POST on the CS commentable endpoint"""
httpretty.register_uri(
httpretty.POST,
re.compile(r"http://localhost:4567/api/v1/(\w+)/threads"),
body=_get_thread_callback(thread_data)
)
def register_put_thread_response(self, thread_data):
"""
Register a mock response for PUT on the CS endpoint for the given
thread_id.
"""
httpretty.register_uri(
httpretty.PUT,
"http://localhost:4567/api/v1/threads/{}".format(thread_data["id"]),
body=_get_thread_callback(thread_data)
)
def register_get_thread_error_response(self, thread_id, status_code):
"""Register a mock error response for GET on the CS thread endpoint."""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread_id),
body="",
status=status_code
)
def register_get_thread_response(self, thread):
"""
Register a mock response for GET on the CS thread instance endpoint.
"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread["id"]),
body=json.dumps(thread),
status=200
)
def register_post_comment_response(self, comment_data, thread_id, parent_id=None):
"""
Register a mock response for POST on the CS comments endpoint for the
given thread or parent; exactly one of thread_id and parent_id must be
specified.
"""
if parent_id:
url = "http://localhost:4567/api/v1/comments/{}".format(parent_id)
else:
url = "http://localhost:4567/api/v1/threads/{}/comments".format(thread_id)
httpretty.register_uri(
httpretty.POST,
url,
body=_get_comment_callback(comment_data, thread_id, parent_id)
)
def register_put_comment_response(self, comment_data):
"""
Register a mock response for PUT on the CS endpoint for the given
comment data (which must include the key "id").
"""
thread_id = comment_data["thread_id"]
parent_id = comment_data.get("parent_id")
httpretty.register_uri(
httpretty.PUT,
"http://localhost:4567/api/v1/comments/{}".format(comment_data["id"]),
body=_get_comment_callback(comment_data, thread_id, parent_id)
)
def register_get_comment_error_response(self, comment_id, status_code):
"""
Register a mock error response for GET on the CS comment instance
endpoint.
"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment_id),
body="",
status=status_code
)
def register_get_comment_response(self, response_overrides):
"""
Register a mock response for GET on the CS comment instance endpoint.
"""
comment = make_minimal_cs_comment(response_overrides)
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment["id"]),
body=json.dumps(comment),
status=200
)
def register_get_user_response(self, user, subscribed_thread_ids=None, upvoted_ids=None):
"""Register a mock response for GET on the CS user instance endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/users/{id}".format(id=user.id),
body=json.dumps({
"id": str(user.id),
"subscribed_thread_ids": subscribed_thread_ids or [],
"upvoted_ids": upvoted_ids or [],
}),
status=200
)
def register_subscribed_threads_response(self, user, threads, page, num_pages):
"""Register a mock response for GET on the CS user instance endpoint"""
httpretty.register_uri(
httpretty.GET,
"http://localhost:4567/api/v1/users/{}/subscribed_threads".format(user.id),
body=json.dumps({
"collection": threads,
"page": page,
"num_pages": num_pages,
"thread_count": len(threads),
}),
status=200
)
def register_subscription_response(self, user):
"""
Register a mock response for POST and DELETE on the CS user subscription
endpoint
"""
for method in [httpretty.POST, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/users/{id}/subscriptions".format(id=user.id),
body=json.dumps({}), # body is unused
status=200
)
def register_thread_votes_response(self, thread_id):
"""
Register a mock response for PUT and DELETE on the CS thread votes
endpoint
"""
for method in [httpretty.PUT, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/threads/{}/votes".format(thread_id),
body=json.dumps({}), # body is unused
status=200
)
def register_comment_votes_response(self, comment_id):
"""
Register a mock response for PUT and DELETE on the CS comment votes
endpoint
"""
for method in [httpretty.PUT, httpretty.DELETE]:
httpretty.register_uri(
method,
"http://localhost:4567/api/v1/comments/{}/votes".format(comment_id),
body=json.dumps({}), # body is unused
status=200
)
def register_flag_response(self, content_type, content_id):
"""Register a mock response for PUT on the CS flag endpoints"""
for path in ["abuse_flag", "abuse_unflag"]:
httpretty.register_uri(
"PUT",
"http://localhost:4567/api/v1/{content_type}s/{content_id}/{path}".format(
content_type=content_type,
content_id=content_id,
path=path
),
body=json.dumps({}), # body is unused
status=200
)
def register_thread_flag_response(self, thread_id):
"""Register a mock response for PUT on the CS thread flag endpoints"""
self.register_flag_response("thread", thread_id)
def register_comment_flag_response(self, comment_id):
"""Register a mock response for PUT on the CS comment flag endpoints"""
self.register_flag_response("comment", comment_id)
def register_delete_thread_response(self, thread_id):
"""
Register a mock response for DELETE on the CS thread instance endpoint
"""
httpretty.register_uri(
httpretty.DELETE,
"http://localhost:4567/api/v1/threads/{id}".format(id=thread_id),
body=json.dumps({}), # body is unused
status=200
)
def register_delete_comment_response(self, comment_id):
"""
Register a mock response for DELETE on the CS comment instance endpoint
"""
httpretty.register_uri(
httpretty.DELETE,
"http://localhost:4567/api/v1/comments/{id}".format(id=comment_id),
body=json.dumps({}), # body is unused
status=200
)
def assert_query_params_equal(self, httpretty_request, expected_params):
"""
Assert that the given mock request had the expected query parameters
"""
actual_params = dict(httpretty_request.querystring)
actual_params.pop("request_id") # request_id is random
self.assertEqual(actual_params, expected_params)
def assert_last_query_params(self, expected_params):
"""
Assert that the last mock request had the expected query parameters
"""
self.assert_query_params_equal(httpretty.last_request(), expected_params)
def request_patch(self, request_data):
"""
make a request to PATCH endpoint and return response
"""
return self.client.patch(
self.url,
json.dumps(request_data),
content_type="application/merge-patch+json"
)
def make_minimal_cs_thread(overrides=None):
"""
Create a dictionary containing all needed thread fields as returned by the
comments service with dummy data and optional overrides
"""
ret = {
"type": "thread",
"id": "dummy",
"course_id": "dummy/dummy/dummy",
"commentable_id": "dummy",
"group_id": None,
"user_id": "0",
"username": "dummy",
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "1970-01-01T00:00:00Z",
"updated_at": "1970-01-01T00:00:00Z",
"last_activity_at": "1970-01-01T00:00:00Z",
"thread_type": "discussion",
"title": "dummy",
"body": "dummy",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 0},
"comments_count": 0,
"unread_comments_count": 0,
"children": [],
"read": False,
"endorsed": False,
"resp_total": 0,
}
ret.update(overrides or {})
return ret
def make_minimal_cs_comment(overrides=None):
"""
Create a dictionary containing all needed comment fields as returned by the
comments service with dummy data and optional overrides
"""
ret = {
"type": "comment",
"id": "dummy",
"commentable_id": "dummy",
"thread_id": "dummy",
"parent_id": None,
"user_id": "0",
"username": "dummy",
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "1970-01-01T00:00:00Z",
"updated_at": "1970-01-01T00:00:00Z",
"body": "dummy",
"abuse_flaggers": [],
"votes": {"up_count": 0},
"endorsed": False,
"child_count": 0,
"children": [],
}
ret.update(overrides or {})
return ret
def make_paginated_api_response(results=None, count=0, num_pages=0, next_link=None, previous_link=None):
"""
Generates the response dictionary of paginated APIs with passed data
"""
return {
"pagination": {
"next": next_link,
"previous": previous_link,
"count": count,
"num_pages": num_pages,
},
"results": results or []
}
| agpl-3.0 |
datakortet/django-cms | cms/test_utils/project/placeholderapp/admin.py | 5 | 2959 | from cms.admin.placeholderadmin import PlaceholderAdmin
from cms.test_utils.project.placeholderapp.models import (Example1, Example2,
Example3, Example4, Example5, MultilingualExample1)
from django.contrib import admin
from hvad.admin import TranslatableAdmin
class MixinAdmin(admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# silly test that placeholderadmin doesn't fuck stuff up
request = kwargs.pop('request', None)
return super(MixinAdmin, self).formfield_for_dbfield(db_field, request=request, **kwargs)
class Example1Admin(PlaceholderAdmin, MixinAdmin):
pass
class Example2Admin(PlaceholderAdmin):
fieldsets = (
('Placeholder + more fields', {
'classes': ('wide',),
'fields': ('char_1', 'placeholder', 'char_2',)
}),
('Other fields', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class Example3Admin(PlaceholderAdmin):
fieldsets = (
('Only chars', {
'classes': ('wide',),
'fields': ('char_1', 'char_2',)
}),
        (u'Only Placeholder, with right classes', {
'classes': ('plugin-holder', 'plugin-holder-nopage',),
'fields': ('placeholder',)
}),
('Only chars', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class Example4Admin(PlaceholderAdmin):
render_placeholder_language_tabs = False
fieldsets = (
('Only chars', {
'classes': ('wide',),
'fields': ('char_1', 'char_2',)
}),
(u'Only Placeholder, with wrong classes', {
'classes': ('wide', 'plugin-holder-nopage',),
'fields': ('placeholder',)
}),
('Only chars', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class Example5Admin(PlaceholderAdmin):
fieldsets = (
('Only chars', {
'classes': ('wide',),
'fields': ('char_1', 'char_2',)
}),
(u'Two Placeholder, with right classes', {
'classes': ('plugin', 'plugin-holder-nopage',),
'fields': ('placeholder_1', 'placeholder_2',)
}),
('Only chars', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class MultilingualAdmin(TranslatableAdmin, PlaceholderAdmin):
pass
admin.site.register(Example1, Example1Admin)
admin.site.register(Example2, Example2Admin)
admin.site.register(Example3, Example3Admin)
admin.site.register(Example4, Example4Admin)
admin.site.register(Example5, Example5Admin)
admin.site.register(MultilingualExample1, MultilingualAdmin)
| bsd-3-clause |
garyjyao1/ansible | lib/ansible/plugins/connection/accelerate.py | 8 | 13413 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import socket
import struct
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.utils.encrypt import key_for_hostname, keyczar_encrypt, keyczar_decrypt
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
class Connection(ConnectionBase):
''' raw socket accelerated connection '''
transport = 'accelerate'
has_pipelining = False
become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self.conn = None
self.key = key_for_hostname(self._play_context.remote_addr)
def _connect(self):
''' activates the connection object '''
if not self._connected:
wrong_user = False
tries = 3
self.conn = socket.socket()
self.conn.settimeout(C.ACCELERATE_CONNECT_TIMEOUT)
self._display.vvvv("attempting connection to %s via the accelerated port %d" % (self._play_context.remote_addr,self._play_context.accelerate_port))
while tries > 0:
try:
self.conn.connect((self._play_context.remote_addr,self._play_context.accelerate_port))
break
except socket.error:
self._display.vvvv("connection to %s failed, retrying..." % self._play_context.remote_addr)
time.sleep(0.1)
tries -= 1
if tries == 0:
self._display.vvv("Could not connect via the accelerated connection, exceeded # of tries")
raise AnsibleConnectionFailure("Failed to connect to %s on the accelerated port %s" % (self._play_context.remote_addr, self._play_context.accelerate_port))
elif wrong_user:
self._display.vvv("Restarting daemon with a different remote_user")
raise AnsibleError("The accelerated daemon was started on the remote with a different user")
self.conn.settimeout(C.ACCELERATE_TIMEOUT)
if not self.validate_user():
# the accelerated daemon was started with a
# different remote_user. The above command
# should have caused the accelerate daemon to
# shutdown, so we'll reconnect.
wrong_user = True
self._connected = True
return self
def send_data(self, data):
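        # length-prefix the payload with an 8-byte big-endian header so the receiver knows how many bytes follow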
packed_len = struct.pack('!Q',len(data))
return self.conn.sendall(packed_len + data)
def recv_data(self):
header_len = 8 # size of a packed unsigned long long
data = b""
try:
self._display.vvvv("%s: in recv_data(), waiting for the header" % self._play_context.remote_addr)
while len(data) < header_len:
d = self.conn.recv(header_len - len(data))
if not d:
self._display.vvvv("%s: received nothing, bailing out" % self._play_context.remote_addr)
return None
data += d
self._display.vvvv("%s: got the header, unpacking" % self._play_context.remote_addr)
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
self._display.vvvv("%s: data received so far (expecting %d): %d" % (self._play_context.remote_addr,data_len,len(data)))
while len(data) < data_len:
d = self.conn.recv(data_len - len(data))
if not d:
self._display.vvvv("%s: received nothing, bailing out" % self._play_context.remote_addr)
return None
self._display.vvvv("%s: received %d bytes" % (self._play_context.remote_addr, len(d)))
data += d
self._display.vvvv("%s: received all of the data, returning" % self._play_context.remote_addr)
return data
except socket.timeout:
raise AnsibleError("timed out while waiting to receive data")
def validate_user(self):
'''
Checks the remote uid of the accelerated daemon vs. the
one specified for this play and will cause the accel
daemon to exit if they don't match
'''
self._display.vvvv("%s: sending request for validate_user" % self._play_context.remote_addr)
data = dict(
mode='validate_user',
username=self._play_context.remote_user,
)
data = jsonify(data)
data = keyczar_encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self._play_context.remote_addr)
self._display.vvvv("%s: waiting for validate_user response" % self._play_context.remote_addr)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
response = keyczar_decrypt(self.key, response)
response = json.loads(response)
if "pong" in response:
# it's a keepalive, go back to waiting
self._display.vvvv("%s: received a keepalive packet" % self._play_context.remote_addr)
continue
else:
self._display.vvvv("%s: received the validate_user response: %s" % (self._play_context.remote_addr, response))
break
if response.get('failed'):
return False
else:
return response.get('rc') == 0
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# FIXME:
#if sudoable and self..become and self.runner.become_method not in self.become_methods_supported:
# raise AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC COMMAND %s" % cmd)
data = dict(
mode='command',
cmd=cmd,
executable=C.DEFAULT_EXECUTABLE,
)
data = jsonify(data)
data = keyczar_encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self._play_context.remote_addr)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
response = keyczar_decrypt(self.key, response)
response = json.loads(response)
if "pong" in response:
# it's a keepalive, go back to waiting
self._display.vvvv("%s: received a keepalive packet" % self._play_context.remote_addr)
continue
else:
self._display.vvvv("%s: received the response" % self._play_context.remote_addr)
break
return (response.get('rc', None), response.get('stdout', ''), response.get('stderr', ''))
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
fd = file(in_path, 'rb')
fstat = os.stat(in_path)
try:
self._display.vvv("PUT file is %d bytes" % fstat.st_size)
last = False
while fd.tell() <= fstat.st_size and not last:
self._display.vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
data = fd.read(CHUNK_SIZE)
if fd.tell() >= fstat.st_size:
last = True
data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
if self._play_context.become:
data['user'] = self._play_context.become_user
data = jsonify(data)
data = keyczar_encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send the file to %s" % self._play_context.remote_addr)
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
response = keyczar_decrypt(self.key, response)
response = json.loads(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
finally:
fd.close()
self._display.vvvv("waiting for final response after PUT")
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
response = keyczar_decrypt(self.key, response)
response = json.loads(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
data = dict(mode='fetch', in_path=in_path)
data = jsonify(data)
data = keyczar_encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to initiate the file fetch with %s" % self._play_context.remote_addr)
fh = open(out_path, "w")
try:
bytes = 0
while True:
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
response = keyczar_decrypt(self.key, response)
response = json.loads(response)
if response.get('failed', False):
raise AnsibleError("Error during file fetch, aborting")
out = base64.b64decode(response['data'])
fh.write(out)
bytes += len(out)
# send an empty response back to signify we
# received the last chunk without errors
data = jsonify(dict())
data = keyczar_encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send ack during file fetch")
if response.get('last', False):
break
finally:
# we don't currently care about this final response,
# we just receive it and drop it. It may be used at some
# point in the future or we may just have the put/fetch
# operations not send back a final response at all
response = self.recv_data()
self._display.vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
fh.close()
def close(self):
''' terminate the connection '''
# Be a good citizen
try:
self.conn.close()
except:
pass
| gpl-3.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.91/roles/lib_openshift/library/oc_atomic_container.py | 19 | 6966 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: doc/atomic_container -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_atomic_container
short_description: Manage the container images on the atomic host platform
description:
- Manage the container images on the atomic host platform
- Allows to execute the commands on the container images
requirements:
- atomic
- "python >= 2.6"
options:
name:
description:
- Name of the container
required: True
default: null
image:
description:
- The image to use to install the container
required: True
default: null
state:
description:
- State of the container
required: True
choices: ["latest", "absent", "latest", "rollback"]
default: "latest"
values:
description:
- Values for the installation of the container
required: False
default: None
'''
# -*- -*- -*- End included fragment: doc/atomic_container -*- -*- -*-
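# Illustrative only (not part of the original module): a playbook task using the
# documented options might look like the following; the container name, image and
# values shown here are assumptions.
#
#   - oc_atomic_container:
#       name: etcd
#       image: registry.example.com/etcd:latest
#       state: latest
#       values:
#         - "ETCD_DATA_DIR=/var/lib/etcd"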
# -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*-
# pylint: disable=wrong-import-position,too-many-branches,invalid-name
import json
from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
else:
changed = "Extracting" in out
return rc, out, err, changed
def _uninstall(module, name):
''' uninstall an atomic container by its name. '''
args = ['atomic', 'uninstall', name]
rc, out, err = module.run_command(args, check_rc=False)
return rc, out, err, False
def do_install(module, container, image, values_list):
''' install a container and exit the module. '''
rc, out, err, changed = _install(module, container, image, values_list)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
module.exit_json(msg=out, changed=changed)
def do_uninstall(module, name):
''' uninstall a container and exit the module. '''
rc, out, err, changed = _uninstall(module, name)
if rc != 0:
module.fail_json(rc=rc, msg=err)
module.exit_json(msg=out, changed=changed)
def do_update(module, container, old_image, image, values_list):
''' update a container and exit the module. If the container uses a different
    image than the currently installed one, then first uninstall the old one '''
# the image we want is different than the installed one
if old_image != image:
rc, out, err, _ = _uninstall(module, container)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return do_install(module, container, image, values_list)
# if the image didn't change, use "atomic containers update"
args = ['atomic', 'containers', 'update'] + values_list + [container]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Extracting" in out
module.exit_json(msg=out, changed=changed)
def do_rollback(module, name):
''' move to the previous deployment of the container, if present, and exit the module. '''
args = ['atomic', 'containers', 'rollback', name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Rolling back" in out
module.exit_json(msg=out, changed=changed)
def core(module):
''' entrypoint for the module. '''
name = module.params['name']
image = module.params['image']
values = module.params['values']
state = module.params['state']
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
out = {}
err = {}
rc = 0
values_list = ["--set=%s" % x for x in values] if values else []
args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return
containers = json.loads(out)
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
if state == 'present' and present:
module.exit_json(msg=out, changed=False)
elif (state in ['latest', 'present']) and not present:
do_install(module, name, image, values_list)
elif state == 'latest':
do_update(module, name, old_image, image, values_list)
elif state == 'absent':
if not present:
module.exit_json(msg="", changed=False)
else:
do_uninstall(module, name)
elif state == 'rollback':
do_rollback(module, name)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, required=True),
image=dict(default=None, required=True),
state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
values=dict(type='list', default=[]),
),
)
# Verify that the platform supports atomic command
rc, _, err = module.run_command('atomic -v', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
try:
core(module)
except Exception as e: # pylint: disable=broad-except
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_atomic_container.py -*- -*- -*-
| apache-2.0 |
lawzou/shoop | shoop/notify/template.py | 6 | 3002 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.encoding import force_text
from jinja2.sandbox import SandboxedEnvironment
class NoLanguageMatches(Exception):
pass
def render_in_context(context, template_text, html_intent=False):
"""
Render the given Jinja2 template text in the script context.
:param context: Script context.
:type context: shoop.notify.script.Context
:param template_text: Jinja2 template text.
:type template_text: str
:param html_intent: Is the template text intended for HTML output?
This currently turns on autoescaping.
:type html_intent: bool
:return: Rendered template text
:rtype: str
:raises: Whatever Jinja2 might happen to raise
"""
# TODO: Add some filters/globals into this environment?
env = SandboxedEnvironment(autoescape=html_intent)
template = env.from_string(template_text)
return template.render(context.get_variables())
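# Illustrative sketch (not part of the original module): rendering a template
# with a minimal stand-in for the script context. The real first argument is a
# shoop.notify.script.Context; the fake class below only mimics the single
# method render_in_context() relies on.
def _example_render_in_context():
    class _FakeContext(object):
        def get_variables(self):
            return {"customer": "Alice", "order_id": 42}

    # -> u'Hi Alice, your order 42 has shipped.'
    return render_in_context(_FakeContext(),
                             "Hi {{ customer }}, your order {{ order_id }} has shipped.")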
class Template(object):
def __init__(self, context, data):
"""
:param context: Script context
:type context: shoop.notify.script.Context
:param data: Template data dictionary
:type data: dict
"""
self.context = context
self.data = data
def _get_language_data(self, language):
return self.data.get(force_text(language).lower(), {})
def has_language(self, language, fields):
data = self._get_language_data(language)
return set(data.keys()) >= set(fields)
def render(self, language, fields):
"""
Render this template in the given language,
returning the given fields.
:param language: Language code (ISO 639-1 or ISO 639-2)
:type language: str
:param fields: Desired fields to render.
:type fields: list[str]
:return: Dict of field -> rendered content.
:rtype: dict[str, str]
"""
data = self._get_language_data(language)
rendered = {}
for field in fields:
field_template = data.get(field)
if field_template: # pragma: no branch
rendered[field] = render_in_context(self.context, field_template, html_intent=False)
return rendered
def render_first_match(self, language_preferences, fields):
# TODO: Document
for language in language_preferences:
if self.has_language(language, fields):
rendered = self.render(language=language, fields=fields)
rendered["_language"] = language
return rendered
raise NoLanguageMatches("No language in template matches any of languages %r for fields %r" % (
language_preferences, fields
))
| agpl-3.0 |
GodBlessPP/w16b_test | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/util.py | 696 | 9917 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
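# Illustrative sketch (not part of the original module): typical use is to turn
# on the multiprocessing logger and then log through it; the message below is
# made up for the example.
def _example_log_to_stderr():
    logger = log_to_stderr(DEBUG)
    logger.info('worker pool starting')  # printed as [INFO/MainProcess] worker pool starting
    return logger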
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
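# Illustrative sketch (not part of the original module): registering a
# finalizer for an object, in the same way get_temp_dir() above ties directory
# removal to interpreter exit. The resource class and message are made up.
def _example_finalize_usage():
    class _Resource(object):
        pass

    messages = []
    res = _Resource()
    fin = Finalize(res, messages.append, args=('resource cleaned up',), exitpriority=0)
    fin()               # normally triggered via the weakref or at interpreter exit
    return messages     # -> ['resource cleaned up']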
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
| agpl-3.0 |
Arkapravo/morse-0.6 | src/morse/actuators/destination.py | 1 | 3226 | import logging; logger = logging.getLogger("morse." + __name__)
import morse.core.actuator
from morse.helpers.components import add_data, add_property
class DestinationActuatorClass(morse.core.actuator.MorseActuatorClass):
""" Destination motion controller
This controller will receive a destination point and
make the robot move to that location by moving without turning.
"""
_name = "Destination"
_short_desc = "Instruct the robot to move towards a given target"
add_data('x', 'current X pos')
add_data('y', 'current Y pos')
add_data('z', 'current Z pos')
add_property('_tolerance', 0.5, 'Tolerance')
add_property('_speed', 5.0, 'Speed')
def __init__(self, obj, parent=None):
logger.info('%s initialization' % obj.name)
# Call the constructor of the parent class
super(self.__class__,self).__init__(obj, parent)
self.destination = self.blender_obj.position
#self.local_data['speed'] = 0.0
self.local_data['x'] = self.destination[0]
self.local_data['y'] = self.destination[1]
self.local_data['z'] = self.destination[2]
logger.info('Component initialized')
def default_action(self):
""" Move the object towards the destination. """
parent = self.robot_parent
self.destination = [ self.local_data['x'], self.local_data['y'], self.local_data['z'] ]
logger.debug("STRAIGHT GOT DESTINATION: {0}".format(self.destination))
logger.debug("Robot {0} move status: '{1}'".format(parent.blender_obj.name, parent.move_status))
# Vectors returned are already normalised
distance, global_vector, local_vector = self.blender_obj.getVectTo(self.destination)
logger.debug("My position: {0}".format(self.blender_obj.position))
logger.debug("GOT DISTANCE: {0}".format(distance))
logger.debug("Global vector: {0}".format(global_vector))
logger.debug("Local vector: {0}".format(local_vector))
if distance > self._tolerance:
# Set the robot status
parent.move_status = "Transit"
# Scale the speeds to the time used by Blender
try:
vx = global_vector[0] * self._speed / self.frequency
vy = global_vector[1] * self._speed / self.frequency
vz = global_vector[2] * self._speed / self.frequency
# For the moment ignoring the division by zero
# It happens apparently when the simulation starts
except ZeroDivisionError:
pass
# If the target has been reached, change the status
else:
# Reset movement variables
vx, vy, vz = 0.0, 0.0, 0.0
#rx, ry, rz = 0.0, 0.0, 0.0
parent.move_status = "Stop"
logger.debug("TARGET REACHED")
logger.debug("Robot {0} move status: '{1}'".format(parent.blender_obj.name, parent.move_status))
# Give the movement instructions directly to the parent
# The second parameter specifies a "local" movement
parent.blender_obj.applyMovement([vx, vy, vz], False)
#parent.blender_obj.applyRotation([rx, ry, rz], False)
| bsd-3-clause |
garbled1/ansible | lib/ansible/modules/network/ios/ios_banner.py | 22 | 5315 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_banner
version_added: "2.3"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage multiline banners on Cisco IOS devices
description:
- This will configure both login and motd banners on remote devices
    running Cisco IOS. It allows playbooks to add or remove
banner text from the active running configuration.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device.
required: true
default: null
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
ios_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
ios_banner:
banner: motd
state: absent
- name: Configure banner from file
ios_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.ios import load_config, run_commands
from ansible.module_utils.ios import ios_argument_spec, check_args
import re
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
if state == 'absent' and 'text' in have.keys() and have['text']:
commands.append('no banner %s' % module.params['banner'])
elif state == 'present':
if want['text'] and (want['text'] != have.get('text')):
banner_cmd = 'banner %s' % module.params['banner']
banner_cmd += ' @\n'
banner_cmd += want['text'].strip()
banner_cmd += '\n@'
commands.append(banner_cmd)
return commands
def map_config_to_obj(module):
rc, out, err = exec_command(module, 'show banner %s' % module.params['banner'])
if rc == 0:
output = out
else:
rc, out, err = exec_command(module,
'show running-config | begin banner %s'
% module.params['banner'])
if out:
output = re.search('\^C(.*)\^C', out, re.S).group(1).strip()
else:
output = None
obj = {'banner': module.params['banner'], 'state': 'absent'}
if output:
obj['text'] = output
obj['state'] = 'present'
return obj
def map_params_to_obj(module):
text = module.params['text']
if text:
text = str(text).strip()
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ios_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
response = load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Nitaco/ansible | test/units/modules/network/dellos9/test_dellos9_command.py | 46 | 4289 | # (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.dellos9 import dellos9_command
from units.modules.utils import set_module_args
from .dellos9_module import TestDellos9Module, load_fixture
class TestDellos9CommandModule(TestDellos9Module):
module = dellos9_command
def setUp(self):
super(TestDellos9CommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.dellos9.dellos9_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestDellos9CommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_dellos9_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Dell Real Time'))
def test_dellos9_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Dell Real Time'))
def test_dellos9_command_wait_for(self):
wait_for = 'result[0] contains "Dell Real"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_dellos9_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_dellos9_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_dellos9_command_match_any(self):
wait_for = ['result[0] contains "Dell Real"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_dellos9_command_match_all(self):
wait_for = ['result[0] contains "Dell Real"',
'result[0] contains "Operating System"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_dellos9_command_match_all_failure(self):
wait_for = ['result[0] contains "Dell Real"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/test/test_contextlib.py | 125 | 9103 | """Unit tests for contextlib.py, and other context managers."""
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import test_support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError, e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
class NestedTestCase(unittest.TestCase):
# XXX This needs more work
def test_nested(self):
@contextmanager
def a():
yield 1
@contextmanager
def b():
yield 2
@contextmanager
def c():
yield 3
with nested(a(), b(), c()) as (x, y, z):
self.assertEqual(x, 1)
self.assertEqual(y, 2)
self.assertEqual(z, 3)
def test_nested_cleanup(self):
state = []
@contextmanager
def a():
state.append(1)
try:
yield 2
finally:
state.append(3)
@contextmanager
def b():
state.append(4)
try:
yield 5
finally:
state.append(6)
with self.assertRaises(ZeroDivisionError):
with nested(a(), b()) as (x, y):
state.append(x)
state.append(y)
1 // 0
self.assertEqual(state, [1, 4, 2, 5, 6, 3])
def test_nested_right_exception(self):
@contextmanager
def a():
yield 1
class b(object):
def __enter__(self):
return 2
def __exit__(self, *exc_info):
try:
raise Exception()
except:
pass
with self.assertRaises(ZeroDivisionError):
with nested(a(), b()) as (x, y):
1 // 0
self.assertEqual((x, y), (1, 2))
def test_nested_b_swallows(self):
@contextmanager
def a():
yield
@contextmanager
def b():
try:
yield
except:
# Swallow the exception
pass
try:
with nested(a(), b()):
1 // 0
except ZeroDivisionError:
self.fail("Didn't swallow ZeroDivisionError")
def test_nested_break(self):
@contextmanager
def a():
yield
state = 0
while True:
state += 1
with nested(a(), a()):
break
state += 10
self.assertEqual(state, 1)
def test_nested_continue(self):
@contextmanager
def a():
yield
state = 0
while state < 3:
state += 1
with nested(a(), a()):
continue
state += 10
self.assertEqual(state, 3)
def test_nested_return(self):
@contextmanager
def a():
try:
yield
except:
pass
def foo():
with nested(a(), a()):
return 1
return 10
self.assertEqual(foo(), 1)
class ClosingTestCase(unittest.TestCase):
# XXX This needs more work
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 // 0
self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 // 0
self.assertTrue(f.closed)
finally:
test_support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 // 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
# This is needed to make the test actually run under regrtest.py!
def test_main():
with test_support.check_warnings(("With-statements now directly support "
"multiple context managers",
DeprecationWarning)):
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| mit |
cpcloud/ibis | ibis/pandas/execution/tests/test_structs.py | 1 | 2175 | from collections import OrderedDict
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
@pytest.fixture(scope="module")
def value():
return OrderedDict([("fruit", "pear"), ("weight", 0)])
@pytest.fixture(scope="module")
def struct_client(value):
df = pd.DataFrame(
{
"s": [
OrderedDict([("fruit", "apple"), ("weight", None)]),
value,
OrderedDict([("fruit", "pear"), ("weight", 1)]),
],
"key": list("aab"),
"value": [1, 2, 3],
}
)
return ibis.pandas.connect({"t": df})
@pytest.fixture
def struct_table(struct_client):
return struct_client.table(
"t",
schema={
"s": dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
},
)
def test_struct_field_literal(value):
struct = ibis.literal(value)
assert struct.type() == dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
expr = struct.fruit
result = ibis.pandas.execute(expr)
assert result == "pear"
expr = struct.weight
result = ibis.pandas.execute(expr)
assert result == 0
def test_struct_field_series(struct_table):
t = struct_table
expr = t.s.fruit
result = expr.execute()
expected = pd.Series(["apple", "pear", "pear"], name="fruit")
tm.assert_series_equal(result, expected)
def test_struct_field_series_group_by_key(struct_table):
t = struct_table
expr = t.groupby(t.s.fruit).aggregate(total=t.value.sum())
result = expr.execute()
expected = pd.DataFrame(
[("apple", 1), ("pear", 5)], columns=["fruit", "total"]
)
tm.assert_frame_equal(result, expected)
def test_struct_field_series_group_by_value(struct_table):
t = struct_table
expr = t.groupby(t.key).aggregate(total=t.s.weight.sum())
result = expr.execute()
# these are floats because we have a NULL value in the input data
expected = pd.DataFrame([("a", 0.0), ("b", 1.0)], columns=["key", "total"])
tm.assert_frame_equal(result, expected)
| apache-2.0 |
andrius-preimantas/odoo | openerp/tools/float_utils.py | 151 | 9267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None):
"""Return ``value`` rounded to ``precision_digits``
decimal digits, minimizing IEEE-754 floating point representation
errors, and applying HALF-UP (away from zero) tie-breaking rule.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
# Due to IEE754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
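# Illustrative check (not part of the original module), matching the test data
# in the __main__ block below: the epsilon correction above makes the 2.675
# case from the comment round HALF-UP as intended, while the builtin round()
# is thrown off by the IEEE-754 representation 2.6749999999999998.
def _example_float_round():
    corrected = '%.2f' % float_round(2.675, precision_digits=2)  # '2.68'
    naive = '%.2f' % round(2.675, 2)                             # '2.67'
    return corrected, naive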
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
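# Illustrative check (not part of the original module), restating the example
# from the docstring above: 0.006 and 0.002 compare as different at 2 digits
# because they round to 0.01 and 0.0 respectively, even though their
# difference rounds to zero at that precision.
def _example_float_compare():
    different = float_compare(0.006, 0.002, precision_digits=2)       # 1
    diff_is_zero = float_is_zero(0.006 - 0.002, precision_digits=2)   # True
    return different, diff_is_zero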
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
the given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
    # Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 |
davidvon/pipa-pay-server | site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
UTF8Prober(),
SJISProber(),
EUCJPProber(),
GB2312Prober(),
EUCKRProber(),
CP949Prober(),
Big5Prober(),
EUCTWProber()
]
self.reset()
| apache-2.0 |
radinformatics/whatisit | whatisit/apps/wordfish/storage.py | 1 | 2207 | from django.core.files.storage import FileSystemStorage
from django.core.files.move import file_move_safe
from django.contrib.auth.models import User
from django.apps import apps
from fnmatch import fnmatch
from whatisit.settings import (
MEDIA_ROOT,
MEDIA_URL
)
import errno
import itertools
import os
import tempfile
############################################################################
# Storage Models
############################################################################
class WhatisitStorage(FileSystemStorage):
def __init__(self, location=None, base_url=None):
if location is None:
location = MEDIA_ROOT
if base_url is None:
base_url = MEDIA_URL
super(WhatisitStorage, self).__init__(location, base_url)
def url(self, name):
uid = None
spath, file_name = os.path.split(name)
urlsects = [v for v in spath.split('/') if v]
for i in range(len(urlsects)):
sect = urlsects.pop(0)
if sect.isdigit():
                uid = sect
break
report_path = '/'.join(urlsects)
coll_model = apps.get_model('whatisit', 'ReportCollection')
collection = coll_model.objects.get(id=uid)
#if collection.private:
# cid = collection.private_token
#else:
cid = collection.id
        return os.path.join(self.base_url, str(cid), report_path, file_name)
class ImageStorage(WhatisitStorage):
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number (before
# the file extension, if one exists) to the filename until the generated
# filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
return name
| mit |
yyu168/linux | tools/testing/selftests/bpf/tcp_server.py | 61 | 1778 | #!/usr/bin/env python2
#
# SPDX-License-Identifier: GPL-2.0
#
import sys, os, os.path, getopt
import socket, time
import subprocess
import select
def read(sock, n):
buf = ''
while len(buf) < n:
rem = n - len(buf)
try: s = sock.recv(rem)
except (socket.error), e: return ''
buf += s
return buf
def send(sock, s):
total = len(s)
count = 0
while count < total:
try: n = sock.send(s)
except (socket.error), e: n = 0
if n == 0:
return count;
count += n
return count
SERVER_PORT = 12877
MAX_PORTS = 2
serverPort = SERVER_PORT
serverSocket = None
HostName = socket.gethostname()
# create passive socket
serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
host = socket.gethostname()
try: serverSocket.bind((host, 0))
except socket.error as msg:
print 'bind fails: ', msg
sn = serverSocket.getsockname()
serverPort = sn[1]
cmdStr = ("./tcp_client.py %d &") % (serverPort)
os.system(cmdStr)
buf = ''
n = 0
while n < 500:
buf += '.'
n += 1
serverSocket.listen(MAX_PORTS)
readList = [serverSocket]
while True:
readyRead, readyWrite, inError = \
select.select(readList, [], [], 2)
if len(readyRead) > 0:
waitCount = 0
for sock in readyRead:
if sock == serverSocket:
(clientSocket, address) = serverSocket.accept()
address = str(address[0])
readList.append(clientSocket)
else:
sock.settimeout(1);
s = read(sock, 1000)
n = send(sock, buf)
sock.close()
serverSocket.close()
sys.exit(0)
else:
print 'Select timeout!'
sys.exit(1)
| gpl-2.0 |
blue-yonder/pyscaffold | tests/test_api.py | 1 | 8054 | # -*- coding: utf-8 -*-
from os.path import exists as path_exists
from os.path import getmtime
import pytest
from pyscaffold import templates
from pyscaffold.api import (
Extension,
create_project,
discover_actions,
get_default_options,
helpers,
verify_project_dir,
)
from pyscaffold.exceptions import (
DirectoryAlreadyExists,
DirectoryDoesNotExist,
GitNotConfigured,
GitNotInstalled,
InvalidIdentifier,
)
def create_extension(*hooks):
"""Shorthand to define extensions from a list of actions"""
class TestExtension(Extension):
def activate(self, actions):
for hook in hooks:
actions = self.register(actions, hook, after="define_structure")
return actions
return TestExtension("TestExtension")
def test_discover_actions():
# Given an extension with actions,
def fake_action(struct, opts):
return struct, opts
def extension(actions):
return [fake_action] + actions
# When discover_actions is called,
actions = discover_actions([extension])
# Then the extension actions should be listed alongside default actions.
assert get_default_options in actions
assert fake_action in actions
def test_create_project_call_extension_hooks(tmpfolder, git_mock):
# Given an extension with hooks,
called = []
def pre_hook(struct, opts):
called.append("pre_hook")
return struct, opts
def post_hook(struct, opts):
called.append("post_hook")
return struct, opts
# when created project is called,
create_project(project="proj", extensions=[create_extension(pre_hook, post_hook)])
# then the hooks should also be called.
assert "pre_hook" in called
assert "post_hook" in called
def test_create_project_generate_extension_files(tmpfolder, git_mock):
# Given a blank state,
assert not path_exists("proj/tests/extra.file")
assert not path_exists("proj/tests/another.file")
# and an extension with extra files,
def add_files(struct, opts):
struct = helpers.ensure(struct, "proj/tests/extra.file", "content")
struct = helpers.merge(struct, {"proj": {"tests": {"another.file": "content"}}})
return struct, opts
# when the created project is called,
create_project(project="proj", extensions=[create_extension(add_files)])
# then the files should be created
assert path_exists("proj/tests/extra.file")
assert tmpfolder.join("proj/tests/extra.file").read() == "content"
assert path_exists("proj/tests/another.file")
assert tmpfolder.join("proj/tests/another.file").read() == "content"
def test_create_project_respect_update_rules(tmpfolder, git_mock):
# Given an existing project
opts = dict(project="proj")
create_project(opts)
for i in (0, 1, 3, 5, 6):
tmpfolder.ensure("proj/tests/file" + str(i)).write("old")
assert path_exists("proj/tests/file" + str(i))
# and an extension with extra files
def add_files(struct, opts):
nov, ncr = helpers.NO_OVERWRITE, helpers.NO_CREATE
struct = helpers.ensure(struct, "proj/tests/file0", "new")
struct = helpers.ensure(struct, "proj/tests/file1", "new", nov)
struct = helpers.ensure(struct, "proj/tests/file2", "new", ncr)
struct = helpers.merge(
struct,
{
"proj": {
"tests": {
"file3": ("new", nov),
"file4": ("new", ncr),
"file5": ("new", None),
"file6": "new",
}
}
},
)
return struct, opts
# When the created project is called,
create_project(
project="proj", update=True, extensions=[create_extension(add_files)]
)
# then the NO_CREATE files should not be created,
assert not path_exists("proj/tests/file2")
assert not path_exists("proj/tests/file4")
# the NO_OVERWRITE files should not be updated
assert tmpfolder.join("proj/tests/file1").read() == "old"
assert tmpfolder.join("proj/tests/file3").read() == "old"
# and files with no rules or `None` rules should be updated
assert tmpfolder.join("proj/tests/file0").read() == "new"
assert tmpfolder.join("proj/tests/file5").read() == "new"
assert tmpfolder.join("proj/tests/file6").read() == "new"
def test_create_project_when_folder_exists(tmpfolder, git_mock):
tmpfolder.ensure("my-project", dir=True)
opts = dict(project="my-project")
with pytest.raises(DirectoryAlreadyExists):
create_project(opts)
opts = dict(project="my-project", force=True)
create_project(opts)
def test_create_project_with_valid_package_name(tmpfolder, git_mock):
opts = dict(project="my-project", package="my_package")
create_project(opts)
def test_create_project_with_invalid_package_name(tmpfolder, git_mock):
opts = dict(project="my-project", package="my:package")
with pytest.raises(InvalidIdentifier):
create_project(opts)
def test_create_project_when_updating(tmpfolder, git_mock):
opts = dict(project="my-project")
create_project(opts)
opts = dict(project="my-project", update=True)
create_project(opts)
assert path_exists("my-project")
def test_create_project_with_license(tmpfolder, git_mock):
_, opts = get_default_options({}, dict(project="my-project", license="new-bsd"))
# ^ The entire default options are needed, since template
# uses computed information
create_project(opts)
assert path_exists("my-project")
content = tmpfolder.join("my-project/LICENSE.txt").read()
assert content == templates.license(opts)
def test_get_default_opts():
_, opts = get_default_options(
{}, dict(project="project", package="package", description="description")
)
assert all(k in opts for k in "project update force author".split())
assert isinstance(opts["extensions"], list)
assert isinstance(opts["requirements"], list)
def test_get_default_opts_with_nogit(nogit_mock):
with pytest.raises(GitNotInstalled):
get_default_options({}, dict(project="my-project"))
def test_get_default_opts_with_git_not_configured(noconfgit_mock):
with pytest.raises(GitNotConfigured):
get_default_options({}, dict(project="my-project"))
def test_verify_project_dir_when_project_doesnt_exist_and_updating(tmpfolder, git_mock):
with pytest.raises(DirectoryDoesNotExist):
verify_project_dir({}, dict(project="my-project", update=True))
def test_verify_project_dir_when_project_exist_but_not_updating(tmpfolder, git_mock):
tmpfolder.ensure("my-project", dir=True)
with pytest.raises(DirectoryAlreadyExists):
verify_project_dir({}, dict(project="my-project", update=False, force=False))
def test_api(tmpfolder):
opts = dict(project="created_proj_with_api")
create_project(opts)
assert path_exists("created_proj_with_api")
assert path_exists("created_proj_with_api/.git")
def test_pretend(tmpfolder):
opts = dict(project="created_proj_with_api", pretend=True)
create_project(opts)
assert not path_exists("created_proj_with_api")
def test_pretend_when_updating_does_not_make_changes(tmpfolder):
# Given a project already exists
opts = dict(project="proj", license="mit")
create_project(opts)
setup_changed = getmtime("proj/setup.cfg")
license_changed = getmtime("proj/LICENSE.txt")
# When it is updated with different configuration,
create_project(
project="proj",
update=True,
force=True,
pretend=True,
url="my.project.net",
license="mozilla",
)
# Then nothing should change
assert getmtime("proj/setup.cfg") == setup_changed
assert "my.project.net" not in tmpfolder.join("proj/setup.cfg").read()
assert getmtime("proj/LICENSE.txt") == license_changed
assert "MIT License" in tmpfolder.join("proj/LICENSE.txt").read()
| mit |
gwpy/vet | gwvet/metric/__init__.py | 1 | 1693 | # coding=utf-8
# Copyright (C) Duncan Macleod (2014)
#
# This file is part of GWVeto.
#
# GWVeto is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWVeto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWVeto. If not, see <http://www.gnu.org/licenses/>.
""".. currentmodule:: gwvet
#######
Metrics
#######
GWpy VET defines a custom `Metric` object, designed to wrap existing
figure-of-merit functions into a standard format such that they can be
applied conveniently to a set of segments and event triggers.
"""
from .core import ( # noqa: F401
Metric,
read_all,
evaluate,
)
from .registry import ( # noqa: F401
register_metric,
get_all_metrics,
get_metric,
)
from .metrics import ( # noqa: F401
_use_dqflag,
deadtime,
efficiency,
efficiency_over_deadtime,
use_percentage,
safety,
loudest_event_metric_factory,
metric_by_column_value_factory,
)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__all__ = ['Metric', 'register_metric', 'get_metric', 'get_all_metrics',
'read_all', 'evaluate', 'deadtime', 'efficiency', 'safety',
'efficiency_over_deadtime', 'use_percentage',
           'metric_by_column_value_factory', 'loudest_event_metric_factory']
| gpl-3.0 |
dwightgunning/django | django/test/client.py | 132 | 26745 | from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
"""
The test client has been asked to follow a redirect loop.
"""
def __init__(self, message, last_response):
super(RedirectCycleError, self).__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
            raise ValueError("Unable to write a payload after it has been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
    An HTTP handler that can be used for testing purposes. Uses the WSGI
interface to compose requests, but returns the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
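# Illustrative usage of encode_multipart (editor's sketch, not part of the
# original module; the field names and the file are hypothetical):
#
#     body = encode_multipart(BOUNDARY, {
#         'title': 'hello',
#         'attachment': open('report.txt', 'rb'),
#     })
#     # ``body`` is a bytes object ready to be used as a multipart request
#     # body together with a Content-Type of MULTIPART_CONTENT.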
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = uri_to_iri(path).encode(UTF_8)
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. We replicate this behavior here.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode(ISO_8859_1) if six.PY3 else path
def get(self, path, data=None, secure=False, **extra):
"Construct a GET request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"Construct a TRACE request."
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(force_str(path))
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if apps.is_installed('django.contrib.sessions'):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
else:
s = engine.SessionStore()
s.save()
self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
return s
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = curry(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(
lambda: urlresolvers.resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
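    # Illustrative usage of ``follow`` (editor's sketch, not part of the
    # original module; '/old-url/' is a hypothetical path):
    #
    #     response = Client().get('/old-url/', follow=True)
    #     response.redirect_chain   # [(url, status_code), ...] for each hop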
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""
Send a TRACE request to the server.
"""
response = super(Client, self).trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if (user and user.is_active and
apps.is_installed('django.contrib.sessions')):
self._login(user)
return True
else:
return False
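    # Illustrative usage of ``login`` (editor's sketch, not part of the
    # original module; the credentials are hypothetical):
    #
    #     c = Client()
    #     if c.login(username='jane', password='secret'):
    #         response = c.get('/members/')   # issued as the logged-in user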
def force_login(self, user, backend=None):
if backend is None:
backend = settings.AUTHENTICATION_BACKENDS[0]
user.backend = backend
self._login(user)
def _login(self, user):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if 'application/json' not in response.get('Content-Type'):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
return json.loads(response.content.decode(), **extra)
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
| bsd-3-clause |
chriha/GistTerminal | helpers.py | 1 | 2293 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import json
import os
import re
import sys
import tempfile
# see http://en.wikipedia.org/wiki/ANSI_escape_code for more ANSI escape codes
class textColors( object ):
grey = '37m'
white = '97m'
cyan = '36m'
lightcyan = '96m'
pink = '35m'
lightpink = '95m'
blue = '34m'
lightblue = '94m'
yellow = '33m'
lightyellow = '93m'
green = '32m'
lightgreen = '92m'
red = '31m'
lightred = '91m'
black = '30m'
darkgrey = '90m'
def get( self, sColor, isBold = False ):
if ( isBold ):
return '\033[1;' + vars( textColors )[sColor]
else:
return '\033[0;' + vars( textColors )[sColor]
def end( self ):
return '\033[0m'
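    # Illustrative usage (editor's sketch, not part of the original file):
    #
    #     print textColors().get( 'green', True ) + 'gist saved' + textColors().end()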
@contextmanager
def namedTempfile():
tmpFile = tempfile.NamedTemporaryFile( delete = False )
try:
yield tmpFile
finally:
tmpFile.close()
os.unlink( tmpFile.name )
class showText( object ):
def help( self ):
print 'Usage: gist [-b] [-c] [-h] [-l] [-o] [-s <search string>] [-t <GitHub API token>]'
print 'Options:'
print ' -h Show this help'
print ' -b Open a selected Gist in the Webbrowser'
print ' -c Copy a selected Gist into your clipboard'
print ' -l List all your Gists'
print ' -s <search string> Search for a string in all Gist descriptions'
print ' -t <GitHub API token> Set your GitHub API token to access your Gists'
print '\r'
print 'Legend: ' + textColors().get( 'yellow' ) + 'private Gist' + textColors().end() + ', ' + textColors().get( 'green' ) + 'public Gist' + textColors().end() + ', ' + textColors().get( 'red' ) + 'error' + textColors().end()
class SimpleHTTPError( Exception ):
def __init__( self, code, response ):
response = json.loads( response.decode( 'utf8', 'ignore' ) )
        print textColors().get( 'red' ) + response['message'] + ' (' + str( code ) + ')' + textColors().end()
sys.exit()
| mit |
bwrsandman/OpenUpgrade | addons/stock_dropshipping/wizard/__init__.py | 313 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_invoice_onshipping
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
mkrupcale/ansible | lib/ansible/modules/network/eos/eos_config.py | 25 | 12504 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: eos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Arista EOS configuration sections
description:
- Arista EOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with eos configuration sections in
a deterministic way. This module works with either CLI or eAPI
transports.
extends_documentation_fragment: eos
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ['yes', 'no']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
the command used to collect the running-config is append with
the all keyword. When the value is set to false, the command
is issued without the all keyword
required: false
default: false
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
required: false
default: false
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
- eos_config:
lines: hostname {{ inventory_hostname }}
provider: "{{ cli }}"
- eos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
- 50 permit ip 5.5.5.5/32 any log
parents: ip access-list test
before: no ip access-list test
match: exact
provider: "{{ cli }}"
- eos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
parents: ip access-list test
before: no ip access-list test
replace: block
provider: "{{ cli }}"
- name: load configuration from file
eos_config:
src: eos.cfg
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/eos_config.2016-07-16@22:28:34
"""
import time
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.eos import NetworkModule, NetworkError
from ansible.module_utils.basic import get_exception
def check_args(module, warnings):
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
if not module.connection.supports_sessions():
warnings.append('The current version of EOS on the remote device does '
'not support configuration sessions. The commit '
'argument will be ignored')
def get_candidate(module):
candidate = NetworkConfig(indent=3)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def get_config(module, defaults=False):
contents = module.params['config']
if not contents:
defaults = module.params['defaults']
contents = module.config.get_config(include_defaults=defaults)
return NetworkConfig(indent=3, contents=contents)
def load_config(module, commands, result):
replace = module.params['replace'] == 'config'
commit = not module.check_mode
diff = module.config.load_config(commands, replace=replace, commit=commit)
if diff and module.connection.supports_sessions():
result['diff'] = dict(prepared=diff)
result['changed'] = True
elif diff:
result['changed'] = True
def run(module, result):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
config = get_config(module)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['updates'] = commands
module.log('commands: %s' % commands)
load_config(module, commands, result)
if module.params['save']:
if not module.check_mode:
module.config.save_config()
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(default=False, type='bool'),
)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = NetworkModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
if module.params['force'] is True:
module.params['match'] = 'none'
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
run(module, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kernel-sanders/arsenic-mobile | Dependencies/zope.interface-4.0.5/src/zope/interface/document.py | 30 | 3456 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Pretty-Print an Interface object as structured text (Yum)
This module provides a function, asStructuredText, for rendering an
interface as structured text.
"""
import zope.interface
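# Illustrative usage (editor's sketch, not part of the original module;
# ``IExample`` and its members are hypothetical):
#
#     class IExample(zope.interface.Interface):
#         """An example interface."""
#         name = zope.interface.Attribute("A short name")
#         def greet(who):
#             """Return a greeting for *who*."""
#
#     print(asStructuredText(IExample))
#     # -> the interface name, its doc string, then "Attributes:" and
#     #    "Methods:" sections rendered as structured text.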
def asStructuredText(I, munge=0):
""" Output structured text format. Note, this will whack any existing
'structured' format of the text. """
r = [I.getName()]
outp = r.append
level = 1
if I.getDoc():
outp(_justify_and_indent(_trim_doc_string(I.getDoc()), level))
bases = [base
for base in I.__bases__
if base is not zope.interface.Interface
]
if bases:
outp(_justify_and_indent("This interface extends:", level, munge))
level += 1
for b in bases:
item = "o %s" % b.getName()
outp(_justify_and_indent(_trim_doc_string(item), level, munge))
level -= 1
namesAndDescriptions = sorted(I.namesAndDescriptions())
outp(_justify_and_indent("Attributes:", level, munge))
level += 1
for name, desc in namesAndDescriptions:
if not hasattr(desc, 'getSignatureString'): # ugh...
item = "%s -- %s" % (desc.getName(),
desc.getDoc() or 'no documentation')
outp(_justify_and_indent(_trim_doc_string(item), level, munge))
level -= 1
outp(_justify_and_indent("Methods:", level, munge))
level += 1
for name, desc in namesAndDescriptions:
if hasattr(desc, 'getSignatureString'): # ugh...
item = "%s%s -- %s" % (desc.getName(),
desc.getSignatureString(),
desc.getDoc() or 'no documentation')
outp(_justify_and_indent(_trim_doc_string(item), level, munge))
return "\n\n".join(r) + "\n\n"
def _trim_doc_string(text):
""" Trims a doc string to make it format
correctly with structured text. """
lines = text.replace('\r\n', '\n').split('\n')
nlines = [lines.pop(0)]
if lines:
min_indent = min([len(line) - len(line.lstrip())
for line in lines])
for line in lines:
nlines.append(line[min_indent:])
return '\n'.join(nlines)
def _justify_and_indent(text, level, munge=0, width=72):
""" indent and justify text, rejustify (munge) if specified """
indent = " " * level
if munge:
lines = []
line = indent
text = text.split()
for word in text:
line = ' '.join([line, word])
if len(line) > width:
lines.append(line)
line = indent
else:
lines.append(line)
return '\n'.join(lines)
else:
return indent + \
text.strip().replace("\r\n", "\n") .replace("\n", "\n" + indent)
| gpl-3.0 |
edx/edx-platform | lms/djangoapps/ccx/tests/test_tasks.py | 4 | 3442 | """
Tests for celery tasks defined in tasks module
"""
import contextlib
from unittest import mock
from ccx_keys.locator import CCXLocator
from common.djangoapps.student.roles import CourseCcxCoachRole
from common.djangoapps.student.tests.factories import AdminFactory
from lms.djangoapps.ccx.tasks import send_ccx_course_published
from lms.djangoapps.ccx.tests.factories import CcxFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@contextlib.contextmanager
def mock_signal_receiver(signal): # lint-amnesty, pylint: disable=missing-function-docstring
receiver = mock.Mock()
signal.connect(receiver)
yield receiver
signal.disconnect(receiver)
class TestSendCCXCoursePublished(ModuleStoreTestCase):
"""
Unit tests for the send ccx course published task
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
ENABLED_SIGNALS = ['course_published']
def setUp(self):
"""
Set up tests
"""
super().setUp()
course = self.course = CourseFactory.create(org="edX", course="999", display_name="Run 666")
course2 = self.course2 = CourseFactory.create(org="edX", course="999a", display_name="Run 667")
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
self.ccx2 = CcxFactory(course_id=course.id, coach=coach)
self.ccx3 = CcxFactory(course_id=course.id, coach=coach)
self.ccx4 = CcxFactory(course_id=course2.id, coach=coach)
def call_fut(self, course_key):
"""
Call the function under test
"""
send_ccx_course_published(str(course_key))
def test_signal_not_sent_for_ccx(self):
"""
Check that course published signal is not sent when course key is for a ccx
"""
course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.call_fut(course_key)
assert receiver.call_count == 0
def test_signal_sent_for_ccx(self):
"""
Check that course published signal is sent when course key is not for a ccx.
We have 4 ccx's, but only 3 are derived from the course id used here, so call
count must be 3 to confirm that all derived courses and no more got the signal.
"""
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.call_fut(self.course.id)
assert receiver.call_count == 3
def test_course_overview_cached(self):
"""
Check that course overview is cached after course published signal is sent
"""
course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
overview = CourseOverview.objects.filter(id=course_key)
assert len(overview) == 0
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.call_fut(self.course.id)
assert receiver.call_count == 3
overview = CourseOverview.objects.filter(id=course_key)
assert len(overview) == 1
| agpl-3.0 |
lucasa/landell_gst-gengui | sltv/gstmanager/sbinmanager.py | 2 | 1441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger('sbinmanager')
class SBinManager(object):
def __init__(self):
self.pipeline_desc = ""
self.check_for_compat = True
def add_sbin(self, element):
if self.check_for_compat and element.type.find("source")!= -1:
if element.sbin.find("tee name=%s_tee" %element.tags[0])!=-1:
logger.info("Adding %s source %s to pipeline" %(element.type, element.description))
self._add_sbin(element.sbin)
else:
oks = 0
for tag in element.tags:
if self.pipeline_desc.find("name=%s_tee" %tag)!=-1:
oks += 1
if not len(element.tags) == oks:
logger.error("Compatible %s source branch not found to fit %s" %(element.type, element.description))
else:
logger.info("Adding branch %s %s to pipeline" %(element.type, element.description))
self._add_sbin(element.sbin)
else:
self._add_sbin(element.sbin)
def add_many(self, *args):
for element in args:
if element is not None:
self.add_sbin(element)
def _add_sbin(self, sbin):
self.pipeline_desc += "%s " %sbin
def get_pipeline(self):
logger.info("Pipeline is:\n%s" %self.pipeline_desc)
| gpl-2.0 |
flyher/pymo | symbian/PythonForS60_1.9.6/module-repo/standard-modules/encodings/cp1258.py | 593 | 13620 | """ Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1258',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
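# Illustrative usage (editor's sketch, not part of the original module):
#
#     '\xfe'.decode('cp1258')        # -> u'\u20ab' (DONG SIGN)
#     u'\u20ab'.encode('cp1258')     # -> '\xfe'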
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
u'\u0303' # 0xDE -> COMBINING TILDE
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\u0323' # 0xF2 -> COMBINING DOT BELOW
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
u'\u20ab' # 0xFE -> DONG SIGN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
usmschuck/canvas | vendor/bundle/ruby/1.9.1/gems/pygments.rb-0.5.0/vendor/pygments-main/pygments/lexers/_asybuiltins.py | 369 | 27319 | # -*- coding: utf-8 -*-
"""
pygments.lexers._asybuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the asy-function names and asy-variable names of
Asymptote.
Do not edit the ASYFUNCNAME and ASYVARNAME sets by hand.
TODO: perl/python script in Asymptote SVN similar to asy-list.pl but only
for function and variable names.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
ASYFUNCNAME = set([
'AND',
'Arc',
'ArcArrow',
'ArcArrows',
'Arrow',
'Arrows',
'Automatic',
'AvantGarde',
'BBox',
'BWRainbow',
'BWRainbow2',
'Bar',
'Bars',
'BeginArcArrow',
'BeginArrow',
'BeginBar',
'BeginDotMargin',
'BeginMargin',
'BeginPenMargin',
'Blank',
'Bookman',
'Bottom',
'BottomTop',
'Bounds',
'Break',
'Broken',
'BrokenLog',
'Ceil',
'Circle',
'CircleBarIntervalMarker',
'Cos',
'Courier',
'CrossIntervalMarker',
'DefaultFormat',
'DefaultLogFormat',
'Degrees',
'Dir',
'DotMargin',
'DotMargins',
'Dotted',
'Draw',
'Drawline',
'Embed',
'EndArcArrow',
'EndArrow',
'EndBar',
'EndDotMargin',
'EndMargin',
'EndPenMargin',
'Fill',
'FillDraw',
'Floor',
'Format',
'Full',
'Gaussian',
'Gaussrand',
'Gaussrandpair',
'Gradient',
'Grayscale',
'Helvetica',
'Hermite',
'HookHead',
'InOutTicks',
'InTicks',
'J',
'Label',
'Landscape',
'Left',
'LeftRight',
'LeftTicks',
'Legend',
'Linear',
'Link',
'Log',
'LogFormat',
'Margin',
'Margins',
'Mark',
'MidArcArrow',
'MidArrow',
'NOT',
'NewCenturySchoolBook',
'NoBox',
'NoMargin',
'NoModifier',
'NoTicks',
'NoTicks3',
'NoZero',
'NoZeroFormat',
'None',
'OR',
'OmitFormat',
'OmitTick',
'OutTicks',
'Ox',
'Oy',
'Palatino',
'PaletteTicks',
'Pen',
'PenMargin',
'PenMargins',
'Pentype',
'Portrait',
'RadialShade',
'Rainbow',
'Range',
'Relative',
'Right',
'RightTicks',
'Rotate',
'Round',
'SQR',
'Scale',
'ScaleX',
'ScaleY',
'ScaleZ',
'Seascape',
'Shift',
'Sin',
'Slant',
'Spline',
'StickIntervalMarker',
'Straight',
'Symbol',
'Tan',
'TeXify',
'Ticks',
'Ticks3',
'TildeIntervalMarker',
'TimesRoman',
'Top',
'TrueMargin',
'UnFill',
'UpsideDown',
'Wheel',
'X',
'XEquals',
'XOR',
'XY',
'XYEquals',
'XYZero',
'XYgrid',
'XZEquals',
'XZZero',
'XZero',
'XZgrid',
'Y',
'YEquals',
'YXgrid',
'YZ',
'YZEquals',
'YZZero',
'YZero',
'YZgrid',
'Z',
'ZX',
'ZXgrid',
'ZYgrid',
'ZapfChancery',
'ZapfDingbats',
'_cputime',
'_draw',
'_eval',
'_image',
'_labelpath',
'_projection',
'_strokepath',
'_texpath',
'aCos',
'aSin',
'aTan',
'abort',
'abs',
'accel',
'acos',
'acosh',
'acot',
'acsc',
'add',
'addArrow',
'addMargins',
'addSaveFunction',
'addnode',
'addnodes',
'addpenarc',
'addpenline',
'addseg',
'adjust',
'alias',
'align',
'all',
'altitude',
'angabscissa',
'angle',
'angpoint',
'animate',
'annotate',
'anticomplementary',
'antipedal',
'apply',
'approximate',
'arc',
'arcarrowsize',
'arccircle',
'arcdir',
'arcfromcenter',
'arcfromfocus',
'arclength',
'arcnodesnumber',
'arcpoint',
'arcsubtended',
'arcsubtendedcenter',
'arctime',
'arctopath',
'array',
'arrow',
'arrow2',
'arrowbase',
'arrowbasepoints',
'arrowsize',
'asec',
'asin',
'asinh',
'ask',
'assert',
'asy',
'asycode',
'asydir',
'asyfigure',
'asyfilecode',
'asyinclude',
'asywrite',
'atan',
'atan2',
'atanh',
'atbreakpoint',
'atexit',
'atime',
'attach',
'attract',
'atupdate',
'autoformat',
'autoscale',
'autoscale3',
'axes',
'axes3',
'axialshade',
'axis',
'axiscoverage',
'azimuth',
'babel',
'background',
'bangles',
'bar',
'barmarksize',
'barsize',
'basealign',
'baseline',
'bbox',
'beep',
'begin',
'beginclip',
'begingroup',
'beginpoint',
'between',
'bevel',
'bezier',
'bezierP',
'bezierPP',
'bezierPPP',
'bezulate',
'bibliography',
'bibliographystyle',
'binarytree',
'binarytreeNode',
'binomial',
'binput',
'bins',
'bisector',
'bisectorpoint',
'blend',
'boutput',
'box',
'bqe',
'breakpoint',
'breakpoints',
'brick',
'buildRestoreDefaults',
'buildRestoreThunk',
'buildcycle',
'bulletcolor',
'canonical',
'canonicalcartesiansystem',
'cartesiansystem',
'case1',
'case2',
'case3',
'cbrt',
'cd',
'ceil',
'center',
'centerToFocus',
'centroid',
'cevian',
'change2',
'changecoordsys',
'checkSegment',
'checkconditionlength',
'checker',
'checklengths',
'checkposition',
'checktriangle',
'choose',
'circle',
'circlebarframe',
'circlemarkradius',
'circlenodesnumber',
'circumcenter',
'circumcircle',
'clamped',
'clear',
'clip',
'clipdraw',
'close',
'cmyk',
'code',
'colatitude',
'collect',
'collinear',
'color',
'colorless',
'colors',
'colorspace',
'comma',
'compassmark',
'complement',
'complementary',
'concat',
'concurrent',
'cone',
'conic',
'conicnodesnumber',
'conictype',
'conj',
'connect',
'containmentTree',
'contains',
'contour',
'contour3',
'controlSpecifier',
'convert',
'coordinates',
'coordsys',
'copy',
'cos',
'cosh',
'cot',
'countIntersections',
'cputime',
'crop',
'cropcode',
'cross',
'crossframe',
'crosshatch',
'crossmarksize',
'csc',
'cubicroots',
'curabscissa',
'curlSpecifier',
'curpoint',
'currentarrow',
'currentexitfunction',
'currentmomarrow',
'currentpolarconicroutine',
'curve',
'cut',
'cutafter',
'cutbefore',
'cyclic',
'cylinder',
'debugger',
'deconstruct',
'defaultdir',
'defaultformat',
'defaultpen',
'defined',
'degenerate',
'degrees',
'delete',
'deletepreamble',
'determinant',
'diagonal',
'diamond',
'diffdiv',
'dir',
'dirSpecifier',
'dirtime',
'display',
'distance',
'divisors',
'do_overpaint',
'dot',
'dotframe',
'dotsize',
'downcase',
'draw',
'drawAll',
'drawDoubleLine',
'drawFermion',
'drawGhost',
'drawGluon',
'drawMomArrow',
'drawPhoton',
'drawScalar',
'drawVertex',
'drawVertexBox',
'drawVertexBoxO',
'drawVertexBoxX',
'drawVertexO',
'drawVertexOX',
'drawVertexTriangle',
'drawVertexTriangleO',
'drawVertexX',
'drawarrow',
'drawarrow2',
'drawline',
'drawtick',
'duplicate',
'elle',
'ellipse',
'ellipsenodesnumber',
'embed',
'embed3',
'empty',
'enclose',
'end',
'endScript',
'endclip',
'endgroup',
'endl',
'endpoint',
'endpoints',
'eof',
'eol',
'equation',
'equations',
'erase',
'erasestep',
'erf',
'erfc',
'error',
'errorbar',
'errorbars',
'eval',
'excenter',
'excircle',
'exit',
'exitXasyMode',
'exitfunction',
'exp',
'expfactors',
'expi',
'expm1',
'exradius',
'extend',
'extension',
'extouch',
'fabs',
'factorial',
'fermat',
'fft',
'fhorner',
'figure',
'file',
'filecode',
'fill',
'filldraw',
'filloutside',
'fillrule',
'filltype',
'find',
'finite',
'finiteDifferenceJacobian',
'firstcut',
'firstframe',
'fit',
'fit2',
'fixedscaling',
'floor',
'flush',
'fmdefaults',
'fmod',
'focusToCenter',
'font',
'fontcommand',
'fontsize',
'foot',
'format',
'frac',
'frequency',
'fromCenter',
'fromFocus',
'fspline',
'functionshade',
'gamma',
'generate_random_backtrace',
'generateticks',
'gergonne',
'getc',
'getint',
'getpair',
'getreal',
'getstring',
'gettriple',
'gluon',
'gouraudshade',
'graph',
'graphic',
'gray',
'grestore',
'grid',
'grid3',
'gsave',
'halfbox',
'hatch',
'hdiffdiv',
'hermite',
'hex',
'histogram',
'history',
'hline',
'hprojection',
'hsv',
'hyperbola',
'hyperbolanodesnumber',
'hyperlink',
'hypot',
'identity',
'image',
'incenter',
'incentral',
'incircle',
'increasing',
'incrementposition',
'indexedTransform',
'indexedfigure',
'initXasyMode',
'initdefaults',
'input',
'inradius',
'insert',
'inside',
'integrate',
'interactive',
'interior',
'interp',
'interpolate',
'intersect',
'intersection',
'intersectionpoint',
'intersectionpoints',
'intersections',
'intouch',
'inverse',
'inversion',
'invisible',
'is3D',
'isDuplicate',
'isogonal',
'isogonalconjugate',
'isotomic',
'isotomicconjugate',
'isparabola',
'italic',
'item',
'key',
'kurtosis',
'kurtosisexcess',
'label',
'labelaxis',
'labelmargin',
'labelpath',
'labels',
'labeltick',
'labelx',
'labelx3',
'labely',
'labely3',
'labelz',
'labelz3',
'lastcut',
'latex',
'latitude',
'latticeshade',
'layer',
'layout',
'ldexp',
'leastsquares',
'legend',
'legenditem',
'length',
'lift',
'light',
'limits',
'line',
'linear',
'linecap',
'lineinversion',
'linejoin',
'linemargin',
'lineskip',
'linetype',
'linewidth',
'link',
'list',
'lm_enorm',
'lm_evaluate_default',
'lm_lmdif',
'lm_lmpar',
'lm_minimize',
'lm_print_default',
'lm_print_quiet',
'lm_qrfac',
'lm_qrsolv',
'locale',
'locate',
'locatefile',
'location',
'log',
'log10',
'log1p',
'logaxiscoverage',
'longitude',
'lookup',
'magnetize',
'makeNode',
'makedraw',
'makepen',
'map',
'margin',
'markangle',
'markangleradius',
'markanglespace',
'markarc',
'marker',
'markinterval',
'marknodes',
'markrightangle',
'markuniform',
'mass',
'masscenter',
'massformat',
'math',
'max',
'max3',
'maxbezier',
'maxbound',
'maxcoords',
'maxlength',
'maxratio',
'maxtimes',
'mean',
'medial',
'median',
'midpoint',
'min',
'min3',
'minbezier',
'minbound',
'minipage',
'minratio',
'mintimes',
'miterlimit',
'momArrowPath',
'momarrowsize',
'monotonic',
'multifigure',
'nativeformat',
'natural',
'needshipout',
'newl',
'newpage',
'newslide',
'newton',
'newtree',
'nextframe',
'nextnormal',
'nextpage',
'nib',
'nodabscissa',
'none',
'norm',
'normalvideo',
'notaknot',
'nowarn',
'numberpage',
'nurb',
'object',
'offset',
'onpath',
'opacity',
'opposite',
'orientation',
'orig_circlenodesnumber',
'orig_circlenodesnumber1',
'orig_draw',
'orig_ellipsenodesnumber',
'orig_ellipsenodesnumber1',
'orig_hyperbolanodesnumber',
'orig_parabolanodesnumber',
'origin',
'orthic',
'orthocentercenter',
'outformat',
'outline',
'outprefix',
'output',
'overloadedMessage',
'overwrite',
'pack',
'pad',
'pairs',
'palette',
'parabola',
'parabolanodesnumber',
'parallel',
'partialsum',
'path',
'path3',
'pattern',
'pause',
'pdf',
'pedal',
'periodic',
'perp',
'perpendicular',
'perpendicularmark',
'phantom',
'phi1',
'phi2',
'phi3',
'photon',
'piecewisestraight',
'point',
'polar',
'polarconicroutine',
'polargraph',
'polygon',
'postcontrol',
'postscript',
'pow10',
'ppoint',
'prc',
'prc0',
'precision',
'precontrol',
'prepend',
'print_random_addresses',
'project',
'projection',
'purge',
'pwhermite',
'quadrant',
'quadraticroots',
'quantize',
'quarticroots',
'quotient',
'radialshade',
'radians',
'radicalcenter',
'radicalline',
'radius',
'rand',
'randompath',
'rd',
'readline',
'realmult',
'realquarticroots',
'rectangle',
'rectangular',
'rectify',
'reflect',
'relabscissa',
'relative',
'relativedistance',
'reldir',
'relpoint',
'reltime',
'remainder',
'remark',
'removeDuplicates',
'rename',
'replace',
'report',
'resetdefaultpen',
'restore',
'restoredefaults',
'reverse',
'reversevideo',
'rf',
'rfind',
'rgb',
'rgba',
'rgbint',
'rms',
'rotate',
'rotateO',
'rotation',
'round',
'roundbox',
'roundedpath',
'roundrectangle',
'samecoordsys',
'sameside',
'sample',
'save',
'savedefaults',
'saveline',
'scale',
'scale3',
'scaleO',
'scaleT',
'scaleless',
'scientific',
'search',
'searchtree',
'sec',
'secondaryX',
'secondaryY',
'seconds',
'section',
'sector',
'seek',
'seekeof',
'segment',
'sequence',
'setpens',
'sgn',
'sgnd',
'sharpangle',
'sharpdegrees',
'shift',
'shiftless',
'shipout',
'shipout3',
'show',
'side',
'simeq',
'simpson',
'sin',
'single',
'sinh',
'size',
'size3',
'skewness',
'skip',
'slant',
'sleep',
'slope',
'slopefield',
'solve',
'solveBVP',
'sort',
'sourceline',
'sphere',
'split',
'sqrt',
'square',
'srand',
'standardizecoordsys',
'startScript',
'startTrembling',
'stdev',
'step',
'stickframe',
'stickmarksize',
'stickmarkspace',
'stop',
'straight',
'straightness',
'string',
'stripdirectory',
'stripextension',
'stripfile',
'strokepath',
'subdivide',
'subitem',
'subpath',
'substr',
'sum',
'surface',
'symmedial',
'symmedian',
'system',
'tab',
'tableau',
'tan',
'tangent',
'tangential',
'tangents',
'tanh',
'tell',
'tensionSpecifier',
'tensorshade',
'tex',
'texcolor',
'texify',
'texpath',
'texpreamble',
'texreset',
'texshipout',
'texsize',
'textpath',
'thick',
'thin',
'tick',
'tickMax',
'tickMax3',
'tickMin',
'tickMin3',
'ticklabelshift',
'ticklocate',
'tildeframe',
'tildemarksize',
'tile',
'tiling',
'time',
'times',
'title',
'titlepage',
'topbox',
'transform',
'transformation',
'transpose',
'tremble',
'trembleFuzz',
'tremble_circlenodesnumber',
'tremble_circlenodesnumber1',
'tremble_draw',
'tremble_ellipsenodesnumber',
'tremble_ellipsenodesnumber1',
'tremble_hyperbolanodesnumber',
'tremble_marknodes',
'tremble_markuniform',
'tremble_parabolanodesnumber',
'triangle',
'triangleAbc',
'triangleabc',
'triangulate',
'tricoef',
'tridiagonal',
'trilinear',
'trim',
'trueMagnetize',
'truepoint',
'tube',
'uncycle',
'unfill',
'uniform',
'unit',
'unitrand',
'unitsize',
'unityroot',
'unstraighten',
'upcase',
'updatefunction',
'uperiodic',
'upscale',
'uptodate',
'usepackage',
'usersetting',
'usetypescript',
'usleep',
'value',
'variance',
'variancebiased',
'vbox',
'vector',
'vectorfield',
'verbatim',
'view',
'vline',
'vperiodic',
'vprojection',
'warn',
'warning',
'windingnumber',
'write',
'xaxis',
'xaxis3',
'xaxis3At',
'xaxisAt',
'xequals',
'xinput',
'xlimits',
'xoutput',
'xpart',
'xscale',
'xscaleO',
'xtick',
'xtick3',
'xtrans',
'yaxis',
'yaxis3',
'yaxis3At',
'yaxisAt',
'yequals',
'ylimits',
'ypart',
'yscale',
'yscaleO',
'ytick',
'ytick3',
'ytrans',
'zaxis3',
'zaxis3At',
'zero',
'zero3',
'zlimits',
'zpart',
'ztick',
'ztick3',
'ztrans'
])
ASYVARNAME = set([
'AliceBlue',
'Align',
'Allow',
'AntiqueWhite',
'Apricot',
'Aqua',
'Aquamarine',
'Aspect',
'Azure',
'BeginPoint',
'Beige',
'Bisque',
'Bittersweet',
'Black',
'BlanchedAlmond',
'Blue',
'BlueGreen',
'BlueViolet',
'Both',
'Break',
'BrickRed',
'Brown',
'BurlyWood',
'BurntOrange',
'CCW',
'CW',
'CadetBlue',
'CarnationPink',
'Center',
'Centered',
'Cerulean',
'Chartreuse',
'Chocolate',
'Coeff',
'Coral',
'CornflowerBlue',
'Cornsilk',
'Crimson',
'Crop',
'Cyan',
'Dandelion',
'DarkBlue',
'DarkCyan',
'DarkGoldenrod',
'DarkGray',
'DarkGreen',
'DarkKhaki',
'DarkMagenta',
'DarkOliveGreen',
'DarkOrange',
'DarkOrchid',
'DarkRed',
'DarkSalmon',
'DarkSeaGreen',
'DarkSlateBlue',
'DarkSlateGray',
'DarkTurquoise',
'DarkViolet',
'DeepPink',
'DeepSkyBlue',
'DefaultHead',
'DimGray',
'DodgerBlue',
'Dotted',
'Draw',
'E',
'ENE',
'EPS',
'ESE',
'E_Euler',
'E_PC',
'E_RK2',
'E_RK3BS',
'Emerald',
'EndPoint',
'Euler',
'Fill',
'FillDraw',
'FireBrick',
'FloralWhite',
'ForestGreen',
'Fuchsia',
'Gainsboro',
'GhostWhite',
'Gold',
'Goldenrod',
'Gray',
'Green',
'GreenYellow',
'Honeydew',
'HookHead',
'Horizontal',
'HotPink',
'I',
'IgnoreAspect',
'IndianRed',
'Indigo',
'Ivory',
'JOIN_IN',
'JOIN_OUT',
'JungleGreen',
'Khaki',
'LM_DWARF',
'LM_MACHEP',
'LM_SQRT_DWARF',
'LM_SQRT_GIANT',
'LM_USERTOL',
'Label',
'Lavender',
'LavenderBlush',
'LawnGreen',
'LeftJustified',
'LeftSide',
'LemonChiffon',
'LightBlue',
'LightCoral',
'LightCyan',
'LightGoldenrodYellow',
'LightGreen',
'LightGrey',
'LightPink',
'LightSalmon',
'LightSeaGreen',
'LightSkyBlue',
'LightSlateGray',
'LightSteelBlue',
'LightYellow',
'Lime',
'LimeGreen',
'Linear',
'Linen',
'Log',
'Logarithmic',
'Magenta',
'Mahogany',
'Mark',
'MarkFill',
'Maroon',
'Max',
'MediumAquamarine',
'MediumBlue',
'MediumOrchid',
'MediumPurple',
'MediumSeaGreen',
'MediumSlateBlue',
'MediumSpringGreen',
'MediumTurquoise',
'MediumVioletRed',
'Melon',
'MidPoint',
'MidnightBlue',
'Min',
'MintCream',
'MistyRose',
'Moccasin',
'Move',
'MoveQuiet',
'Mulberry',
'N',
'NE',
'NNE',
'NNW',
'NW',
'NavajoWhite',
'Navy',
'NavyBlue',
'NoAlign',
'NoCrop',
'NoFill',
'NoSide',
'OldLace',
'Olive',
'OliveDrab',
'OliveGreen',
'Orange',
'OrangeRed',
'Orchid',
'Ox',
'Oy',
'PC',
'PaleGoldenrod',
'PaleGreen',
'PaleTurquoise',
'PaleVioletRed',
'PapayaWhip',
'Peach',
'PeachPuff',
'Periwinkle',
'Peru',
'PineGreen',
'Pink',
'Plum',
'PowderBlue',
'ProcessBlue',
'Purple',
'RK2',
'RK3',
'RK3BS',
'RK4',
'RK5',
'RK5DP',
'RK5F',
'RawSienna',
'Red',
'RedOrange',
'RedViolet',
'Rhodamine',
'RightJustified',
'RightSide',
'RosyBrown',
'RoyalBlue',
'RoyalPurple',
'RubineRed',
'S',
'SE',
'SSE',
'SSW',
'SW',
'SaddleBrown',
'Salmon',
'SandyBrown',
'SeaGreen',
'Seashell',
'Sepia',
'Sienna',
'Silver',
'SimpleHead',
'SkyBlue',
'SlateBlue',
'SlateGray',
'Snow',
'SpringGreen',
'SteelBlue',
'Suppress',
'SuppressQuiet',
'Tan',
'TeXHead',
'Teal',
'TealBlue',
'Thistle',
'Ticksize',
'Tomato',
'Turquoise',
'UnFill',
'VERSION',
'Value',
'Vertical',
'Violet',
'VioletRed',
'W',
'WNW',
'WSW',
'Wheat',
'White',
'WhiteSmoke',
'WildStrawberry',
'XYAlign',
'YAlign',
'Yellow',
'YellowGreen',
'YellowOrange',
'addpenarc',
'addpenline',
'align',
'allowstepping',
'angularsystem',
'animationdelay',
'appendsuffix',
'arcarrowangle',
'arcarrowfactor',
'arrow2sizelimit',
'arrowangle',
'arrowbarb',
'arrowdir',
'arrowfactor',
'arrowhookfactor',
'arrowlength',
'arrowsizelimit',
'arrowtexfactor',
'authorpen',
'axis',
'axiscoverage',
'axislabelfactor',
'background',
'backgroundcolor',
'backgroundpen',
'barfactor',
'barmarksizefactor',
'basealign',
'baselinetemplate',
'beveljoin',
'bigvertexpen',
'bigvertexsize',
'black',
'blue',
'bm',
'bottom',
'bp',
'brown',
'bullet',
'byfoci',
'byvertices',
'camerafactor',
'chartreuse',
'circlemarkradiusfactor',
'circlenodesnumberfactor',
'circleprecision',
'circlescale',
'cm',
'codefile',
'codepen',
'codeskip',
'colorPen',
'coloredNodes',
'coloredSegments',
'conditionlength',
'conicnodesfactor',
'count',
'cputimeformat',
'crossmarksizefactor',
'currentcoordsys',
'currentlight',
'currentpatterns',
'currentpen',
'currentpicture',
'currentposition',
'currentprojection',
'curvilinearsystem',
'cuttings',
'cyan',
'darkblue',
'darkbrown',
'darkcyan',
'darkgray',
'darkgreen',
'darkgrey',
'darkmagenta',
'darkolive',
'darkred',
'dashdotted',
'dashed',
'datepen',
'dateskip',
'debuggerlines',
'debugging',
'deepblue',
'deepcyan',
'deepgray',
'deepgreen',
'deepgrey',
'deepmagenta',
'deepred',
'default',
'defaultControl',
'defaultS',
'defaultbackpen',
'defaultcoordsys',
'defaultfilename',
'defaultformat',
'defaultmassformat',
'defaultpen',
'diagnostics',
'differentlengths',
'dot',
'dotfactor',
'dotframe',
'dotted',
'doublelinepen',
'doublelinespacing',
'down',
'duplicateFuzz',
'ellipsenodesnumberfactor',
'eps',
'epsgeo',
'epsilon',
'evenodd',
'extendcap',
'fermionpen',
'figureborder',
'figuremattpen',
'firstnode',
'firststep',
'foregroundcolor',
'fuchsia',
'fuzz',
'gapfactor',
'ghostpen',
'gluonamplitude',
'gluonpen',
'gluonratio',
'gray',
'green',
'grey',
'hatchepsilon',
'havepagenumber',
'heavyblue',
'heavycyan',
'heavygray',
'heavygreen',
'heavygrey',
'heavymagenta',
'heavyred',
'hline',
'hwratio',
'hyperbolanodesnumberfactor',
'identity4',
'ignore',
'inXasyMode',
'inch',
'inches',
'includegraphicscommand',
'inf',
'infinity',
'institutionpen',
'intMax',
'intMin',
'invert',
'invisible',
'itempen',
'itemskip',
'itemstep',
'labelmargin',
'landscape',
'lastnode',
'left',
'legendhskip',
'legendlinelength',
'legendmargin',
'legendmarkersize',
'legendmaxrelativewidth',
'legendvskip',
'lightblue',
'lightcyan',
'lightgray',
'lightgreen',
'lightgrey',
'lightmagenta',
'lightolive',
'lightred',
'lightyellow',
'linemargin',
'lm_infmsg',
'lm_shortmsg',
'longdashdotted',
'longdashed',
'magenta',
'magneticPoints',
'magneticRadius',
'mantissaBits',
'markangleradius',
'markangleradiusfactor',
'markanglespace',
'markanglespacefactor',
'mediumblue',
'mediumcyan',
'mediumgray',
'mediumgreen',
'mediumgrey',
'mediummagenta',
'mediumred',
'mediumyellow',
'middle',
'minDistDefault',
'minblockheight',
'minblockwidth',
'mincirclediameter',
'minipagemargin',
'minipagewidth',
'minvertexangle',
'miterjoin',
'mm',
'momarrowfactor',
'momarrowlength',
'momarrowmargin',
'momarrowoffset',
'momarrowpen',
'monoPen',
'morepoints',
'nCircle',
'newbulletcolor',
'ngraph',
'nil',
'nmesh',
'nobasealign',
'nodeMarginDefault',
'nodesystem',
'nomarker',
'nopoint',
'noprimary',
'nullpath',
'nullpen',
'numarray',
'ocgindex',
'oldbulletcolor',
'olive',
'orange',
'origin',
'overpaint',
'page',
'pageheight',
'pagemargin',
'pagenumberalign',
'pagenumberpen',
'pagenumberposition',
'pagewidth',
'paleblue',
'palecyan',
'palegray',
'palegreen',
'palegrey',
'palemagenta',
'palered',
'paleyellow',
'parabolanodesnumberfactor',
'perpfactor',
'phi',
'photonamplitude',
'photonpen',
'photonratio',
'pi',
'pink',
'plain',
'plus',
'preamblenodes',
'pt',
'purple',
'r3',
'r4a',
'r4b',
'randMax',
'realDigits',
'realEpsilon',
'realMax',
'realMin',
'red',
'relativesystem',
'reverse',
'right',
'roundcap',
'roundjoin',
'royalblue',
'salmon',
'saveFunctions',
'scalarpen',
'sequencereal',
'settings',
'shipped',
'signedtrailingzero',
'solid',
'springgreen',
'sqrtEpsilon',
'squarecap',
'squarepen',
'startposition',
'stdin',
'stdout',
'stepfactor',
'stepfraction',
'steppagenumberpen',
'stepping',
'stickframe',
'stickmarksizefactor',
'stickmarkspacefactor',
'textpen',
'ticksize',
'tildeframe',
'tildemarksizefactor',
'tinv',
'titlealign',
'titlepagepen',
'titlepageposition',
'titlepen',
'titleskip',
'top',
'trailingzero',
'treeLevelStep',
'treeMinNodeWidth',
'treeNodeStep',
'trembleAngle',
'trembleFrequency',
'trembleRandom',
'tremblingMode',
'undefined',
'unitcircle',
'unitsquare',
'up',
'urlpen',
'urlskip',
'version',
'vertexpen',
'vertexsize',
'viewportmargin',
'viewportsize',
'vline',
'white',
'wye',
'xformStack',
'yellow',
'ylabelwidth',
'zerotickfuzz',
'zerowinding'
])
| agpl-3.0 |
SummerLW/Perf-Insight-Report | third_party/gsutil/third_party/boto/boto/sqs/__init__.py | 129 | 1705 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.sqs.regioninfo import SQSRegionInfo
from boto.regioninfo import get_regions
def regions():
"""
Get all available regions for the SQS service.
:rtype: list
:return: A list of :class:`boto.sqs.regioninfo.RegionInfo`
"""
from boto.sqs.connection import SQSConnection
return get_regions(
'sqs',
region_cls=SQSRegionInfo,
connection_cls=SQSConnection
)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
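# Illustrative usage sketch (not part of the upstream boto API surface): shows
# how regions() and connect_to_region() are typically combined. The region
# name below is an assumption for demonstration; credentials are expected to
# come from the usual boto configuration.
def _example_list_queue_names(region_name='us-east-1'):
    """Connect to SQS in one region and return the names of its queues."""
    connection = connect_to_region(region_name)
    if connection is None:
        return []
    return [queue.name for queue in connection.get_all_queues()]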
| bsd-3-clause |
andrewyoung1991/abjad | abjad/tools/abctools/AbjadObject.py | 1 | 3350 | # -*- encoding: utf-8 -*-
import abc
AbstractBase = abc.ABCMeta(
'AbstractBase',
(),
{
'__metaclass__': abc.ABCMeta,
'__module__': __name__,
'__slots__': (),
},
)
class AbjadObject(AbstractBase):
'''Abstract base class from which many custom classes inherit.
'''
### CLASS VARIABLES ###
__slots__ = ()
### SPECIAL METHODS ###
def __eq__(self, expr):
r'''Is true when ID of `expr` equals ID of Abjad object.
Otherwise false.
Returns boolean.
'''
return id(self) == id(expr)
def __format__(self, format_specification=''):
r'''Formats Abjad object.
Set `format_specification` to `''` or `'storage'`.
Interprets `''` equal to `'storage'`.
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'storage'):
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getstate__(self):
r'''Gets state of Abjad object.
Returns dictionary.
'''
if hasattr(self, '__dict__'):
return vars(self)
state = {}
for class_ in type(self).__mro__:
for slot in getattr(class_, '__slots__', ()):
state[slot] = getattr(self, slot, None)
return state
def __hash__(self):
r'''Hashes Abjad object.
Required to be explicitly re-defined on Python 3 if __eq__ changes.
Returns integer.
'''
return super(AbjadObject, self).__hash__()
def __ne__(self, expr):
r'''Is true when Abjad object does not equal `expr`.
Otherwise false.
Returns boolean.
'''
return not self == expr
def __repr__(self):
r'''Gets interpreter representation of Abjad object.
Returns string.
'''
from abjad.tools import systemtools
return systemtools.StorageFormatManager.get_repr_format(self)
def __setstate__(self, state):
r'''Sets state of Abjad object.
Returns none.
'''
for key, value in state.items():
setattr(self, key, value)
### PRIVATE PROPERTIES ###
@property
def _one_line_menu_summary(self):
return str(self)
@property
def _repr_specification(self):
from abjad.tools.topleveltools import new
return new(
self._storage_format_specification,
is_indented=False,
)
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
return systemtools.StorageFormatSpecification(self)
### PRIVATE METHODS ###
def _debug(self, value, annotation=None, blank=False):
if annotation is None:
print('debug: {!r}'.format(value))
else:
print('debug ({}): {!r}'.format(annotation, value))
if blank:
print('')
def _debug_values(self, values, annotation=None, blank=True):
if values:
for value in values:
self._debug(value, annotation=annotation)
if blank:
print('')
else:
self._debug(repr(values), annotation=annotation)
if blank:
print('') | gpl-3.0 |
alebcay/namebench | libnamebench/charts_test.py | 175 | 5898 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for all functions related to chart generation."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import nameserver
import charts
def _ExampleRunsData():
odns = nameserver.NameServer('208.67.220.220', name='OpenDNS')
udns = nameserver.NameServer('156.154.70.1', name='UltraDNS')
data = [(odns, [79.0, 191.0, 84.0, 878.0, 82.0, 85.0, 882.0, 187.0, 79.0,
80.0, 79.0, 261.0, 79.0, 83.0, 82.0, 420.0, 822.0, 1890.0,
78.0, 79.0, 86.0, 89.0, 125.0, 94.0, 81.0, 79.0, 81.0, 79.0,
1105.0, 84.0]),
(udns, [9.0, 8.0, 13.0, 329.0, 9.0, 9.0, 773.0, 52.0, 9.0, 8.0, 8.0,
143.0, 27.0, 104.0, 8.0, 8.0, 320.0, 594.0, 8.0, 312.0, 11.0,
9.0, 174.0, 83.0, 8.0, 9.0, 8.0, 8.0, 496.0, 533.0])]
return data
# TODO(tstromberg): Clean up long lines, cleanse IP/hostnames.
class ChartFunctionsTest(unittest.TestCase):
def testDarkenHexColorCode(self):
self.assertEquals(charts.DarkenHexColorCode('ffffff', 0), 'ffffff')
self.assertEquals(charts.DarkenHexColorCode('2c2c2c', 1), '0c0c0c')
self.assertEquals(charts.DarkenHexColorCode('ff0000', 1), 'df0000')
self.assertEquals(charts.DarkenHexColorCode('ff00ff', 2), 'bf00bf')
def testGoodTicks(self):
self.assertEquals(charts._GoodTicks(50), 5)
self.assertEquals(charts._GoodTicks(9.8, tick_size=0.5, num_ticks=7), 2.0)
class BasicChartTests(unittest.TestCase):
def testPerRunDurationBarGraph(self):
sorted_averages = [
('10.0.0.1', [5.871, 2.6599]),
('192.168.1.2', [15.0867, 15.2531]),
('172.168.1.2', [70.7752, 15.02163]),
]
results = charts.PerRunDurationBarGraph(sorted_averages)
self.assertTrue('e%3AFBM48Y%2CCRNBM' in results)
expected = (
'http://chart.apis.google.com/chart?chxt=y%2Cx%2Cx&chd=e%3AFBM48Y%2'
'CCRNBM0&chxp=2%2C31&chxr=1%2C0%2C75%7C2%2C-3.75%2C78.75&chxtc=1%2C-720'
'&chco=4684ee%2C00248e&chbh=a&chs=720x130&cht=bhg&chxl=0%3A%7C'
'172.168.1.2%7C192.168.1.2%7C10.0.0.1%7C1%3A%7C0%7C5%7C10%7C15%7C20'
'%7C25%7C30%7C35%7C40%7C45%7C50%7C55%7C60%7C65%7C70%7C75%7C2%3A%7C'
'Duration%20in%20ms.&chdl=Run%201%7CRun%202'
)
self.assertEqual(results, expected)
def testMinimumDurationBarGraph(self):
fastest = ((nameserver.NameServer('208.67.220.220', name='OpenDNS'), 10.0),
(nameserver.NameServer('156.154.70.1', name='UltraDNS'), 15.75))
expected = (
'http://chart.apis.google.com/chart?chxt=y%2Cx%2Cx&chd=e%3AgAyZ&'
'chxp=2%2C9&chxr=1%2C0%2C20%7C2%2C-1.0%2C21.0&chxtc=1%2C-720'
'&chco=0000ff&chbh=a&chs=720x78&cht=bhg&chxl=0%3A%7CUltraDNS%7COpenDNS'
'%7C1%3A%7C0%7C3%7C6%7C9%7C12%7C15%7C18%7C20%7C2%3A%7C'
'Duration%20in%20ms.'
)
self.assertEquals(charts.MinimumDurationBarGraph(fastest), expected)
def testMaximumRunDuration(self):
runs_data = [
('G', [3.851, 4.7690, 423.971998, 189.674001, 14.477, 174.788001]),
('Y', [99.99, 488.88])
]
self.assertEquals(charts._MaximumRunDuration(runs_data), 488.88)
class DistributionChartTests(unittest.TestCase):
def testMakeCumulativeDistribution(self):
runs_data = _ExampleRunsData()
expected = [
(runs_data[0][0],
[(0, 0), (3.3333333333333335, 78.0),(26.666666666666668, 79.0),
(30.0, 80.0), (36.666666666666664, 81.0), (43.333333333333336, 82.0),
(46.666666666666664, 83.0), (53.333333333333336, 84.0),
(56.666666666666664, 85.0), (60.0, 86.0), (63.333333333333329, 89.0),
(66.666666666666657, 94.0), (70.0, 125.0), (73.333333333333329, 187.0),
(76.666666666666671, 191.0), (80.0, 261.0), (83.333333333333343, 420.0),
(86.666666666666671, 822.0), (90.0, 878.0), (93.333333333333329, 882.0),
(96.666666666666671, 1105.0), (100, 1890.0)]),
(runs_data[1][0],
[(0, 0), (30.0, 8.0), (50.0, 9.0), (53.333333333333336, 11.0),
(56.666666666666664, 13.0), (60.0, 27.0), (63.333333333333329, 52.0),
(66.666666666666657, 83.0), (70.0, 104.0), (73.333333333333329, 143.0),
(76.666666666666671, 174.0), (80.0, 312.0), (83.333333333333343, 320.0),
(86.666666666666671, 329.0), (90.0, 496.0), (93.333333333333329, 533.0),
(96.666666666666671, 594.0), (100, 773.0)])]
self.assertEquals(charts._MakeCumulativeDistribution(runs_data), expected)
def testDistributionLineGraph(self):
runs_data = _ExampleRunsData()
url = charts.DistributionLineGraph(runs_data, scale=350)
expected = (
'http://chart.apis.google.com/chart?cht=lxy&chs=720x410&chxt=x,y&'
'chg=10,20&chxr=0,0,350|1,0,100&chd=t:0,22,23,23,23,23,24,24,24,25,25'
',27,36,53,55,75,120|0,3,27,30,37,43,47,53,57,60,63,67,70,73,77,80,83|'
'0,2,3,3,4,8,15,24,30,41,50,89,91,94,142|0,30,50,53,57,60,63,67,70,73,'
'77,80,83,87,90&chco=ff9900,1a00ff&chxt=x,y,x,y&chxl=2:||Duration+in+ms'
'||3:||%25|&chdl=OpenDNS|UltraDNS'
)
self.assertTrue('0,3,27,30,37,43,47,53,57,60,63,67,70,73,77' in expected)
self.assertTrue('0,0,350|1,0,100' in expected)
self.assertEquals(url, expected)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
EmreAtes/spack | lib/spack/llnl/util/filesystem.py | 2 | 34246 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import collections
import errno
import hashlib
import fileinput
import glob
import numbers
import os
import re
import shutil
import stat
import sys
import tempfile
from contextlib import contextmanager
import six
from llnl.util import tty
from llnl.util.lang import dedupe
from spack.util.executable import Executable
__all__ = [
'FileFilter',
'FileList',
'HeaderList',
'LibraryList',
'ancestor',
'can_access',
'change_sed_delimiter',
'copy_mode',
'filter_file',
'find',
'find_headers',
'find_libraries',
'find_system_libraries',
'fix_darwin_install_name',
'force_remove',
'force_symlink',
'hide_files',
'install',
'install_tree',
'is_exe',
'join_path',
'mkdirp',
'remove_dead_links',
'remove_if_dead_link',
'remove_linked_tree',
'set_executable',
'set_install_permissions',
'touch',
'touchp',
'traverse_tree',
'unset_executable_mode',
'working_dir'
]
def filter_file(regex, repl, *filenames, **kwargs):
r"""Like sed, but uses python regular expressions.
Filters every line of each file through regex and replaces the file
with a filtered version. Preserves mode of filtered files.
As with re.sub, ``repl`` can be either a string or a callable.
If it is a callable, it is passed the match object and should
return a suitable replacement string. If it is a string, it
can contain ``\1``, ``\2``, etc. to represent back-substitution
as sed would allow.
Parameters:
regex (str): The regular expression to search for
repl (str): The string to replace matches with
*filenames: One or more files to search and replace
Keyword Arguments:
        string (bool): Treat regex as a plain string. Default is False
backup (bool): Make backup file(s) suffixed with ``~``. Default is True
ignore_absent (bool): Ignore any files that don't exist.
Default is False
"""
string = kwargs.get('string', False)
backup = kwargs.get('backup', True)
ignore_absent = kwargs.get('ignore_absent', False)
# Allow strings to use \1, \2, etc. for replacement, like sed
if not callable(repl):
unescaped = repl.replace(r'\\', '\\')
def replace_groups_with_groupid(m):
def groupid_to_group(x):
return m.group(int(x.group(1)))
return re.sub(r'\\([1-9])', groupid_to_group, unescaped)
repl = replace_groups_with_groupid
if string:
regex = re.escape(regex)
for filename in filenames:
msg = 'FILTER FILE: {0} [replacing "{1}"]'
tty.debug(msg.format(filename, regex))
backup_filename = filename + "~"
if ignore_absent and not os.path.exists(filename):
msg = 'FILTER FILE: file "{0}" not found. Skipping to next file.'
tty.debug(msg.format(filename))
continue
# Create backup file. Don't overwrite an existing backup
# file in case this file is being filtered multiple times.
if not os.path.exists(backup_filename):
shutil.copy(filename, backup_filename)
try:
for line in fileinput.input(filename, inplace=True):
print(re.sub(regex, repl, line.rstrip('\n')))
except BaseException:
# clean up the original file on failure.
shutil.move(backup_filename, filename)
raise
finally:
if not backup and os.path.exists(backup_filename):
os.remove(backup_filename)
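# Hedged usage sketch: a typical filter_file() call that pins a compiler name
# inside a Makefile. The file name and regex below are assumptions chosen only
# to illustrate the sed-like in-place substitution behaviour.
def _example_pin_compiler(makefile='Makefile'):
    """Rewrite 'CC = <anything>' lines to use cc, keeping a ~ backup file."""
    filter_file(r'^CC\s*=.*$', 'CC = cc', makefile, backup=True)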
class FileFilter(object):
"""Convenience class for calling ``filter_file`` a lot."""
def __init__(self, *filenames):
self.filenames = filenames
def filter(self, regex, repl, **kwargs):
return filter_file(regex, repl, *self.filenames, **kwargs)
def change_sed_delimiter(old_delim, new_delim, *filenames):
"""Find all sed search/replace commands and change the delimiter.
e.g., if the file contains seds that look like ``'s///'``, you can
call ``change_sed_delimiter('/', '@', file)`` to change the
delimiter to ``'@'``.
Note that this routine will fail if the delimiter is ``'`` or ``"``.
Handling those is left for future work.
Parameters:
old_delim (str): The delimiter to search for
new_delim (str): The delimiter to replace with
*filenames: One or more files to search and replace
"""
assert(len(old_delim) == 1)
assert(len(new_delim) == 1)
# TODO: handle these cases one day?
assert(old_delim != '"')
assert(old_delim != "'")
assert(new_delim != '"')
assert(new_delim != "'")
whole_lines = "^s@([^@]*)@(.*)@[gIp]$"
whole_lines = whole_lines.replace('@', old_delim)
single_quoted = r"'s@((?:\\'|[^@'])*)@((?:\\'|[^'])*)@[gIp]?'"
single_quoted = single_quoted.replace('@', old_delim)
double_quoted = r'"s@((?:\\"|[^@"])*)@((?:\\"|[^"])*)@[gIp]?"'
double_quoted = double_quoted.replace('@', old_delim)
repl = r's@\1@\2@g'
repl = repl.replace('@', new_delim)
for f in filenames:
filter_file(whole_lines, repl, f)
filter_file(single_quoted, "'%s'" % repl, f)
filter_file(double_quoted, '"%s"' % repl, f)
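# Hedged usage sketch: switching sed delimiters in a generated script so that
# later substitutions may contain '/' characters. The script name is an
# assumption for illustration.
def _example_switch_sed_delimiter(script='config.status'):
    """Change sed commands in ``script`` from 's/.../.../' to 's@...@...@'."""
    change_sed_delimiter('/', '@', script)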
def set_install_permissions(path):
"""Set appropriate permissions on the installed file."""
# If this points to a file maintained in a Spack prefix, it is assumed that
# this function will be invoked on the target. If the file is outside a
# Spack-maintained prefix, the permissions should not be modified.
if os.path.islink(path):
return
if os.path.isdir(path):
os.chmod(path, 0o755)
else:
os.chmod(path, 0o644)
def copy_mode(src, dest):
"""Set the mode of dest to that of src unless it is a link.
"""
if os.path.islink(dest):
return
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
if src_mode & stat.S_IXUSR:
dest_mode |= stat.S_IXUSR
if src_mode & stat.S_IXGRP:
dest_mode |= stat.S_IXGRP
if src_mode & stat.S_IXOTH:
dest_mode |= stat.S_IXOTH
os.chmod(dest, dest_mode)
def unset_executable_mode(path):
mode = os.stat(path).st_mode
mode &= ~stat.S_IXUSR
mode &= ~stat.S_IXGRP
mode &= ~stat.S_IXOTH
os.chmod(path, mode)
def install(src, dest):
"""Manually install a file to a particular location."""
tty.debug("Installing %s to %s" % (src, dest))
# Expand dest to its eventual full path if it is a directory.
if os.path.isdir(dest):
dest = join_path(dest, os.path.basename(src))
shutil.copy(src, dest)
set_install_permissions(dest)
copy_mode(src, dest)
def install_tree(src, dest, **kwargs):
"""Manually install a directory tree to a particular location."""
tty.debug("Installing %s to %s" % (src, dest))
shutil.copytree(src, dest, **kwargs)
for s, d in traverse_tree(src, dest, follow_nonexisting=False):
set_install_permissions(d)
copy_mode(s, d)
def is_exe(path):
"""True if path is an executable file."""
return os.path.isfile(path) and os.access(path, os.X_OK)
def mkdirp(*paths):
"""Creates a directory, as well as parent directories if needed."""
for path in paths:
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise e
elif not os.path.isdir(path):
raise OSError(errno.EEXIST, "File already exists", path)
def force_remove(*paths):
"""Remove files without printing errors. Like ``rm -f``, does NOT
remove directories."""
for path in paths:
try:
os.remove(path)
except OSError:
pass
@contextmanager
def working_dir(dirname, **kwargs):
if kwargs.get('create', False):
mkdirp(dirname)
orig_dir = os.getcwd()
os.chdir(dirname)
yield
os.chdir(orig_dir)
@contextmanager
def replace_directory_transaction(directory_name, tmp_root=None):
"""Moves a directory to a temporary space. If the operations executed
within the context manager don't raise an exception, the directory is
deleted. If there is an exception, the move is undone.
Args:
directory_name (path): absolute path of the directory name
tmp_root (path): absolute path of the parent directory where to create
the temporary
Returns:
temporary directory where ``directory_name`` has been moved
"""
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
assert os.path.isdir(directory_name), \
'"directory_name" must be a valid directory'
assert os.path.isabs(directory_name), \
'"directory_name" must contain an absolute path'
directory_basename = os.path.basename(directory_name)
if tmp_root is not None:
assert os.path.isabs(tmp_root)
tmp_dir = tempfile.mkdtemp(dir=tmp_root)
tty.debug('TEMPORARY DIRECTORY CREATED [{0}]'.format(tmp_dir))
shutil.move(src=directory_name, dst=tmp_dir)
tty.debug('DIRECTORY MOVED [src={0}, dest={1}]'.format(
directory_name, tmp_dir
))
try:
yield tmp_dir
except (Exception, KeyboardInterrupt, SystemExit):
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
shutil.move(
src=os.path.join(tmp_dir, directory_basename),
dst=os.path.dirname(directory_name)
)
tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
msg = 'the transactional move of "{0}" failed.'
raise RuntimeError(msg.format(directory_name))
else:
# Otherwise delete the temporary directory
shutil.rmtree(tmp_dir)
tty.debug('TEMPORARY DIRECTORY DELETED [{0}]'.format(tmp_dir))
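# Hedged usage sketch: because replace_directory_transaction() is a context
# manager, any exception raised inside the block restores the original
# directory. The prefix path below is an assumption for illustration.
def _example_rebuild_prefix(prefix='/opt/example/prefix'):
    """Recreate ``prefix`` atomically; on failure the previous content returns."""
    with replace_directory_transaction(prefix):
        mkdirp(prefix)
        touch(os.path.join(prefix, '.rebuilt'))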
@contextmanager
def hide_files(*file_list):
try:
baks = ['%s.bak' % f for f in file_list]
for f, bak in zip(file_list, baks):
shutil.move(f, bak)
yield
finally:
for f, bak in zip(file_list, baks):
shutil.move(bak, f)
def hash_directory(directory):
"""Hashes recursively the content of a directory.
Args:
directory (path): path to a directory to be hashed
Returns:
hash of the directory content
"""
assert os.path.isdir(directory), '"directory" must be a directory!'
md5_hash = hashlib.md5()
# Adapted from https://stackoverflow.com/a/3431835/771663
for root, dirs, files in os.walk(directory):
for name in sorted(files):
filename = os.path.join(root, name)
# TODO: if caching big files becomes an issue, convert this to
# TODO: read in chunks. Currently it's used only for testing
# TODO: purposes.
with open(filename, 'rb') as f:
md5_hash.update(f.read())
return md5_hash.hexdigest()
def touch(path):
"""Creates an empty file at the specified path."""
perms = (os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY)
fd = None
try:
fd = os.open(path, perms)
os.utime(path, None)
finally:
if fd is not None:
os.close(fd)
def touchp(path):
"""Like ``touch``, but creates any parent directories needed for the file.
"""
mkdirp(os.path.dirname(path))
touch(path)
def force_symlink(src, dest):
try:
os.symlink(src, dest)
except OSError:
os.remove(dest)
os.symlink(src, dest)
def join_path(prefix, *args):
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
def can_access(file_name):
"""True if we have read/write access to the file."""
return os.access(file_name, os.R_OK | os.W_OK)
def traverse_tree(source_root, dest_root, rel_path='', **kwargs):
"""Traverse two filesystem trees simultaneously.
Walks the LinkTree directory in pre or post order. Yields each
file in the source directory with a matching path from the dest
directory, along with whether the file is a directory.
e.g., for this tree::
root/
a/
file1
file2
b/
file3
When called on dest, this yields::
('root', 'dest')
('root/a', 'dest/a')
('root/a/file1', 'dest/a/file1')
('root/a/file2', 'dest/a/file2')
('root/b', 'dest/b')
('root/b/file3', 'dest/b/file3')
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are 'pre' and 'post'
ignore (str): Predicate indicating which files to ignore
follow_nonexisting (bool): Whether to descend into directories in
            ``src`` that do not exist in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
"""
follow_nonexisting = kwargs.get('follow_nonexisting', True)
    follow_links = kwargs.get('follow_links', False)
# Yield in pre or post order?
order = kwargs.get('order', 'pre')
if order not in ('pre', 'post'):
raise ValueError("Order must be 'pre' or 'post'.")
# List of relative paths to ignore under the src root.
ignore = kwargs.get('ignore', lambda filename: False)
# Don't descend into ignored directories
if ignore(rel_path):
return
source_path = os.path.join(source_root, rel_path)
dest_path = os.path.join(dest_root, rel_path)
# preorder yields directories before children
if order == 'pre':
yield (source_path, dest_path)
for f in os.listdir(source_path):
source_child = os.path.join(source_path, f)
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# Treat as a directory
if os.path.isdir(source_child) and (
follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
tuples = traverse_tree(
source_root, dest_root, rel_child, **kwargs)
for t in tuples:
yield t
# Treat as a file.
elif not ignore(os.path.join(rel_path, f)):
yield (source_child, dest_child)
if order == 'post':
yield (source_path, dest_path)
def set_executable(path):
mode = os.stat(path).st_mode
if mode & stat.S_IRUSR:
mode |= stat.S_IXUSR
if mode & stat.S_IRGRP:
mode |= stat.S_IXGRP
if mode & stat.S_IROTH:
mode |= stat.S_IXOTH
os.chmod(path, mode)
def remove_dead_links(root):
"""Removes any dead link that is present in root.
Parameters:
root (str): path where to search for dead links
"""
for file in os.listdir(root):
path = join_path(root, file)
remove_if_dead_link(path)
def remove_if_dead_link(path):
"""Removes the argument if it is a dead link.
Parameters:
path (str): The potential dead link
"""
if os.path.islink(path):
real_path = os.path.realpath(path)
if not os.path.exists(real_path):
os.unlink(path)
def remove_linked_tree(path):
"""Removes a directory and its contents.
If the directory is a symlink, follows the link and removes the real
directory before removing the link.
Parameters:
path (str): Directory to be removed
"""
if os.path.exists(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), True)
os.unlink(path)
else:
shutil.rmtree(path, True)
def fix_darwin_install_name(path):
"""Fix install name of dynamic libraries on Darwin to have full path.
There are two parts of this task:
1. Use ``install_name('-id', ...)`` to change install name of a single lib
2. Use ``install_name('-change', ...)`` to change the cross linking between
libs. The function assumes that all libraries are in one folder and
currently won't follow subfolders.
Parameters:
path (str): directory in which .dylib files are located
"""
libs = glob.glob(join_path(path, "*.dylib"))
for lib in libs:
# fix install name first:
install_name_tool = Executable('install_name_tool')
install_name_tool('-id', lib, lib)
otool = Executable('otool')
long_deps = otool('-L', lib, output=str).split('\n')
deps = [dep.partition(' ')[0][1::] for dep in long_deps[2:-1]]
# fix all dependencies:
for dep in deps:
for loc in libs:
# We really want to check for either
# dep == os.path.basename(loc) or
# dep == join_path(builddir, os.path.basename(loc)),
# but we don't know builddir (nor how symbolic links look
# in builddir). We thus only compare the basenames.
if os.path.basename(dep) == os.path.basename(loc):
install_name_tool('-change', dep, loc, lib)
break
def find(root, files, recursive=True):
"""Search for ``files`` starting from the ``root`` directory.
Like GNU/BSD find but written entirely in Python.
Examples:
.. code-block:: console
$ find /usr -name python
is equivalent to:
>>> find('/usr', 'python')
.. code-block:: console
$ find /usr/local/bin -maxdepth 1 -name python
is equivalent to:
>>> find('/usr/local/bin', 'python', recursive=False)
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
root (str): The root directory to start searching from
        files (str or collections.Sequence): File name(s) to search for
        recursive (bool, optional): if False search only root folder,
            if True descends top-down from the root. Defaults to True.
Returns:
list of strings: The files that have been found
"""
if isinstance(files, six.string_types):
files = [files]
if recursive:
return _find_recursive(root, files)
else:
return _find_non_recursive(root, files)
def _find_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
# found in a key, and reconstructing the stable order later.
found_files = collections.defaultdict(list)
# Make the path absolute to have os.walk also return an absolute path
root = os.path.abspath(root)
for path, _, list_files in os.walk(root):
for search_file in search_files:
matches = glob.glob(os.path.join(path, search_file))
matches = [os.path.join(path, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
def _find_non_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict as os.list_dir
# can return files in any order (does not preserve stability)
found_files = collections.defaultdict(list)
# Make the path absolute to have absolute path returned
root = os.path.abspath(root)
for search_file in search_files:
matches = glob.glob(os.path.join(root, search_file))
matches = [os.path.join(root, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
# Utilities for libraries and headers
class FileList(collections.Sequence):
"""Sequence of absolute paths to files.
Provides a few convenience methods to manipulate file paths.
"""
def __init__(self, files):
if isinstance(files, six.string_types):
files = [files]
self.files = list(dedupe(files))
@property
def directories(self):
"""Stable de-duplication of the directories where the files reside.
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/libc.a'])
>>> l.directories
['/dir1', '/dir2']
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.directories
['/dir1', '/dir2']
Returns:
list of strings: A list of directories
"""
return list(dedupe(
os.path.dirname(x) for x in self.files if os.path.dirname(x)
))
@property
def basenames(self):
"""Stable de-duplication of the base-names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.a'])
>>> l.basenames
['liba.a', 'libb.a']
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.basenames
['a.h', 'b.h']
Returns:
list of strings: A list of base-names
"""
return list(dedupe(os.path.basename(x) for x in self.files))
def __getitem__(self, item):
cls = type(self)
if isinstance(item, numbers.Integral):
return self.files[item]
return cls(self.files[item])
def __add__(self, other):
return self.__class__(dedupe(self.files + list(other)))
def __radd__(self, other):
return self.__add__(other)
def __eq__(self, other):
return self.files == other.files
def __len__(self):
return len(self.files)
def joined(self, separator=' '):
return separator.join(self.files)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self.files) + ')'
def __str__(self):
return self.joined()
class HeaderList(FileList):
"""Sequence of absolute paths to headers.
Provides a few convenience methods to manipulate header paths and get
commonly used compiler flags or names.
"""
def __init__(self, files):
super(HeaderList, self).__init__(files)
self._macro_definitions = []
@property
def headers(self):
"""Stable de-duplication of the headers.
Returns:
list of strings: A list of header files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of header names in the list without extensions
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.names
['a', 'b']
Returns:
list of strings: A list of files without extensions
"""
names = []
for x in self.basenames:
name = x
# Valid extensions include: ['.cuh', '.hpp', '.hh', '.h']
for ext in ['.cuh', '.hpp', '.hh', '.h']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def include_flags(self):
"""Include flags
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.include_flags
'-I/dir1 -I/dir2'
Returns:
str: A joined list of include flags
"""
return ' '.join(['-I' + x for x in self.directories])
@property
def macro_definitions(self):
"""Macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.add_macro('-DBOOST_LIB_NAME=boost_regex')
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.macro_definitions
'-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK'
Returns:
str: A joined list of macro definitions
"""
return ' '.join(self._macro_definitions)
@property
def cpp_flags(self):
"""Include flags + macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.cpp_flags
'-I/dir1 -I/dir2'
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.cpp_flags
'-I/dir1 -I/dir2 -DBOOST_DYN_LINK'
Returns:
str: A joined list of include flags and macro definitions
"""
cpp_flags = self.include_flags
if self.macro_definitions:
cpp_flags += ' ' + self.macro_definitions
return cpp_flags
def add_macro(self, macro):
"""Add a macro definition
Parameters:
macro (str): The macro to add
"""
self._macro_definitions.append(macro)
def find_headers(headers, root, recursive=False):
"""Returns an iterable object containing a list of full paths to
headers if found.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
headers (str or list of str): Header name(s) to search for
root (str): The root directory to start searching from
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
HeaderList: The headers that have been found
"""
if isinstance(headers, six.string_types):
headers = [headers]
elif not isinstance(headers, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_headers.__name__, type(headers))
raise TypeError(message)
# Construct the right suffix for the headers
suffix = 'h'
# List of headers we are searching with suffixes
headers = ['{0}.{1}'.format(header, suffix) for header in headers]
return HeaderList(find(root, headers, recursive))
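# Hedged usage sketch: find_headers() returns a HeaderList, so the result can
# be turned straight into preprocessor flags. The search root is an assumption
# for illustration.
def _example_zlib_cpp_flags(root='/usr/include'):
    """Return '-I...' flags for any zlib headers found under ``root``."""
    headers = find_headers('zlib', root, recursive=True)
    return headers.cpp_flags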
class LibraryList(FileList):
"""Sequence of absolute paths to libraries
Provides a few convenience methods to manipulate library paths and get
commonly used compiler flags or names
"""
@property
def libraries(self):
"""Stable de-duplication of library files.
Returns:
list of strings: A list of library files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of library names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.so'])
>>> l.names
['a', 'b']
Returns:
list of strings: A list of library names
"""
names = []
for x in self.basenames:
name = x
if x.startswith('lib'):
name = x[3:]
# Valid extensions include: ['.dylib', '.so', '.a']
for ext in ['.dylib', '.so', '.a']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def search_flags(self):
"""Search flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.search_flags
'-L/dir1 -L/dir2'
Returns:
str: A joined list of search flags
"""
return ' '.join(['-L' + x for x in self.directories])
@property
def link_flags(self):
"""Link flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.link_flags
'-la -lb'
Returns:
str: A joined list of link flags
"""
return ' '.join(['-l' + name for name in self.names])
@property
def ld_flags(self):
"""Search flags + link flags
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.ld_flags
'-L/dir1 -L/dir2 -la -lb'
Returns:
str: A joined list of search flags and link flags
"""
return self.search_flags + ' ' + self.link_flags
def find_system_libraries(libraries, shared=True):
"""Searches the usual system library locations for ``libraries``.
Search order is as follows:
1. ``/lib64``
2. ``/lib``
3. ``/usr/lib64``
4. ``/usr/lib``
5. ``/usr/local/lib64``
6. ``/usr/local/lib``
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_system_libraries.__name__,
type(libraries))
raise TypeError(message)
libraries_found = []
search_locations = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib',
]
for library in libraries:
for root in search_locations:
result = find_libraries(library, root, shared, recursive=True)
if result:
libraries_found += result
break
return libraries_found
def find_libraries(libraries, root, shared=True, recursive=False):
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
root (str): The root directory to start searching from
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
# Construct the right suffix for the library
if shared is True:
suffix = 'dylib' if sys.platform == 'darwin' else 'so'
else:
suffix = 'a'
# List of libraries we are searching with suffixes
libraries = ['{0}.{1}'.format(lib, suffix) for lib in libraries]
return LibraryList(find(root, libraries, recursive))
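# Hedged usage sketch: find_libraries() yields a LibraryList whose ld_flags
# combine '-L' search paths with '-l' link names. The root directory is an
# assumption for illustration.
def _example_zlib_ld_flags(root='/usr/lib'):
    """Return linker flags for a shared zlib under ``root``, or '' if absent."""
    libs = find_libraries('libz', root, shared=True, recursive=True)
    return libs.ld_flags if libs else ''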
| lgpl-2.1 |
rhdedgar/openshift-tools | openshift/installer/vendored/openshift-ansible-3.6.173.0.27/roles/lib_openshift/library/oc_route.py | 6 | 63067 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/route -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_route
short_description: Create, modify, and idempotently manage openshift routes.
description:
- Manage openshift route objects programmatically.
options:
state:
description:
- State represents whether to create, modify, delete, or list
required: true
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
tls_termination:
description:
- The options for termination. e.g. reencrypt
required: false
default: None
aliases: []
dest_cacert_path:
description:
- The path to the dest_cacert
required: false
default: None
aliases: []
cacert_path:
description:
- The path to the cacert
required: false
default: None
aliases: []
cert_path:
description:
- The path to the cert
required: false
default: None
aliases: []
key_path:
description:
- The path to the key
required: false
default: None
aliases: []
dest_cacert_content:
description:
- The dest_cacert content
required: false
default: None
aliases: []
cacert_content:
description:
- The cacert content
required: false
default: None
aliases: []
cert_content:
description:
- The cert content
required: false
default: None
aliases: []
service_name:
description:
- The name of the service that this route points to.
required: false
default: None
aliases: []
host:
description:
- The host that the route will use. e.g. myapp.x.y.z
required: false
default: None
aliases: []
port:
description:
- The Name of the service port or number of the container port the route will route traffic to
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: Configure certificates for reencrypt route
oc_route:
name: myapproute
namespace: awesomeapp
cert_path: "/etc/origin/master/named_certificates/myapp_cert
key_path: "/etc/origin/master/named_certificates/myapp_key
cacert_path: "/etc/origin/master/named_certificates/myapp_cacert
dest_cacert_content: "{{ dest_cacert_content }}"
service_name: myapp_php
host: myapp.awesomeapp.openshift.com
tls_termination: reencrypt
run_once: true
'''
# -*- -*- -*- End included fragment: doc/route -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an entry to a dictionary at the location given by key notation a.b.c
d = {'a': {'b': 'c'}}
key = a#b
sets d['a']['b'] to item
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
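# Illustrative example of the dotted-key notation handled above (added for
# clarity, not part of the original module):
#
#     data = {'a': {'b': {'c': 'd'}, 'items': [1, 2, 3]}}
#     Yedit.get_entry(data, 'a.b.c') # -> 'd'
#     Yedit.get_entry(data, 'a.items[1]') # -> 2 (list elements use [index])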
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
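# Illustrative shape of an 'edits' list consumed above (added for clarity,
# not part of the original module). Each edit needs 'key' and 'value';
# 'action' may be 'update' or 'append', otherwise a plain put is performed:
#
#     edits = [
#         {'key': 'metadata.labels.app', 'value': 'myapp'},
#         {'key': 'spec.tags', 'value': 'v2', 'action': 'append'},
#     ]
#     result = Yedit.process_edits(edits, yamlfile)
#     # -> {'changed': True/False, 'results': [...]}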
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
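# Illustrative input/output for the parser above (added for clarity, not part
# of the original module). Given stdout such as:
#
#     oc v3.6.173.0.27
#     kubernetes v1.6.1+5115d708d7
#
# filter_versions() returns {'oc': 'v3.6.173.0.27',
#                            'kubernetes': 'v1.6.1+5115d708d7',
#                            'openshift': 'v3.6.173.0.27'}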
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
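# Illustrative example of the option stringification above (added for clarity,
# not part of the original module). With options such as
# {'replicas': {'value': 3, 'include': True}}, stringify() yields
# ['--replicas=3']; when ascommalist names a dict-valued key, that value is
# rendered as a single 'k1=v1,k2=v2' string.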
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/route.py -*- -*- -*-
# noqa: E302,E301
# pylint: disable=too-many-instance-attributes
class RouteConfig(object):
''' Handle route options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
destcacert=None,
cacert=None,
cert=None,
key=None,
host=None,
tls_termination=None,
service_name=None,
wildcard_policy=None,
weight=None,
port=None):
''' constructor for handling route options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.host = host
self.tls_termination = tls_termination
self.destcacert = destcacert
self.cacert = cacert
self.cert = cert
self.key = key
self.service_name = service_name
self.port = port
self.data = {}
self.wildcard_policy = wildcard_policy
if wildcard_policy is None:
self.wildcard_policy = 'None'
self.weight = weight
if weight is None:
self.weight = 100
self.create_dict()
def create_dict(self):
''' return a service as a dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Route'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['spec'] = {}
self.data['spec']['host'] = self.host
if self.tls_termination:
self.data['spec']['tls'] = {}
self.data['spec']['tls']['termination'] = self.tls_termination
if self.tls_termination != 'passthrough':
self.data['spec']['tls']['key'] = self.key
self.data['spec']['tls']['caCertificate'] = self.cacert
self.data['spec']['tls']['certificate'] = self.cert
if self.tls_termination == 'reencrypt':
self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
self.data['spec']['to'] = {'kind': 'Service',
'name': self.service_name,
'weight': self.weight}
self.data['spec']['wildcardPolicy'] = self.wildcard_policy
if self.port:
self.data['spec']['port'] = {}
self.data['spec']['port']['targetPort'] = self.port
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Route(Yedit):
''' Class to wrap the oc command line tools '''
wildcard_policy = "spec.wildcardPolicy"
host_path = "spec.host"
port_path = "spec.port.targetPort"
service_path = "spec.to.name"
weight_path = "spec.to.weight"
cert_path = "spec.tls.certificate"
cacert_path = "spec.tls.caCertificate"
destcacert_path = "spec.tls.destinationCACertificate"
termination_path = "spec.tls.termination"
key_path = "spec.tls.key"
kind = 'route'
def __init__(self, content):
'''Route constructor'''
super(Route, self).__init__(content=content)
def get_destcacert(self):
''' return cert '''
return self.get(Route.destcacert_path)
def get_cert(self):
''' return cert '''
return self.get(Route.cert_path)
def get_key(self):
''' return key '''
return self.get(Route.key_path)
def get_cacert(self):
''' return cacert '''
return self.get(Route.cacert_path)
def get_service(self):
''' return service name '''
return self.get(Route.service_path)
def get_weight(self):
''' return service weight '''
return self.get(Route.weight_path)
def get_termination(self):
''' return tls termination'''
return self.get(Route.termination_path)
def get_host(self):
''' return host '''
return self.get(Route.host_path)
def get_port(self):
''' return port '''
return self.get(Route.port_path)
def get_wildcard_policy(self):
''' return wildcardPolicy '''
return self.get(Route.wildcard_policy)
# -*- -*- -*- End included fragment: lib/route.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_route.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCRoute(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'route'
def __init__(self,
config,
verbose=False):
''' Constructor for OCRoute '''
super(OCRoute, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self._route = None
@property
def route(self):
''' property function for route'''
if not self._route:
self.get()
return self._route
@route.setter
def route(self, data):
''' setter function for route '''
self._route = data
def exists(self):
''' return whether a route exists '''
if self.route:
return True
return False
def get(self):
'''return route information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.route = Route(content=result['results'][0])
elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
return self._replace_content(self.kind,
self.config.name,
self.config.data,
force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
skip = []
return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=self.verbose)
@staticmethod
def get_cert_data(path, content):
'''get the data for a particular value'''
if not path and not content:
return None
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path).read()
elif content:
rval = content
return rval
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode=False):
''' run the idempotent ansible code
params comes from the ansible portion for this module
files: a dictionary for the certificates
{'cert': {'path': '',
'content': '',
'value': ''
}
}
check_mode: does the module support check mode. (module.check_mode)
'''
files = {'destcacert': {'path': params['dest_cacert_path'],
'content': params['dest_cacert_content'],
'value': None, },
'cacert': {'path': params['cacert_path'],
'content': params['cacert_content'],
'value': None, },
'cert': {'path': params['cert_path'],
'content': params['cert_content'],
'value': None, },
'key': {'path': params['key_path'],
'content': params['key_content'],
'value': None, }, }
if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough': # E501
for key, option in files.items():
if key == 'destcacert' and params['tls_termination'] != 'reencrypt':
continue
option['value'] = OCRoute.get_cert_data(option['path'], option['content']) # E501
if not option['value']:
return {'failed': True,
'msg': 'Verify that you pass a value for %s' % key}
rconfig = RouteConfig(params['name'],
params['namespace'],
params['kubeconfig'],
files['destcacert']['value'],
files['cacert']['value'],
files['cert']['value'],
files['key']['value'],
params['host'],
params['tls_termination'],
params['service_name'],
params['wildcard_policy'],
params['weight'],
params['port'])
oc_route = OCRoute(rconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_route.get()
#####
# Get
#####
if state == 'list':
return {'changed': False,
'results': api_rval['results'],
'state': 'list'}
########
# Delete
########
if state == 'absent':
if oc_route.exists():
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
api_rval = oc_route.delete()
return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
if not oc_route.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
# Create it here
api_rval = oc_route.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
# return the created object
api_rval = oc_route.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
########
# Update
########
if oc_route.needs_update():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
api_rval = oc_route.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
# return the created object
api_rval = oc_route.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
return {'changed': False, 'results': api_rval, 'state': "present"}
# catch all
return {'failed': True, 'msg': "Unknown State passed"}
# -*- -*- -*- End included fragment: class/oc_route.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_route.py -*- -*- -*-
# pylint: disable=too-many-branches
def main():
'''
ansible oc module for route
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
tls_termination=dict(default=None, type='str'),
dest_cacert_path=dict(default=None, type='str'),
cacert_path=dict(default=None, type='str'),
cert_path=dict(default=None, type='str'),
key_path=dict(default=None, type='str'),
dest_cacert_content=dict(default=None, type='str'),
cacert_content=dict(default=None, type='str'),
cert_content=dict(default=None, type='str'),
key_content=dict(default=None, type='str'),
service_name=dict(default=None, type='str'),
host=dict(default=None, type='str'),
wildcard_policy=dict(default=None, type='str'),
weight=dict(default=None, type='int'),
port=dict(default=None, type='int'),
),
mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
('cacert_path', 'cacert_content'),
('cert_path', 'cert_content'),
('key_path', 'key_content'), ],
supports_check_mode=True,
)
results = OCRoute.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_route.py -*- -*- -*-
| apache-2.0 |
ISN-LYSTCHA17/glowing-invention | personnalize.py | 1 | 2940 | # import drawing lib
import pygame
# pygame constants as events' constants
from pygame.locals import *
# game constants
from constants import *
from buttonwimage import ButtonWImage
import glob
import os
import textentry
from button import Button
import shutil
class Personnalize:
def __init__(self, win):
self.running = False
self.win = win
self.dbox = textentry.TextBox(self.win, font=pygame.font.SysFont("arial", 18), sy=22, x=((WIDTH - 120) // 2), y=HEIGHT - 32)
self.btns = []
x, y = 200, 20
i = 0
for folder in sorted(glob.glob("gfx/personnalize/*")):
self.btns.append(ButtonWImage(x, y, 64, 64, folder + "/front.png", (128, 48, 120)))
i += 1
if i == 5:
y = 20
x = WIDTH - 264
else:
y += 74
self.valid_btn = Button((WIDTH - 80) // 2, (HEIGHT - 32), 50, 22, "Valider", (12, 200, 35), pygame.font.SysFont("arial", 18), (0, 0, 0))
self.has_valid = False
self.selected = -1
def load(self):
self.running = True
def update(self):
pass
def render(self):
pygame.draw.rect(self.win, (20, 175, 170), (0, 0) + self.win.get_size())
for btn in self.btns:
btn.render(self.win)
if not self.has_valid:
self.valid_btn.render(self.win)
else:
self.dbox.mainloop()
def create_game(self):
with open("saves/game", "w") as file:
file.write(self.dbox.input)
folder = sorted(glob.glob("gfx/personnalize/*"))[self.selected]
for f in glob.glob(folder + "/*.png"):
if os.path.exists("gfx/player/" + os.path.basename(f)):
os.remove("gfx/player/" + os.path.basename(f))
shutil.copyfile(f, "gfx/player/" + os.path.basename(f))
def run(self):
while self.running:
for ev in pygame.event.get():
if ev.type == QUIT:
self.running = False
elif ev.type == MOUSEBUTTONDOWN:
x, y = pygame.mouse.get_pos()
for i, btn in enumerate(self.btns):
if btn.collide(x, y):
if self.selected != -1:
self.btns[self.selected].color = (128, 48, 120)
self.selected = i
btn.color = (50, 120, 50)
break
if not self.has_valid:
if self.valid_btn.collide(x, y):
self.has_valid = True
if not self.dbox.is_running():
self.create_game()
self.running = False
self.update()
self.render()
pygame.display.flip()
| gpl-3.0 |
MiltosD/CEFELRC | lib/python2.7/site-packages/dateutil/easter.py | 291 | 2633 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Oudin (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
# New method
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
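# Illustrative usage (added for clarity, not part of the original module):
#
#     from dateutil.easter import easter, EASTER_ORTHODOX
#     easter(2008) # Western method (default) -> datetime.date(2008, 3, 23)
#     easter(2008, EASTER_ORTHODOX) # same year computed with the Orthodox method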
| bsd-3-clause |
gooddata/openstack-nova | nova/tests/unit/conf/test_neutron.py | 6 | 1171 | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova import test
CONF = nova.conf.CONF
class NeutronConfTestCase(test.NoDBTestCase):
def test_register_dynamic_opts(self):
self.flags(physnets=['foo', 'bar', 'baz'], group='neutron')
self.assertNotIn('neutron_physnet_foo', CONF)
self.assertNotIn('neutron_physnet_bar', CONF)
nova.conf.neutron.register_dynamic_opts(CONF)
self.assertIn('neutron_physnet_foo', CONF)
self.assertIn('neutron_physnet_bar', CONF)
self.assertIn('neutron_tunnel', CONF)
self.assertIn('numa_nodes', CONF.neutron_tunnel)
| apache-2.0 |
eteamin/spell_checker_web_api | scwapi/model/__init__.py | 1 | 2402 | # -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker(autoflush=True, autocommit=False,
extension=ZopeTransactionExtension())
DBSession = scoped_session(maker)
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# from sqlalchemy import MetaData
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
######
def init_model(engine):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure(bind=engine)
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
#
# global t_reflected
# t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
# mapper(Reflected, t_reflected)
# Import your model modules here.
from scwapi.model.auth import User, Group, Permission
__all__ = ('User', 'Group', 'Permission')
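# Illustrative wiring of the model (added for clarity, not part of the original
# package); the database URL below is hypothetical:
#
#     from sqlalchemy import create_engine
#     from scwapi import model
#     model.init_model(create_engine('sqlite:///devdata.db'))
#     # DBSession() now returns a thread-local session bound to that engine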
| gpl-3.0 |
m00re/ns-3-stdma | sources/src/netanim/bindings/modulegen__gcc_LP64.py | 354 | 264513 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.netanim', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE'], import_from_module='ns.core')
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo [class]
module.add_class('AnimPacketInfo')
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo [class]
module.add_class('AnimRxInfo')
## animation-interface.h (module 'netanim'): ns3::AnimationInterface [class]
module.add_class('AnimationInterface')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent', import_from_module='ns.core')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## node-list.h (module 'network'): ns3::NodeList [class]
module.add_class('NodeList', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## animation-interface.h (module 'netanim'): ns3::AnimByteTag [class]
module.add_class('AnimByteTag', parent=root_module['ns3::Tag'])
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mobility-model.h (module 'mobility'): ns3::MobilityModel [class]
module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_container('std::map< unsigned int, ns3::AnimRxInfo >', ('unsigned int', 'ns3::AnimRxInfo'), container_type='map')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *', 'ns3::LogTimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) **', 'ns3::LogTimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *&', 'ns3::LogTimePrinter&')
typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue')
typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*')
typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *', 'ns3::LogNodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) **', 'ns3::LogNodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *&', 'ns3::LogNodePrinter&')
typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector')
typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*')
typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker')
typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*')
typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
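
# The calls above mirror the C++ namespace layout: register_types() creates a
# nested pybindgen sub-module for each C++ namespace (ns3::Config and
# ns3::FatalImpl) with module.add_cpp_namespace() and then delegates to the
# matching register_types_ns3_*() helper below.  ns3::FatalImpl contributes no
# public types here, so its helper only fetches the root module.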
def register_types_ns3_Config(module):
root_module = module.get_root()
## config.h (module 'core'): ns3::Config::MatchContainer [class]
module.add_class('MatchContainer', import_from_module='ns.core')
module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type='vector')
module.add_container('std::vector< std::string >', 'std::string', container_type='vector')

def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
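
# register_methods() is the generated dispatcher: for every class wrapper that
# register_types() created it looks the wrapper up via root_module['ns3::...']
# and passes it to a dedicated register_Ns3*_methods() helper, which attaches
# the constructors, member functions, operators and instance attributes.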
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AnimPacketInfo_methods(root_module, root_module['ns3::AnimPacketInfo'])
register_Ns3AnimRxInfo_methods(root_module, root_module['ns3::AnimRxInfo'])
register_Ns3AnimationInterface_methods(root_module, root_module['ns3::AnimationInterface'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3NodeList_methods(root_module, root_module['ns3::NodeList'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3AnimByteTag_methods(root_module, root_module['ns3::AnimByteTag'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3ConfigMatchContainer_methods(root_module, root_module['ns3::Config::MatchContainer'])
return
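
# The per-class helpers below all follow the same pybindgen pattern; a sketch of
# the calls used throughout this file (placeholder names 'ns3::Foo', 'Bar' and
# 'm_baz' are for illustration only, the real signatures come from the scanned
# ns-3 headers referenced in the '##' comments):
#
#   cls.add_constructor([])                                   # default constructor
#   cls.add_constructor([param('ns3::Foo const &', 'arg0')])  # copy constructor
#   cls.add_method('Bar', 'uint32_t', [param('uint32_t', 'x')], is_const=True)
#   cls.add_instance_attribute('m_baz', 'double', is_const=False)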
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
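
# AnimPacketInfo and AnimRxInfo (animation-interface-helper.h) are netanim's
# per-packet bookkeeping records: AnimPacketInfo carries the transmit-side
# metadata (m_fbTx, m_lbTx, m_txLoc, m_txNodeId) and a per-receiving-device map
# of AnimRxInfo entries, exposed through the std::map< unsigned int,
# ns3::AnimRxInfo > container registered earlier.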
def register_Ns3AnimPacketInfo_methods(root_module, cls):
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::AnimPacketInfo(ns3::AnimPacketInfo const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AnimPacketInfo const &', 'arg0')])
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::AnimPacketInfo() [constructor]
cls.add_constructor([])
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::AnimPacketInfo(ns3::Ptr<ns3::NetDevice const> tx_nd, ns3::Time const & fbTx, ns3::Time const & lbTx, ns3::Vector txLoc, uint32_t txNodeId=0) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice const >', 'tx_nd'), param('ns3::Time const &', 'fbTx'), param('ns3::Time const &', 'lbTx'), param('ns3::Vector', 'txLoc'), param('uint32_t', 'txNodeId', default_value='0')])
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo ns3::AnimPacketInfo::GetRxInfo(ns3::Ptr<ns3::NetDevice const> nd) [member function]
cls.add_method('GetRxInfo',
'ns3::AnimRxInfo',
[param('ns3::Ptr< ns3::NetDevice const >', 'nd')])
## animation-interface-helper.h (module 'netanim'): void ns3::AnimPacketInfo::ProcessRxBegin(ns3::Ptr<ns3::NetDevice const> nd, ns3::Time const & fbRx) [member function]
cls.add_method('ProcessRxBegin',
'void',
[param('ns3::Ptr< ns3::NetDevice const >', 'nd'), param('ns3::Time const &', 'fbRx')])
## animation-interface-helper.h (module 'netanim'): void ns3::AnimPacketInfo::ProcessRxDrop(ns3::Ptr<ns3::NetDevice const> nd) [member function]
cls.add_method('ProcessRxDrop',
'void',
[param('ns3::Ptr< ns3::NetDevice const >', 'nd')])
## animation-interface-helper.h (module 'netanim'): bool ns3::AnimPacketInfo::ProcessRxEnd(ns3::Ptr<ns3::NetDevice const> nd, ns3::Time const & fbRx, ns3::Vector rxLoc) [member function]
cls.add_method('ProcessRxEnd',
'bool',
[param('ns3::Ptr< ns3::NetDevice const >', 'nd'), param('ns3::Time const &', 'fbRx'), param('ns3::Vector', 'rxLoc')])
## animation-interface-helper.h (module 'netanim'): void ns3::AnimPacketInfo::RemoveRxInfo(ns3::Ptr<ns3::NetDevice const> nd) [member function]
cls.add_method('RemoveRxInfo',
'void',
[param('ns3::Ptr< ns3::NetDevice const >', 'nd')])
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::firstlastbitDelta [variable]
cls.add_instance_attribute('firstlastbitDelta', 'double', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::m_fbTx [variable]
cls.add_instance_attribute('m_fbTx', 'double', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::m_lbTx [variable]
cls.add_instance_attribute('m_lbTx', 'double', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::m_rx [variable]
cls.add_instance_attribute('m_rx', 'std::map< unsigned int, ns3::AnimRxInfo >', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::m_txLoc [variable]
cls.add_instance_attribute('m_txLoc', 'ns3::Vector', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::m_txNodeId [variable]
cls.add_instance_attribute('m_txNodeId', 'uint32_t', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimPacketInfo::m_txnd [variable]
cls.add_instance_attribute('m_txnd', 'ns3::Ptr< ns3::NetDevice const >', is_const=False)
return

def register_Ns3AnimRxInfo_methods(root_module, cls):
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::AnimRxInfo(ns3::AnimRxInfo const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AnimRxInfo const &', 'arg0')])
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::AnimRxInfo() [constructor]
cls.add_constructor([])
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::AnimRxInfo(ns3::Time const & fbRx, ns3::Ptr<ns3::NetDevice const> nd, double rxRange) [constructor]
cls.add_constructor([param('ns3::Time const &', 'fbRx'), param('ns3::Ptr< ns3::NetDevice const >', 'nd'), param('double', 'rxRange')])
## animation-interface-helper.h (module 'netanim'): bool ns3::AnimRxInfo::IsPhyRxComplete() [member function]
cls.add_method('IsPhyRxComplete',
'bool',
[])
## animation-interface-helper.h (module 'netanim'): void ns3::AnimRxInfo::SetPhyRxComplete() [member function]
cls.add_method('SetPhyRxComplete',
'void',
[])
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::m_fbRx [variable]
cls.add_instance_attribute('m_fbRx', 'double', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::m_lbRx [variable]
cls.add_instance_attribute('m_lbRx', 'double', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::m_rxnd [variable]
cls.add_instance_attribute('m_rxnd', 'ns3::Ptr< ns3::NetDevice const >', is_const=False)
## animation-interface-helper.h (module 'netanim'): ns3::AnimRxInfo::rxRange [variable]
cls.add_instance_attribute('rxRange', 'double', is_const=False)
return
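
# AnimationInterface is the user-facing class of this module.  Like
# AnimPacketInfo, AnimRxInfo and AnimByteTag it is defined by the netanim
# extension itself (registered without import_from_module), whereas the rest of
# the types in this file are re-exported from ns.core, ns.network or ns.mobility.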
def register_Ns3AnimationInterface_methods(root_module, cls):
## animation-interface.h (module 'netanim'): ns3::AnimationInterface::AnimationInterface(ns3::AnimationInterface const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AnimationInterface const &', 'arg0')])
## animation-interface.h (module 'netanim'): ns3::AnimationInterface::AnimationInterface() [constructor]
cls.add_constructor([])
## animation-interface.h (module 'netanim'): ns3::AnimationInterface::AnimationInterface(std::string const filename, uint64_t maxPktsPerFile=100000, bool usingXML=true) [constructor]
cls.add_constructor([param('std::string const', 'filename'), param('uint64_t', 'maxPktsPerFile', default_value='100000'), param('bool', 'usingXML', default_value='true')])
## animation-interface.h (module 'netanim'): ns3::AnimationInterface::AnimationInterface(uint16_t port, bool usingXML=true) [constructor]
cls.add_constructor([param('uint16_t', 'port'), param('bool', 'usingXML', default_value='true')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::EnablePacketMetadata(bool enable) [member function]
cls.add_method('EnablePacketMetadata',
'void',
[param('bool', 'enable')])
## animation-interface.h (module 'netanim'): uint64_t ns3::AnimationInterface::GetTracePktCount() [member function]
cls.add_method('GetTracePktCount',
'uint64_t',
[])
## animation-interface.h (module 'netanim'): static bool ns3::AnimationInterface::IsInitialized() [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_static=True)
## animation-interface.h (module 'netanim'): bool ns3::AnimationInterface::IsStarted() [member function]
cls.add_method('IsStarted',
'bool',
[])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::ResetAnimWriteCallback() [member function]
cls.add_method('ResetAnimWriteCallback',
'void',
[])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::SetAnimWriteCallback(void (*)( char const * ) * cb) [member function]
cls.add_method('SetAnimWriteCallback',
'void',
[param('void ( * ) ( char const * ) *', 'cb')])
## animation-interface.h (module 'netanim'): static void ns3::AnimationInterface::SetConstantPosition(ns3::Ptr<ns3::Node> n, double x, double y, double z=0) [member function]
cls.add_method('SetConstantPosition',
'void',
[param('ns3::Ptr< ns3::Node >', 'n'), param('double', 'x'), param('double', 'y'), param('double', 'z', default_value='0')],
is_static=True)
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::SetMobilityPollInterval(ns3::Time t) [member function]
cls.add_method('SetMobilityPollInterval',
'void',
[param('ns3::Time', 't')])
## animation-interface.h (module 'netanim'): static void ns3::AnimationInterface::SetNodeDescription(ns3::Ptr<ns3::Node> n, std::string descr) [member function]
cls.add_method('SetNodeDescription',
'void',
[param('ns3::Ptr< ns3::Node >', 'n'), param('std::string', 'descr')],
is_static=True)
## animation-interface.h (module 'netanim'): static void ns3::AnimationInterface::SetNodeDescription(ns3::NodeContainer nc, std::string descr) [member function]
cls.add_method('SetNodeDescription',
'void',
[param('ns3::NodeContainer', 'nc'), param('std::string', 'descr')],
is_static=True)
## animation-interface.h (module 'netanim'): bool ns3::AnimationInterface::SetOutputFile(std::string const & fn) [member function]
cls.add_method('SetOutputFile',
'bool',
[param('std::string const &', 'fn')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::SetRandomPosition(bool setRandPos) [member function]
cls.add_method('SetRandomPosition',
'void',
[param('bool', 'setRandPos')])
## animation-interface.h (module 'netanim'): bool ns3::AnimationInterface::SetServerPort(uint16_t port) [member function]
cls.add_method('SetServerPort',
'bool',
[param('uint16_t', 'port')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::SetStartTime(ns3::Time t) [member function]
cls.add_method('SetStartTime',
'void',
[param('ns3::Time', 't')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::SetStopTime(ns3::Time t) [member function]
cls.add_method('SetStopTime',
'void',
[param('ns3::Time', 't')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::SetXMLOutput() [member function]
cls.add_method('SetXMLOutput',
'void',
[])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::ShowAll802_11(bool showAll) [member function]
cls.add_method('ShowAll802_11',
'void',
[param('bool', 'showAll')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::StartAnimation(bool restart=false) [member function]
cls.add_method('StartAnimation',
'void',
[param('bool', 'restart', default_value='false')])
## animation-interface.h (module 'netanim'): void ns3::AnimationInterface::StopAnimation() [member function]
cls.add_method('StopAnimation',
'void',
[])
return
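
# A minimal usage sketch for the binding registered above, assuming the
# generated extension is importable as ns.netanim and that 'node' is an
# ns3::Node obtained elsewhere in the script (the file name is arbitrary):
#
#   import ns.netanim
#   anim = ns.netanim.AnimationInterface("anim.xml")    # XML output, default packet limit
#   anim.EnablePacketMetadata(True)
#   ns.netanim.AnimationInterface.SetConstantPosition(node, 10.0, 20.0)
#   anim.StartAnimation()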
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return

def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
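
# ns3::Buffer is driven through ns3::Buffer::Iterator, registered next:
# Begin()/End() return iterators, and the iterator's WriteU8/ReadU8,
# WriteHtonU32/ReadNtohU32, WriteHtolsbU16/ReadLsbtohU16 etc. make the byte
# order explicit, mirroring the C++ serialization API.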
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return

def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return

def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
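# Hedged usage sketch for the Ipv4Address / Ipv4Mask bindings registered above
# (assumes the compiled ns.network extension module built from these bindings):
#
#     import ns.network
#     addr = ns.network.Ipv4Address("10.0.0.1")
#     mask = ns.network.Ipv4Mask("255.255.255.0")
#     net  = addr.CombineMask(mask)          # Ipv4Address::CombineMask -> network part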
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[])
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponent::EnvVarCheck(char const * name) [member function]
cls.add_method('EnvVarCheck',
'void',
[param('char const *', 'name')])
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
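# Hedged usage sketch for the Mac48Address bindings registered above (assumes the
# compiled ns.network extension module):
#
#     import ns.network
#     mac   = ns.network.Mac48Address("00:00:00:00:00:01")
#     bcast = ns.network.Mac48Address.GetBroadcast()   # static helper wrapped above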
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
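# Hedged usage sketch for the NodeContainer bindings registered above (assumes the
# compiled ns.network extension module):
#
#     import ns.network
#     nodes = ns.network.NodeContainer()
#     nodes.Create(3)                # NodeContainer::Create(uint32_t n)
#     first = nodes.Get(0)           # ns3::Ptr<ns3::Node>
#     count = nodes.GetN()           # 3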
def register_Ns3NodeList_methods(root_module, cls):
## node-list.h (module 'network'): ns3::NodeList::NodeList() [constructor]
cls.add_constructor([])
## node-list.h (module 'network'): ns3::NodeList::NodeList(ns3::NodeList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeList const &', 'arg0')])
## node-list.h (module 'network'): static uint32_t ns3::NodeList::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'uint32_t',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_static=True)
## node-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeList::Begin() [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_static=True)
## node-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeList::End() [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_static=True)
## node-list.h (module 'network'): static uint32_t ns3::NodeList::GetNNodes() [member function]
cls.add_method('GetNNodes',
'uint32_t',
[],
is_static=True)
## node-list.h (module 'network'): static ns3::Ptr<ns3::Node> ns3::NodeList::GetNode(uint32_t n) [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'n')],
is_static=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
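# Hedged usage sketch for the ObjectFactory bindings registered above (assumes the
# compiled ns.core extension module; the TypeId string is a hypothetical example and
# must name a TypeId registered by whatever ns-3 modules have been imported):
#
#     import ns.core
#     factory = ns.core.ObjectFactory()
#     factory.SetTypeId("ns3::SomeRegisteredTypeId")   # ObjectFactory::SetTypeId(std::string)
#     obj = factory.Create()                           # returns an ns3::Ptr<ns3::Object>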
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
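# Hedged usage sketch for the static Simulator bindings registered above (assumes the
# compiled ns.core extension module; Seconds() is the usual ns-3 time helper, wrapped
# outside this excerpt):
#
#     import ns.core
#     ns.core.Simulator.Stop(ns.core.Seconds(10.0))
#     now = ns.core.Simulator.Now()        # ns3::Time
#     ns.core.Simulator.Destroy()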
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
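## ns3::int64x64_t is the high-precision fixed-point type (the double-backed
## int64x64-double.h variant here); one binary operator registration is
## emitted per operand type, which is why '*', '+', '-' and '/' each appear
## once for every integral width plus double and int64x64_t itself.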
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
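## ns3::AnimByteTag is the netanim packet tag storing a 64-bit animation UID
## (Set/Get below) and implementing the usual Tag interface
## (Serialize/Deserialize via TagBuffer, GetSerializedSize, Print).
## Illustrative use from Python, assuming the generated ns.netanim module is
## importable (sketch only, not executed here):
##   import ns.netanim
##   tag = ns.netanim.AnimByteTag()
##   tag.Set(42)
##   uid = tag.Get()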
def register_Ns3AnimByteTag_methods(root_module, cls):
## animation-interface.h (module 'netanim'): ns3::AnimByteTag::AnimByteTag() [constructor]
cls.add_constructor([])
## animation-interface.h (module 'netanim'): ns3::AnimByteTag::AnimByteTag(ns3::AnimByteTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AnimByteTag const &', 'arg0')])
## animation-interface.h (module 'netanim'): void ns3::AnimByteTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## animation-interface.h (module 'netanim'): uint64_t ns3::AnimByteTag::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## animation-interface.h (module 'netanim'): ns3::TypeId ns3::AnimByteTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## animation-interface.h (module 'netanim'): uint32_t ns3::AnimByteTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## animation-interface.h (module 'netanim'): static ns3::TypeId ns3::AnimByteTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## animation-interface.h (module 'netanim'): void ns3::AnimByteTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## animation-interface.h (module 'netanim'): void ns3::AnimByteTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## animation-interface.h (module 'netanim'): void ns3::AnimByteTag::Set(uint64_t AnimUid) [member function]
cls.add_method('Set',
'void',
[param('uint64_t', 'AnimUid')])
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
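## The next blocks register the SimpleRefCount<T, empty, DefaultDeleter<T>>
## template instantiations (AttributeAccessor, AttributeChecker,
## AttributeValue, CallbackImplBase, EventImpl, NixVector, Packet,
## TraceSourceAccessor); each one only exposes the constructors and the
## static Cleanup() helper.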
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
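## ns3::Time: arithmetic and comparison operators plus the unit conversion
## API (GetSeconds, GetMilliSeconds, To/From with a Time::Unit).
## Illustrative use from Python, assuming the generated ns.core module is
## importable (sketch only, not executed here):
##   import ns.core
##   t = ns.core.Seconds(1.5)
##   ms = t.GetMilliSeconds()   # expected: 1500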
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
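## The remaining blocks pair each address/mask type with its AttributeChecker
## and AttributeValue wrappers (Ipv4Address, Ipv4Mask, Ipv6Address, ...);
## every *Value class follows the same Copy/Get/Set/SerializeToString/
## DeserializeFromString pattern.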
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3MobilityModel_methods(root_module, cls):
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel(ns3::MobilityModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')])
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel() [constructor]
cls.add_constructor([])
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetDistanceFrom(ns3::Ptr<const ns3::MobilityModel> position) const [member function]
cls.add_method('GetDistanceFrom',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'position')],
is_const=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetPosition() const [member function]
cls.add_method('GetPosition',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetRelativeSpeed(ns3::Ptr<const ns3::MobilityModel> other) const [member function]
cls.add_method('GetRelativeSpeed',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'other')],
is_const=True)
## mobility-model.h (module 'mobility'): static ns3::TypeId ns3::MobilityModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetVelocity() const [member function]
cls.add_method('GetVelocity',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::SetPosition(ns3::Vector const & position) [member function]
cls.add_method('SetPosition',
'void',
[param('ns3::Vector const &', 'position')])
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::NotifyCourseChange() const [member function]
cls.add_method('NotifyCourseChange',
'void',
[],
is_const=True, visibility='protected')
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetPosition() const [member function]
cls.add_method('DoGetPosition',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetVelocity() const [member function]
cls.add_method('DoGetVelocity',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::DoSetPosition(ns3::Vector const & position) [member function]
cls.add_method('DoSetPosition',
'void',
[param('ns3::Vector const &', 'position')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3ConfigMatchContainer_methods(root_module, cls):
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(ns3::Config::MatchContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Config::MatchContainer const &', 'arg0')])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer() [constructor]
cls.add_constructor([])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > const & objects, std::vector<std::string, std::allocator<std::string> > const & contexts, std::string path) [constructor]
cls.add_constructor([param('std::vector< ns3::Ptr< ns3::Object > > const &', 'objects'), param('std::vector< std::string > const &', 'contexts'), param('std::string', 'path')])
## config.h (module 'core'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Connect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Connect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::ConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::Disconnect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Disconnect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::DisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Config::MatchContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetMatchedPath(uint32_t i) const [member function]
cls.add_method('GetMatchedPath',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): uint32_t ns3::Config::MatchContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetPath() const [member function]
cls.add_method('GetPath',
'std::string',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
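# When run directly, this generated module writes the PyBindGen C++ wrapper
# code for every registered class to stdout via FileCodeSink(sys.stdout); a
# typical (assumed) way to capture it is to redirect the output, e.g.
#   python <this_module>.py > ns3module.cc
# where the exact invocation depends on the ns-3 build setup.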
| gpl-2.0 |
rbheromax/android_kernel_htc_a11 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
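#
# Example invocations (the process name below is illustrative):
#   perf script -s sctop.py            # all processes, default 3 second refresh
#   perf script -s sctop.py 5          # all processes, refresh every 5 seconds
#   perf script -s sctop.py firefox 5  # only syscalls issued by 'firefox'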
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
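# Runs on the helper thread started from trace_begin(): every `interval`
# seconds it clears the terminal, prints the per-syscall counts sorted by
# frequency, then resets the counters for the next sampling window.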
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
LUTAN/tensorflow | tensorflow/python/util/decorator_utils_test.py | 139 | 4197 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""decorator_utils tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
def _test_function(unused_arg=0):
pass
class GetQualifiedNameTest(test.TestCase):
def test_method(self):
self.assertEqual(
"GetQualifiedNameTest.test_method",
decorator_utils.get_qualified_name(GetQualifiedNameTest.test_method))
def test_function(self):
self.assertEqual("_test_function",
decorator_utils.get_qualified_name(_test_function))
class AddNoticeToDocstringTest(test.TestCase):
def _check(self, doc, expected):
self.assertEqual(
decorator_utils.add_notice_to_docstring(
doc=doc,
instructions="Instructions",
no_doc_str="Nothing here",
suffix_str="(suffix)",
notice=["Go away"]),
expected)
def test_regular(self):
expected = ("Brief (suffix)\n\nGo away\nInstructions\n\nDocstring\n\n"
"Args:\n arg1: desc")
# No indent for main docstring
self._check("Brief\n\nDocstring\n\nArgs:\n arg1: desc", expected)
# 2 space indent for main docstring, blank lines not indented
self._check("Brief\n\n Docstring\n\n Args:\n arg1: desc", expected)
# 2 space indent for main docstring, blank lines indented as well.
self._check("Brief\n \n Docstring\n \n Args:\n arg1: desc", expected)
# No indent for main docstring, first line blank.
self._check("\n Brief\n \n Docstring\n \n Args:\n arg1: desc",
expected)
# 2 space indent, first line blank.
self._check("\n Brief\n \n Docstring\n \n Args:\n arg1: desc",
expected)
def test_brief_only(self):
expected = "Brief (suffix)\n\nGo away\nInstructions"
self._check("Brief", expected)
self._check("Brief\n", expected)
self._check("Brief\n ", expected)
self._check("\nBrief\n ", expected)
self._check("\n Brief\n ", expected)
def test_no_docstring(self):
expected = "Nothing here\n\nGo away\nInstructions"
self._check(None, expected)
self._check("", expected)
def test_no_empty_line(self):
expected = "Brief (suffix)\n\nGo away\nInstructions\n\nDocstring"
# No second line indent
self._check("Brief\nDocstring", expected)
# 2 space second line indent
self._check("Brief\n Docstring", expected)
# No second line indent, first line blank
self._check("\nBrief\nDocstring", expected)
# 2 space second line indent, first line blank
self._check("\n Brief\n Docstring", expected)
class ValidateCallableTest(test.TestCase):
def test_function(self):
decorator_utils.validate_callable(_test_function, "test")
def test_method(self):
decorator_utils.validate_callable(self.test_method, "test")
def test_callable(self):
class TestClass(object):
def __call__(self):
pass
decorator_utils.validate_callable(TestClass(), "test")
def test_partial(self):
partial = functools.partial(_test_function, unused_arg=7)
decorator_utils.validate_callable(partial, "test")
def test_fail_non_callable(self):
x = 0
self.assertRaises(ValueError, decorator_utils.validate_callable, x, "test")
if __name__ == "__main__":
test.main()
| apache-2.0 |
thnee/ansible | lib/ansible/modules/cloud/cloudstack/cs_instance_nic.py | 13 | 7635 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Marc-Aurèle Brothier @marcaurele
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instance_nic
short_description: Manages NICs of an instance on Apache CloudStack-based clouds.
description:
- Add a NIC to, or remove a NIC from, a network.
version_added: '2.4'
author:
- Marc-Aurèle Brothier (@marcaurele)
- René Moser (@resmo)
options:
vm:
description:
- Name of instance.
required: true
type: str
aliases: [ name ]
network:
description:
- Name of the network.
type: str
required: true
ip_address:
description:
- IP address to be used for the nic.
type: str
vpc:
description:
- Name of the VPC the I(vm) is related to.
type: str
domain:
description:
- Domain the instance is related to.
type: str
account:
description:
- Account the instance is related to.
type: str
project:
description:
- Name of the project the instance is deployed in.
type: str
zone:
description:
- Name of the zone in which the instance is deployed in.
- If not set, default zone is used.
type: str
state:
description:
- State of the nic.
type: str
default: present
choices: [ present, absent ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Add a nic on another network
cs_instance_nic:
vm: privnet
network: privNetForBasicZone
delegate_to: localhost
- name: Ensure IP address on a nic
cs_instance_nic:
vm: privnet
ip_address: 10.10.11.32
network: privNetForBasicZone
delegate_to: localhost
- name: Remove a secondary nic
cs_instance_nic:
vm: privnet
state: absent
network: privNetForBasicZone
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the nic.
returned: success
type: str
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
vm:
description: Name of the VM.
returned: success
type: str
sample: web-01
ip_address:
description: Primary IP of the NIC.
returned: success
type: str
sample: 10.10.10.10
netmask:
description: Netmask of the NIC.
returned: success
type: str
sample: 255.255.255.0
mac_address:
description: MAC address of the NIC.
returned: success
type: str
sample: 02:00:33:31:00:e4
network:
description: Name of the network if not default.
returned: success
type: str
sample: sync network
domain:
description: Domain the VM is related to.
returned: success
type: str
sample: example domain
account:
description: Account the VM is related to.
returned: success
type: str
sample: example account
project:
description: Name of project the VM is related to.
returned: success
type: str
sample: Production
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (AnsibleCloudStack,
cs_argument_spec,
cs_required_together)
class AnsibleCloudStackInstanceNic(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstanceNic, self).__init__(module)
self.nic = None
self.returns = {
'ipaddress': 'ip_address',
'macaddress': 'mac_address',
'netmask': 'netmask',
}
def get_nic(self):
if self.nic:
return self.nic
args = {
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
}
nics = self.query_api('listNics', **args)
if nics:
self.nic = nics['nic'][0]
return self.nic
return None
def get_nic_from_result(self, result):
for nic in result.get('nic') or []:
if nic['networkid'] == self.get_network(key='id'):
return nic
def add_nic(self):
self.result['changed'] = True
args = {
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
'ipaddress': self.module.params.get('ip_address'),
}
if not self.module.check_mode:
res = self.query_api('addNicToVirtualMachine', **args)
if self.module.params.get('poll_async'):
vm = self.poll_job(res, 'virtualmachine')
self.nic = self.get_nic_from_result(result=vm)
return self.nic
def update_nic(self, nic):
# Do not try to update if no IP address is given
ip_address = self.module.params.get('ip_address')
if not ip_address:
return nic
args = {
'nicid': nic['id'],
'ipaddress': ip_address,
}
if self.has_changed(args, nic, ['ipaddress']):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateVmNicIp', **args)
if self.module.params.get('poll_async'):
vm = self.poll_job(res, 'virtualmachine')
self.nic = self.get_nic_from_result(result=vm)
return self.nic
def remove_nic(self, nic):
self.result['changed'] = True
args = {
'virtualmachineid': self.get_vm(key='id'),
'nicid': nic['id'],
}
if not self.module.check_mode:
res = self.query_api('removeNicFromVirtualMachine', **args)
if self.module.params.get('poll_async'):
self.poll_job(res, 'virtualmachine')
return nic
def present_nic(self):
nic = self.get_nic()
if not nic:
nic = self.add_nic()
else:
nic = self.update_nic(nic)
return nic
def absent_nic(self):
nic = self.get_nic()
if nic:
return self.remove_nic(nic)
return nic
def get_result(self, nic):
super(AnsibleCloudStackInstanceNic, self).get_result(nic)
if nic and not self.module.params.get('network'):
self.module.params['network'] = nic.get('networkid')
self.result['network'] = self.get_network(key='name')
self.result['vm'] = self.get_vm(key='name')
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
vm=dict(required=True, aliases=['name']),
network=dict(required=True),
vpc=dict(),
ip_address=dict(),
state=dict(choices=['present', 'absent'], default='present'),
domain=dict(),
account=dict(),
project=dict(),
zone=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True,
)
acs_nic = AnsibleCloudStackInstanceNic(module)
state = module.params.get('state')
if state == 'absent':
nic = acs_nic.absent_nic()
else:
nic = acs_nic.present_nic()
result = acs_nic.get_result(nic)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
haowu80s/spark | examples/src/main/python/mllib/recommendation_example.py | 3 | 2066 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Collaborative Filtering Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
# $example on$
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonCollaborativeFilteringExample")
# $example on$
# Load and parse the data
data = sc.textFile("data/mllib/als/test.data")
ratings = data.map(lambda l: l.split(','))\
.map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2])))
# Build the recommendation model using Alternating Least Squares
rank = 10
numIterations = 10
model = ALS.train(ratings, rank, numIterations)
# Evaluate the model on training data
testdata = ratings.map(lambda p: (p[0], p[1]))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
print("Mean Squared Error = " + str(MSE))
# Save and load model
model.save(sc, "target/tmp/myCollaborativeFilter")
sameModel = MatrixFactorizationModel.load(sc, "target/tmp/myCollaborativeFilter")
# $example off$
| apache-2.0 |
sirchia/CouchPotatoServer | libs/requests/sessions.py | 31 | 9484 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
from .defaults import defaults
from .models import Request
from .hooks import dispatch_hook
from .utils import header_expand
from .packages.urllib3.poolmanager import PoolManager
def merge_kwargs(local_kwarg, default_kwarg):
"""Merges kwarg dictionaries.
If a local key in the dictionary is set to None, it will be removed.
"""
if default_kwarg is None:
return local_kwarg
if isinstance(local_kwarg, str):
return local_kwarg
if local_kwarg is None:
return default_kwarg
# Bypass if not a dictionary (e.g. timeout)
if not hasattr(default_kwarg, 'items'):
return local_kwarg
# Update new values.
kwargs = default_kwarg.copy()
kwargs.update(local_kwarg)
# Remove keys that are set to None.
for (k, v) in list(local_kwarg.items()):
if v is None:
del kwargs[k]
return kwargs
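# Illustrative sketch (not part of the upstream requests code): a throwaway
# helper showing the merge semantics documented above; the sample dicts are
# hypothetical.
def _merge_kwargs_example():
    session_defaults = {'Accept': '*/*', 'X-Debug': 'on', 'timeout': 5}
    local_overrides = {'X-Debug': None, 'User-Agent': 'demo-agent'}
    # Local values override session defaults; a local value of None removes
    # the key from the merged result entirely.
    merged = merge_kwargs(local_overrides, session_defaults)
    # merged == {'Accept': '*/*', 'timeout': 5, 'User-Agent': 'demo-agent'}
    return merged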
class Session(object):
"""A Requests session."""
__attrs__ = [
'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
'params', 'config', 'verify', 'cert']
def __init__(self,
headers=None,
cookies=None,
auth=None,
timeout=None,
proxies=None,
hooks=None,
params=None,
config=None,
prefetch=False,
verify=True,
cert=None):
self.headers = headers or {}
self.cookies = cookies or {}
self.auth = auth
self.timeout = timeout
self.proxies = proxies or {}
self.hooks = hooks or {}
self.params = params or {}
self.config = config or {}
self.prefetch = prefetch
self.verify = verify
self.cert = cert
for (k, v) in list(defaults.items()):
self.config.setdefault(k, v)
self.init_poolmanager()
# Set up a CookieJar to be used by default
self.cookies = {}
# Add passed cookies in.
if cookies is not None:
self.cookies.update(cookies)
def init_poolmanager(self):
self.poolmanager = PoolManager(
num_pools=self.config.get('pool_connections'),
maxsize=self.config.get('pool_maxsize')
)
def __repr__(self):
return '<requests-client at 0x%x>' % (id(self))
def __enter__(self):
return self
def __exit__(self, *args):
pass
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
return_response=True,
config=None,
prefetch=False,
verify=None,
cert=None):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request.
:param allow_redirects: (optional) Boolean. Set to True by default.
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param return_response: (optional) If False, an un-sent Request object will be returned.
:param config: (optional) A configuration dictionary.
:param prefetch: (optional) if ``True``, the response content will be immediately downloaded.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
"""
method = str(method).upper()
# Default empty dicts for dict params.
cookies = {} if cookies is None else cookies
data = {} if data is None else data
files = {} if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
prefetch = self.prefetch or prefetch
# use session's hooks as defaults
for key, cb in list(self.hooks.items()):
hooks.setdefault(key, cb)
# Expand header values.
if headers:
for k, v in list(headers.items()) or {}:
headers[k] = header_expand(v)
args = dict(
method=method,
url=url,
data=data,
params=params,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
hooks=hooks,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
config=config,
verify=verify,
cert=cert,
_poolmanager=self.poolmanager
)
# Merge local kwargs with session kwargs.
for attr in self.__attrs__:
session_val = getattr(self, attr, None)
local_val = args.get(attr)
args[attr] = merge_kwargs(local_val, session_val)
# Arguments manipulation hook.
args = dispatch_hook('args', args['hooks'], args)
# Create the (empty) response.
r = Request(**args)
# Give the response some context.
r.session = self
# Don't send if asked nicely.
if not return_response:
return r
# Send the HTTP Request.
r.send(prefetch=prefetch)
# Send any cookies back up to the session.
self.cookies.update(r.response.cookies)
# Return the response.
return r.response
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('get', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('options', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('head', url, **kwargs)
def post(self, url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('post', url, data=data, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('put', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('patch', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('delete', url, **kwargs)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager()
def session(**kwargs):
"""Returns a :class:`Session` for context-management."""
return Session(**kwargs)
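# Hedged usage sketch (not from the upstream codebase): the session() factory
# above is meant for context-managed requests that share settings; the URL,
# header, and parameter values here are placeholders.
def _session_usage_example():
    with session(headers={'Accept': 'application/json'}) as s:
        # Headers passed at construction are merged into every request.
        response = s.get('http://example.com/api', params={'q': 'demo'})
        return response.status_code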
| gpl-3.0 |
gnulinooks/sympy | sympy/thirdparty/pyglet/pyglet/font/carbon.py | 4 | 12954 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
# TODO Tiger and later: need to set kWindowApplicationScaledAttribute for DPI
# independence?
from ctypes import *
import string
import math
from sys import byteorder
from pyglet.font import base
import pyglet.image
from pyglet.window.carbon import carbon, _oscheck
from pyglet.window.carbon import _create_cfstring
from pyglet.window.carbon.types import *
class FixedPoint(Structure):
_fields_ = [
('x', Fixed),
('y', Fixed)
]
class ATSTrapezoid(Structure):
_fields_ = [
('upperLeft', FixedPoint),
('upperRight', FixedPoint),
('lowerRight', FixedPoint),
('lowerLeft', FixedPoint)
]
# TODO: most of the ATS and CG here not used any more.
CGGlyph = c_ushort
ATSUFontID = c_uint32
RGBColor = c_short * 3
ATSURGBAlphaColor = c_float * 4
kCGImageAlphaNone = 0
kCGImageAlphaPremultipliedLast = 1
kCGTextFill = 0
kATSUInvalidFontErr = -8796
kATSFontContextUnspecified = 0
kATSFontContextGlobal = 1
kATSFontContextLocal = 2
kATSFontFilterSelectorUnspecified = 0
kATSFontFilterSelectorGeneration = 3
kATSFontFilterSelectorFontFamily = 7
kATSFontFilterSelectorFontFamilyApplierFunction = 8
kATSFontFilterSelectorFontApplierFunction = 9
kATSOptionFlagsDoNotNotify = 0x00000001 << 8
kATSOptionFlagsIterationScopeMask = 0x00000007 << 12
kATSOptionFlagsDefaultScope = 0x00000000 << 12
kATSOptionFlagsUnRestrictedScope = 0x00000001 << 12
kATSOptionFlagsRestrictedScope = 0x00000002 << 12
kATSOptionFlagsProcessSubdirectories = 0x00000001 << 6
kATSUFromTextBeginning = c_ulong(0xFFFFFFFF)
kATSUToTextEnd = c_ulong(0xFFFFFFFF)
kATSULineAscentTag = 8
kATSULineDescentTag = 9
ATSUTextMeasurement = Fixed
kATSUQDBoldfaceTag = 256
kATSUQDItalicTag = 257
kATSUFontTag = 261
kATSUSizeTag = 262
kATSUCGContextTag = 32767
kATSUColorTag = 263
kATSURGBAlphaColorTag = 288
kATSULineWidthTag = 1
kFontFullName = 4
kFontNoPlatformCode = c_ulong(-1)
kFontNoScriptCode = c_ulong(-1)
kFontNoLanguageCode = c_ulong(-1)
kATSUseDeviceOrigins = 1
kATSFontFormatUnspecified = 0
kATSFontContextLocal = 2
carbon.CGColorSpaceCreateWithName.restype = c_void_p
carbon.CGBitmapContextCreate.restype = POINTER(c_void_p)
UniCharArrayOffset = c_uint32
UniCharCount = c_uint32
def fixed(value):
# This is a guess... could easily be wrong
#return c_int32(int(value) * (1 << 16))
return c_int32(carbon.Long2Fix(c_long(int(value))))
carbon.Fix2X.restype = c_double
def fix2float(value):
return carbon.Fix2X(value)
def create_atsu_style(attributes):
# attributes is a dict of ATSUAttributeTag => ctypes value
tags, values = zip(*attributes.items())
tags = (c_int * len(tags))(*tags)
sizes = (c_uint * len(values))(*[sizeof(v) for v in values])
values = (c_void_p * len(values))(*[cast(pointer(v), c_void_p) \
for v in values])
style = c_void_p()
carbon.ATSUCreateStyle(byref(style))
carbon.ATSUSetAttributes(style, len(tags), tags, sizes, values)
return style
def set_layout_attributes(layout, attributes):
if attributes:
# attributes is a dict of ATSUAttributeTag => ctypes value
tags, values = zip(*attributes.items())
tags = (c_int * len(tags))(*tags)
sizes = (c_uint * len(values))(*[sizeof(v) for v in values])
values = (c_void_p * len(values))(*[cast(pointer(v), c_void_p) \
for v in values])
carbon.ATSUSetLayoutControls(layout, len(tags), tags, sizes, values)
def str_ucs2(text):
if byteorder == 'big':
text = text.encode('utf_16_be')
else:
text = text.encode('utf_16_le') # explicit endian avoids BOM
return create_string_buffer(text + '\0')
class CarbonGlyphRenderer(base.GlyphRenderer):
_bitmap = None
_bitmap_context = None
_bitmap_rect = None
def __init__(self, font):
super(CarbonGlyphRenderer, self).__init__(font)
self._create_bitmap_context(256, 256)
self.font = font
def __del__(self):
if self._bitmap_context:
carbon.CGContextRelease(self._bitmap_context)
def render(self, text):
# Convert text to UCS2
text_len = len(text)
text = str_ucs2(text)
# Create ATSU text layout for this text and font
layout = c_void_p()
carbon.ATSUCreateTextLayout(byref(layout))
set_layout_attributes(layout, {
kATSUCGContextTag: self._bitmap_context})
carbon.ATSUSetTextPointerLocation(layout,
text,
kATSUFromTextBeginning,
kATSUToTextEnd,
text_len)
carbon.ATSUSetRunStyle(layout, self.font.atsu_style,
kATSUFromTextBeginning, kATSUToTextEnd)
# Turning on transient font matching screws up font layout
# predictability when strange fonts are installed
carbon.ATSUSetTransientFontMatching(layout, False)
# Get bitmap dimensions required
rect = Rect()
carbon.ATSUMeasureTextImage(layout,
kATSUFromTextBeginning,
kATSUToTextEnd,
0, 0,
byref(rect))
image_width = rect.right - rect.left + 2
image_height = rect.bottom - rect.top + 2
baseline = rect.bottom + 1
lsb = rect.left
# Resize Quartz context if necessary
if (image_width > self._bitmap_rect.size.width or
image_height > self._bitmap_rect.size.height):
self._create_bitmap_context(
int(max(image_width, self._bitmap_rect.size.width)),
int(max(image_height, self._bitmap_rect.size.height)))
set_layout_attributes(layout, {
kATSUCGContextTag: self._bitmap_context})
# Get typographic box, which gives advance.
bounds_actual = c_uint32()
bounds = ATSTrapezoid()
carbon.ATSUGetGlyphBounds(
layout,
0, 0,
kATSUFromTextBeginning,
kATSUToTextEnd,
kATSUseDeviceOrigins,
1,
byref(bounds),
byref(bounds_actual))
advance = fix2float(bounds.lowerRight.x) - fix2float(bounds.lowerLeft.x)
# Draw to the bitmap
carbon.CGContextClearRect(self._bitmap_context, self._bitmap_rect)
carbon.ATSUDrawText(layout,
0,
kATSUToTextEnd,
fixed(-lsb + 1), fixed(baseline))
# A negative pitch is required, but it is much faster to load the
# glyph upside-down and flip the tex_coords. Note region used
# to start at top of glyph image.
pitch = int(4 * self._bitmap_rect.size.width)
image = pyglet.image.ImageData(image_width,
self._bitmap_rect.size.height, 'RGBA', self._bitmap, pitch)
skip_rows = int(self._bitmap_rect.size.height - image_height)
image = image.get_region(0, skip_rows, image.width, image_height)
glyph = self.font.create_glyph(image)
glyph.set_bearings(baseline, lsb - 1, int(advance))
t = list(glyph.tex_coords)
glyph.tex_coords = t[9:12] + t[6:9] + t[3:6] + t[:3]
return glyph
def _create_bitmap_context(self, width, height):
'''Create or recreate bitmap and Quartz context.'''
if self._bitmap_context:
carbon.CGContextRelease(self._bitmap_context)
components = 4
pitch = width * components
self._bitmap = (c_ubyte * (pitch * height))()
color_space = carbon.CGColorSpaceCreateDeviceRGB()
context = carbon.CGBitmapContextCreate(self._bitmap,
width, height, 8, pitch,
color_space, kCGImageAlphaPremultipliedLast)
carbon.CGColorSpaceRelease(color_space)
# Disable RGB decimated antialiasing, use standard
# antialiasing which won't break alpha.
carbon.CGContextSetShouldSmoothFonts(context, False)
carbon.CGContextSetShouldAntialias(context, True)
self._bitmap_context = context
self._bitmap_rect = CGRect()
self._bitmap_rect.origin.x = 0
self._bitmap_rect.origin.y = 0
self._bitmap_rect.size.width = width
self._bitmap_rect.size.height = height
class CarbonFont(base.Font):
glyph_renderer_class = CarbonGlyphRenderer
def __init__(self, name, size, bold=False, italic=False, dpi=None):
super(CarbonFont, self).__init__()
if not name:
name = 'Helvetica'
if dpi is not None:
# If application is not DPI-aware, DPI is fixed at 72. Scale
# font size to emulate other DPI if necessary. This will need
# to be fixed if issue #87 is implemented.
size = size * dpi / 72.
font_id = ATSUFontID()
carbon.ATSUFindFontFromName(
name,
len(name),
kFontFullName,
kFontNoPlatformCode,
kFontNoScriptCode,
kFontNoLanguageCode,
byref(font_id))
attributes = {
kATSUSizeTag: fixed(size),
kATSUFontTag: font_id,
kATSURGBAlphaColorTag: ATSURGBAlphaColor(1, 1, 1, 1),
kATSUQDBoldfaceTag: c_byte(bold),
kATSUQDItalicTag: c_byte(italic)
}
self.atsu_style = create_atsu_style(attributes)
self.calculate_metrics()
@classmethod
def have_font(cls, name):
font_id = ATSUFontID()
r = carbon.ATSUFindFontFromName(
name,
len(name),
kFontFullName,
kFontNoPlatformCode,
kFontNoScriptCode,
kFontNoLanguageCode,
byref(font_id))
return r != kATSUInvalidFontErr
def calculate_metrics(self):
# It seems the only way to get the font's ascent and descent is to lay
# out some glyphs and measure them.
# fake ucs2 string
text = '\0a'
layout = c_void_p()
carbon.ATSUCreateTextLayout(byref(layout))
carbon.ATSUSetTextPointerLocation(layout, text,
kATSUFromTextBeginning, kATSUToTextEnd, 1)
carbon.ATSUSetRunStyle(layout, self.atsu_style,
kATSUFromTextBeginning, kATSUToTextEnd)
# determine the metrics for this font only
carbon.ATSUSetTransientFontMatching(layout, False)
value = ATSUTextMeasurement()
carbon.ATSUGetLineControl(layout, 0, kATSULineAscentTag,
sizeof(value), byref(value), None)
self.ascent = int(math.ceil(fix2float(value)))
carbon.ATSUGetLineControl(layout, 0, kATSULineDescentTag,
sizeof(value), byref(value), None)
self.descent = -int(math.ceil(fix2float(value)))
@classmethod
def add_font_data(cls, data):
container = c_void_p()
r = carbon.ATSFontActivateFromMemory(data, len(data),
kATSFontContextLocal, kATSFontFormatUnspecified, None, 0,
byref(container))
_oscheck(r)
| bsd-3-clause |
cysuncn/python | spark/crm/PROC_M_MID_PER_ASSETS.py | 1 | 3335 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_M_MID_PER_ASSETS').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates used in processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
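# Worked example (illustrative only): for etl_date = '20160315' the
# derivations above yield V_DT = '20160315', V_DT_LD = '20160314',
# V_DT_FMD = '20160301', V_DT_LMD = '20160229' (2016 is a leap year) and
# V_DT10 = '2016-03-15'.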
V_STEP = 0
#---------------------------------------------- business logic begins ----------------------------------------------------------
# source tables
TMP_PER_ASSETS_LEND = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_LEND/*')
TMP_PER_ASSETS_LEND.registerTempTable("TMP_PER_ASSETS_LEND")
TMP_PER_ASSETS_INSU = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_INSU/*')
TMP_PER_ASSETS_INSU.registerTempTable("TMP_PER_ASSETS_INSU")
TMP_PER_ASSETS_ACCS = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_ACCS/*')
TMP_PER_ASSETS_ACCS.registerTempTable("TMP_PER_ASSETS_ACCS")
TMP_PER_ASSETS_SAVE = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_SAVE/*')
TMP_PER_ASSETS_SAVE.registerTempTable("TMP_PER_ASSETS_SAVE")
# target table
#TMP_PER_ASSETS_SUM full snapshot table
#Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT
CUST_ID
,'0' AS PRD_TYP
,SUM(MONTH_BAL) AS MONTH_BAL
,SUM(MONTH_AVG_BAL) AS MONTH_AVG_BAL
,SUM(THREE_MONTH_AVG_BAL) AS THREE_MONTH_AVG_BAL
,SUM(LAST_MONTH_BAL) AS LAST_MONTH_BAL
,SUM(LAST_MONTH_AVG_BAL) AS LAST_MONTH_AVG_BAL
,SUM(LTHREE_MONTH_AVG_BAL) AS LTHREE_MONTH_AVG_BAL
,SUM(YEAR_BAL) AS YEAR_BAL
,SUM(YEAR_AVG_BAL) AS YEAR_AVG_BAL
,SUM(YEAR_THREE_AVG_BAL) AS YEAR_THREE_AVG_BAL
,FR_ID
FROM
(SELECT * FROM TMP_PER_ASSETS_LEND
UNION ALL
SELECT * FROM TMP_PER_ASSETS_INSU
UNION ALL
SELECT * FROM TMP_PER_ASSETS_ACCS
UNION ALL
SELECT * FROM TMP_PER_ASSETS_SAVE
)A
GROUP BY CUST_ID,FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_PER_ASSETS_SUM = sqlContext.sql(sql)
TMP_PER_ASSETS_SUM.registerTempTable("TMP_PER_ASSETS_SUM")
dfn="TMP_PER_ASSETS_SUM/"+V_DT+".parquet"
TMP_PER_ASSETS_SUM.cache()
nrows = TMP_PER_ASSETS_SUM.count()
TMP_PER_ASSETS_SUM.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_PER_ASSETS_SUM.unpersist()
# Full snapshot table: after saving, delete the previous day's data
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_PER_ASSETS_SUM/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_PER_ASSETS_SUM lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 |
rocio/rethinkdb | bench/serializer-bench/clean_bench.py | 5 | 3954 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
import subprocess, stat, os, re, time
def format_args(d):
"""Formats a dictionary of key-value pairs into a list of arguments of the form
['--KEY1', 'VALUE1', '--KEY2', 'VALUE2', ...] suitable for passing to the subprocess module."""
args = []
for (k, v) in d.iteritems():
assert k[0] != '-'
args.append("--" + k)
args.append(str(v))
return args
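# Illustrative sketch (not part of the original benchmark script): what
# format_args produces for a small parameter dictionary.
def _format_args_example():
    args = format_args({"block-size": 4096, "extent-size": 1024 * 1024})
    # e.g. ['--block-size', '4096', '--extent-size', '1048576'];
    # ordering follows the dictionary's iteration order.
    return args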
def run_clean_bench(drive_path, parameters, workload):
"""Benchmark the given drive with the RethinkDB serializer.
'parameters' is the parameters for the serializer; keys should be flags that can be passed to
RethinkDB without the leading "--". Example: { "extent-size": 1024*1024, "block-size": 4096 }
'workload' is workload parameters for serializer-bench, in the same format as 'parameters'.
Return value will be None. Test output will be put in "transactions.txt"."""
drive_path = os.path.abspath(drive_path)
assert stat.S_ISBLK(os.stat(drive_path)[stat.ST_MODE])
assert " " not in drive_path
assert "duration" not in parameters
# Secure-erase the drive as the first step of putting it in a steady state
# Note that 'hdparm --secure-erase' is only safe for use on solid state drives!
if False:
print "Secure erasing %r..." % drive_path
start_time = time.time()
drive_password = "password"
subprocess.check_call(["hdparm", "--user-master", "u", "--security-set-pass", drive_password, drive_path])
try:
subprocess.check_call(["hdparm", "--user-master", "u", "--security-erase", drive_password, drive_path])
finally:
subprocess.check_call(["hdparm", "--user-master", "u", "--security-unlock", drive_password, drive_path])
print "Took %.3f seconds." % (time.time() - start_time)
# Now fill it with the RethinkDB serializer, the second step of putting it into a steady state
print "Putting %r into a steady state..." % drive_path
start_time = time.time()
serv = subprocess.Popen(
["./serializer-bench", "-f", drive_path, "--forever"] + format_args(parameters),
stderr = subprocess.PIPE, stdout = subprocess.PIPE)
try:
output = serv.communicate()[1]
finally:
try: serv.terminate()
except RuntimeError: pass
print "Took %.3f seconds." % (time.time() - start_time)
assert "RethinkDB ran out of disk space." in output
# Now run the actual test on it
print "Running the actual test..."
start_time = time.time()
serv = subprocess.Popen(
["./serializer-bench", "-f", drive_path, "--log", "transactions.txt"] + format_args(parameters) + format_args(workload),
stderr = subprocess.PIPE, stdout = subprocess.PIPE)
try:
serv.wait()
finally:
try: serv.terminate()
except RuntimeError: pass
if serv.returncode != 0:
raise RuntimeError("RethinkDB serializer failed:\n" + output)
print "Took %.3f seconds." % (time.time() - start_time)
print "Done."
if __name__ == "__main__":
parameter_flags = ["block-size", "extent-size", "active-data-extents", "file-zone-size"]
workload_flags = ["duration", "concurrent", "inserts-per-txn", "updates-per-txn"]
import sys
del sys.argv[0]
drive_path = sys.argv.pop(0)
parameters = {}
workload = {}
while sys.argv:
flag = sys.argv.pop(0)
if flag.startswith("--") and flag[2:] in parameter_flags:
parameters[flag[2:]] = int(sys.argv.pop(0))
elif flag.startswith("--") and flag[2:] in workload_flags:
workload[flag[2:]] = int(sys.argv.pop(0))
else:
raise ValueError("Bad argument %r." % flag)
run_clean_bench(drive_path, parameters, workload)
print "Benchmark complete; per-transaction timings were written to transactions.txt."
| agpl-3.0 |
evanthebouncy/nnhmm | uai_sushi/script_test_sort.py | 1 | 1245 | from model import *
from draw import *
from naive_baseline import *
from quicksort import *
# ------------- helpers --------------
def get_id_map(start_sort, truth):
ret = dict(zip(start_sort, truth))
return ret
def pred_acc(preds, qry):
num_cor = 0
for i in range(L):
for j in range(L):
if np.argmax(preds[i][j]) == np.argmax(qry((i,j))):
num_cor += 1
return float(num_cor) / (L * L)
def ord_2_pred(ordr):
ret = np.zeros([L,L,2])
for i in range(L):
for j in range(L):
if ordr.index(i) < ordr.index(j):
ret[i][j] = [1.0, 0.0]
else:
ret[i][j] = [0.0, 1.0]
return ret
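# Worked note (illustrative, assuming L == 3): for ordr == [2, 0, 1], element 2
# comes before both 0 and 1, so ret[2][0] and ret[2][1] are [1.0, 0.0], while
# ret[0][2] and ret[1][2] are [0.0, 1.0].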
num_sort = np.array([0.0 for _ in range(L*L)])
for _ in range(2500):
img, _x = get_img_class(test=True, idx=_)
qry = mk_query(_x)
start_sort = np.random.permutation(L)
id_mapping = get_id_map(range(L), _x)
trace = sorta(start_sort)
print "truth"
print _x
for idx, blah in enumerate(trace):
trace[idx] = map(lambda x: id_mapping[x], blah)
for i in range(L*L):
tr = trace[i] if i < len(trace) else trace[-1]
preds = ord_2_pred(tr)
num_sort[i] += pred_acc(preds, qry)
print num_sort / (_ + 1)
| mit |
rds0751/colinkers | env/Lib/encodings/iso8859_5.py | 272 | 13015 | """ Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
'\xad' # 0xAD -> SOFT HYPHEN
'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
'\u2116' # 0xF0 -> NUMERO SIGN
'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
'\xa7' # 0xFD -> SECTION SIGN
'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| agpl-3.0 |
llhe/tensorflow | tensorflow/tensorboard/backend/http_util.py | 7 | 5714 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard HTTP utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gzip
import json
import re
import time
import wsgiref.handlers
import six
import tensorflow as tf
from werkzeug import wrappers
from tensorflow.tensorboard.backend import json_util
_EXTRACT_MIMETYPE_PATTERN = re.compile(r'^[^;\s]*')
_EXTRACT_CHARSET_PATTERN = re.compile(r'charset=([-_0-9A-Za-z]+)')
# Allows *, gzip or x-gzip, but forbid gzip;q=0
# https://tools.ietf.org/html/rfc7231#section-5.3.4
_ALLOWS_GZIP_PATTERN = re.compile(
r'(?:^|,|\s)(?:(?:x-)?gzip|\*)(?!;q=0)(?:\s|,|$)')
_TEXTUAL_MIMETYPES = set([
'application/javascript',
'application/json',
'application/json+protobuf',
'image/svg+xml',
'text/css',
'text/csv',
'text/html',
'text/plain',
'text/tab-separated-values',
'text/x-protobuf',
])
_JSON_MIMETYPES = set([
'application/json',
'application/json+protobuf',
])
def Respond(request,
content,
content_type,
code=200,
expires=0,
content_encoding=None,
encoding='utf-8'):
"""Construct a werkzeug Response.
Responses are transmitted to the browser with compression if: a) the browser
supports it; b) it's sane to compress the content_type in question; and c)
the content isn't already compressed, as indicated by the content_encoding
parameter.
Browser and proxy caching is completely disabled by default. If the expires
parameter is greater than zero then the response will be able to be cached by
the browser for that many seconds; however, proxies are still forbidden from
caching so that developers can bypass the cache with Ctrl+Shift+R.
For textual content that isn't JSON, the encoding parameter is used as the
transmission charset which is automatically appended to the Content-Type
header. That is unless of course the content_type parameter contains a
charset parameter. If the two disagree, the characters in content will be
transcoded to the latter.
If content_type declares a JSON media type, then content MAY be a dict, list,
tuple, or set, in which case this function has an implicit composition with
json_util.Cleanse and json.dumps. The encoding parameter is used to decode
byte strings within the JSON object; therefore transmitting binary data
within JSON is not permitted. JSON is transmitted as ASCII unless the
content_type parameter explicitly defines a charset parameter, in which case
the serialized JSON bytes will use that instead of escape sequences.
Args:
request: A werkzeug Request object. Used mostly to check the
Accept-Encoding header.
content: Payload data as byte string, unicode string, or maybe JSON.
content_type: Media type and optionally an output charset.
code: Numeric HTTP status code to use.
expires: Second duration for browser caching.
content_encoding: Encoding if content is already encoded, e.g. 'gzip'.
encoding: Input charset if content parameter has byte strings.
Returns:
A werkzeug Response object (a WSGI application).
"""
mimetype = _EXTRACT_MIMETYPE_PATTERN.search(content_type).group(0)
charset_match = _EXTRACT_CHARSET_PATTERN.search(content_type)
charset = charset_match.group(1) if charset_match else encoding
textual = charset_match or mimetype in _TEXTUAL_MIMETYPES
if mimetype in _JSON_MIMETYPES and (isinstance(content, dict) or
isinstance(content, list) or
isinstance(content, set) or
isinstance(content, tuple)):
content = json.dumps(json_util.Cleanse(content, encoding),
ensure_ascii=not charset_match)
if charset != encoding:
content = tf.compat.as_text(content, encoding)
content = tf.compat.as_bytes(content, charset)
if textual and not charset_match and mimetype not in _JSON_MIMETYPES:
content_type += '; charset=' + charset
if (not content_encoding and textual and
_ALLOWS_GZIP_PATTERN.search(request.headers.get('Accept-Encoding', ''))):
out = six.BytesIO()
f = gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3)
f.write(content)
f.close()
content = out.getvalue()
content_encoding = 'gzip'
if request.method == 'HEAD':
content = ''
headers = []
headers.append(('Content-Length', str(len(content))))
if content_encoding:
headers.append(('Content-Encoding', content_encoding))
if expires > 0:
e = wsgiref.handlers.format_date_time(time.time() + float(expires))
headers.append(('Expires', e))
headers.append(('Cache-Control', 'private, max-age=%d' % expires))
else:
headers.append(('Expires', '0'))
headers.append(('Cache-Control', 'no-cache, must-revalidate'))
return wrappers.Response(
response=content, status=code, headers=headers, content_type=content_type)
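# Hedged usage sketch (not part of the original module): returning a JSON
# payload with a 60-second browser cache from a werkzeug view; the request
# object and payload contents are placeholders.
def _respond_usage_example(request):
    payload = {'runs': ['run1', 'run2'], 'count': 2}
    # Dicts are cleansed and serialized automatically for JSON mimetypes, and
    # the body is gzipped when the Accept-Encoding header allows it.
    return Respond(request, payload, 'application/json', expires=60)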
| apache-2.0 |
54shady/android-iw-libnl3 | python/doc/conf.py | 30 | 7141 | # -*- coding: utf-8 -*-
#
# libnl-python documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 10:58:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libnl-python'
copyright = u'2011, Thomas Graf <tgraf@suug.ch>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libnl-pythondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libnl-python.tex', u'libnl-python Documentation',
u'Thomas Graf \\textless{}tgraf@suug.ch\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libnl-python', u'libnl-python Documentation',
[u'Thomas Graf <tgraf@suug.ch>'], 1)
]
| lgpl-2.1 |
ionrock/compose | compose/cli/multiplexer.py | 21 | 1323 | from __future__ import absolute_import
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # Python 3.x
STOP = object()
class Multiplexer(object):
"""
Create a single iterator from several iterators by running all of them in
parallel and yielding results as they come in.
"""
def __init__(self, iterators):
self.iterators = iterators
self._num_running = len(iterators)
self.queue = Queue()
def loop(self):
self._init_readers()
while self._num_running > 0:
try:
item, exception = self.queue.get(timeout=0.1)
if exception:
raise exception
if item is STOP:
self._num_running -= 1
else:
yield item
except Empty:
pass
def _init_readers(self):
for iterator in self.iterators:
t = Thread(target=_enqueue_output, args=(iterator, self.queue))
t.daemon = True
t.start()
def _enqueue_output(iterator, queue):
try:
for item in iterator:
queue.put((item, None))
queue.put((STOP, None))
except Exception as e:
queue.put((None, e))
| apache-2.0 |
tochange/ueditor | _test/tools/lib/jshunter_1.2.0.1/jshunter_dev/jshunter/hint.py | 28 | 13510 | #!/usr/bin/python
#encoding=utf-8
import sys,os
import ConfigParser
import commands
import time
import codecs
htmlMap = {}
def processHtml(filename):
f = open(filename,"r")
if f is None:
raise Exception('open %s error!' % (filename))
newName = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1]) + os.sep + "core" + os.sep + "data"+os.sep+os.path.basename(filename) + "." + str(time.time())
#print newName
tmp = ""
flag=False
orig_num = 0
new_num = 0
for ln in f:
orig_num += 1
if ln.find("<script type=\"text/javascript\">") != -1 and ln.find("</script>") == -1:
flag=True
continue
if ln.find("<script type=\"text/javascript\">") != -1 and ln.find("</script>") != -1:
flag=False
continue
if ln.find("<script>") != -1 and ln.find("</script>") == -1:
flag=True
continue
if ln.find("<script>") != -1 and ln.find("</script>") != -1:
flag=False
continue
if ln.find("</script>") != -1:
flag=False
continue
if flag == True:
tmp += ln
new_num += 1
htmlMap[filename+os.sep+str(orig_num)]=newName + os.sep + str(new_num)
if tmp == "":
        print ('[WARNING]file %s does not contain js code' % (filename))
return ""
outfile = open(newName,"w")
if outfile is None:
raise Exception('open %s error!' % (newName))
outfile.write(tmp)
#print htmlMap
return newName
def clearTmpFiles():
dels=[]
for key in htmlMap.keys():
fn = os.sep.join(htmlMap[key].split(os.sep)[:-1])
if not fn in dels:
dels.append(fn)
#print dels
for item in dels:
os.remove(item)
def isHiddenFile(path):
itms = path.split(os.sep)
for itm in itms:
if itm != "" and itm != "." and itm != ".." and itm[0]==".":
return True
return False
'''
@input
a file (or a top dir) to be checked
@process
recuresively read all the files of a dir.only support .html and .js.
if it is a html file then we only check the code between <script></script>
if it is a js file then we check all the code
@return
file(or folder) to be checked
'''
def getFiles(paths):
#print paths
ret = []
omitpath = os.path.dirname(__file__) + os.sep + "conf" + os.sep + "omitfiles.conf"
for path in paths:
path = path.rstrip(os.sep)
if not os.path.isdir(path):
omitfiles = getOmitedFiles(omitpath,os.sep.join(path.split(os.sep)[:-1])+os.sep)
if (os.path.getsize(path)==0) or (path in omitfiles):
continue
if path.find(".js") != -1 and isHiddenFile(path)==False:
ret.append(path)
elif path.find(".html") != -1 and isHiddenFile(path)==False:
np = processHtml(path)
if np != "":
ret.append(np)
else:
omitfiles = getOmitedFiles(omitpath,path)
for root, dirs, files in os.walk(path):
for f in files:
if (os.path.getsize(root + os.sep + f)==0) or ((root + os.sep + f) in omitfiles):
continue
if f.find(".js") != -1 and isHiddenFile(root + os.sep + f) == False:
ret.append(root + os.sep + f)
elif f.find(".html") != -1 and isHiddenFile(root + os.sep + f) == False:
np=processHtml(root + os.sep + f)
if np != "":
ret.append(np)
else:
continue
return ret
def getopt(path):
_opt=[]
_predef=[]
conf = ConfigParser.ConfigParser()
conf.read(path)
for item in conf.options('option'):
_opt.append("%s=%s"%(item,conf.get('option',item)))
for item in conf.options('predef'):
_predef.append("%s=%s"%(item,conf.get('predef',item)))
return "%s %s"%(",".join(_opt),",".join(_predef))
def getBlackList(path):
lst={}
conf = ConfigParser.ConfigParser()
conf.read(path)
for item in conf.options('level'):
lst[item]=conf.get('level',item)
return lst
def printReport(rptstr):
if rptstr=="":
return
array = rptstr.split('\n')
for ln in array:
items = ln.split("***")
if items[1].find("Stopping") != -1:
print items[1]
else:
print "文件:%s\t错误原因:%s\t错误位置:第%s行\t错误语句:%s"%(items[0],items[1],items[2],items[4])
def processItem(item,blacklst,hp):
itm = item
for key in hp.keys():
if item[0]+os.sep+item[2] == hp[key]:
itm[0]=os.sep.join(key.split(os.sep)[:-1])
itm[2]=key.split(os.sep)[-1]
#print "====",itm,"======"
itm.append("error")
for err in blacklst.keys():
if itm[1].lower().find(err.lower())!=-1:
itm[5] = blacklst[err]
break
return itm
'''
parse jshint output
'''
def splitOutput(rptstr,blacklist,mp):
if rptstr=="":
return
array = rptstr.split('\n')
lst=[]
parsecnt = 0;
parsetotal=len(array)
for ln in array:
#print ln
if ln.find("***") == -1:
continue
items = processItem(ln.split("***"),blacklist,mp)
if items[1].find("Stopping") != -1:
print items[1]
else:
lst.append(items)
parsecnt = parsecnt + 1
return (lst,parsecnt,parsetotal)
'''
get the table body according to the result-list
'''
def getBody(lst):
files={}
error=0
warning=0
ignore=0;
count=0
for item in lst:
count = count + 1
if len(item)<6:
continue
if not files.has_key(item[0]):
if item[5] == "ignore":
ignore = ignore + 1
elif item[5] == "error":
error = error + 1
files[item[0]] = getLine(item,count)
else:
files[item[0]] = getLine(item,count)
warning = warning + 1
else:
if item[5] == "ignore":
ignore = ignore + 1
elif item[5] == "error":
error = error + 1
files[item[0]] = files[item[0]] + getLine(item,count)
else:
files[item[0]] = files[item[0]] + getLine(item,count)
warning = warning + 1
return (files,ignore,warning,error)
def generateHtml(rptstr,outfile,blacklst,mp):
print "start parsing jshint output..."
(lst,parsecnt,parsetotal) = splitOutput(rptstr,blacklst,mp)
print "prepare main tpl..."
tpl=""
tplPath=os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1])+os.sep+"core"+os.sep+"tpl"+os.sep+"toggle_tpl.html"
if not os.path.exists(tplPath):
raise Exception('%s file does not exists!'%(tplPath))
f=open(tplPath,"r")
if f is None:
raise Exception('open %s error!' % (tplPath))
for ln in f:
tpl+=ln
strStartTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
tpl=tpl.replace("{$pnumber}",str(len(lst)))
tpl=tpl.replace("{$timeData}",strStartTime)
f.close()
print "blacklist filtering..."
(files,ignore,warning,error) = getBody(lst)
tpl=tpl.replace("{$ignumber}","%s(%5.1f%%)"%(str(ignore),(float(ignore)/len(lst)*100)))
tpl=tpl.replace("{$errnumber}","%s(%5.1f%%)"%(str(error),(float(error)/len(lst)*100)))
tpl=tpl.replace("{$warnumber}","%s(%5.1f%%)"%(str(warning),(float(warning)/len(lst)*100)))
print "prepare body"
bodys=""
for key in files:
tblPath=os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1])+os.sep+"core"+os.sep+"tpl"+os.sep+"htmlpart.html"
if not os.path.exists(tblPath):
raise Exception('%s file does not exists!'%(tblPath))
f=open(tblPath,"r")
body=''''''
for ln in f:
body += ln
body = body.replace("{$title}",key)
body = body.replace("{$fname}",key)
f.close()
body = body.replace("[---to be replaced 2---]",files[key])
bodys = bodys + body
if bodys=="":
#raise Exception('no report generated')
bodys = "no informatin maybe they are filtered"
tpl=tpl.replace("[---to be replace 1---]",bodys)
ts=str(int(time.time()))
resf=open(outfile,"w")
if resf is None:
raise Exception('open %s error!' % (outfile))
resf.write(tpl)
resf.close()
print "generate html file %s OK!"%(outfile)
return ts
def getLine(item,no):
#print item
text = '''<tr><td width='10%%'>%s</td><td width='10%%'>%s</td><td width='20%%'>%s</td><td width='10%%'>%s</td><td width='50%%'>%s</td></tr>'''%(str(no),item[5],item[1],item[2],item[4].replace("<","<").replace(">",">"))
return text
def genReport(status,output,blackpath,outfile,htmlMap):
#if status == 0:
# print "[WARNING][NO ERROR DETECTED BY JSHUNTER]"
#else:
if output.find("open file") != -1:
raise Exception("File Not Found Error!")
print "[ERROR DETECTED BY JSHUNTER]"
blacklist = getBlackList(blackpath)
ts = generateHtml(output,outfile,blacklist,htmlMap)
return ts
def checkJavaExist():
cmd="java"
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
raise Exception("jshunter depend on java enviroment.please make sure your java is OK")
def checkPythonExist():
cmd="python -h"
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
raise Exception("jshunter depend on python enviroment.please make sure your python is OK")
def getCustomerCheckFiles(paths):
omitpath = os.path.dirname(__file__) + os.sep + "conf" + os.sep + "omitfiles.conf"
ret = []
for path in paths:
path = path.rstrip(os.sep)
if not os.path.isdir(path):
omitfiles = getOmitedFiles(omitpath,os.sep.join(path.split(os.sep)[:-1])+os.sep)
if ((os.path.getsize(path)==0) or (path in omitfiles)):
continue
ret.append(path)
else:
omitfiles = getOmitedFiles(omitpath,path)
for root, dirs, files in os.walk(path):
for f in files:
if (os.path.getsize(root + os.sep + f)==0) or ((root + os.sep + f) in omitfiles):
continue
else:
ret.append(root + os.sep + f)
return ret
def getOmitedFiles(confpath,topdir):
_res = []
_opt = []
conf = ConfigParser.ConfigParser()
conf.read(confpath)
for item in conf.options('omitfils'):
if conf.get('omitfils',item) == "true":
_opt.append(item)
if (not os.path.isdir(topdir)) and (len(_opt) > 0):
return []
for item in _opt:
cmd = 'find %s -name "%s"'%(topdir,item)
#print cmd
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
raise Exception("[FATAL]cmd failed!%s"%(cmd))
#print output
for ln in output.split("\n"):
_res.append(ln)
return _res
def usage():
print "===================================================================================================================="
print "[Usage]\n./hint outpath.html fileToCheck.js\t\t检查fileToCheck.js这个文件"
print "./hint outpath.html folderToCheck\t\t检查folderToCheck这个目录内的所有js文件和html文件(递归检查)"
print "./hint outpath.html folderToCheck/*.js\t\t检查folderToCheck一级目录下的所有js文件和html文件(忽略目录)"
print "[Notice]使用时请确保当前目录中包含jshint.js文件,建议cd到jshunter的目录中执行./hint.py"
print "[Contact] xxxxxxx@baidu.com xxxx@baidu.com"
print "===================================================================================================================="
def doJsHint(_path):
fileToCheck=getFiles(_path)
step = 50
javapath = "java"
jsjar = os.path.dirname(__file__) + os.sep + "core" + os.sep + "jshint" + os.sep + "js.jar"
rhino = os.path.dirname(__file__) + os.sep + "core" + os.sep + "jshint" + os.sep + "jshint-rhino.js"
confpath = os.path.dirname(__file__) + os.sep + "conf" + os.sep + "check.cfg"
blackpath = os.path.dirname(__file__) + os.sep + "conf" + os.sep + "ignore.list"
opt = getopt(confpath)
sz = len(fileToCheck)
if sz <= 0:
print "[WARNING]no file to be checked in doJsHint"
return ""
print "Files to be checked Number: %d"%(sz)
for i in range(0,sz):
print (i+1),":",fileToCheck[i]
output=""
if sz <= step:
opt = "%s %s"%(getopt(confpath)," ".join(fileToCheck[:]))
cmd = "%s -jar %s %s %s %s"%(javapath,jsjar,rhino,os.path.dirname(__file__) + os.sep,opt)
(status,output) = commands.getstatusoutput(cmd)
else:
rd=sz/step+1
for j in range(0,rd):
if (j+1)*step>sz:
opt = "%s %s"%(getopt(confpath)," ".join(fileToCheck[j*step:sz]))
else:
opt = "%s %s"%(getopt(confpath)," ".join(fileToCheck[j*step:(j+1)*step]))
cmd = "%s -jar %s %s %s %s"%(javapath,jsjar,rhino,os.path.dirname(__file__) + os.sep,opt)
(status,output_tmp)=commands.getstatusoutput(cmd)
if status != 0 and output_tmp.find("open file") != -1:
raise Exception("File Not Found Error.ERRMSG:%s\n"%(output_tmp))
else:
output = output + output_tmp
print "Finish %5.1f%%"%((j+1)*float(str(step))/sz*100.0)
return output
def doCustomerCheck(_path):
confpath = os.path.dirname(__file__) + os.sep + "conf" + os.sep + "custcheck.conf"
custpath = os.path.dirname(__file__) + os.sep + "core" + os.sep + "customcheck" + os.sep
ops = getCustCheckOpt(confpath)
fileToCheck = getCustomerCheckFiles(_path)
sz = len(fileToCheck)
if sz == 0:
print "[WARNING]no file to be checked in doCustomerCheck"
return ""
custout = ''
#print ops
for item in ops:
for i in range(0,sz):
cmd = "%s %s"%(custpath+item,fileToCheck[i])
#print cmd
(status,output_tmp)=commands.getstatusoutput(cmd)
for ln in output_tmp.split("\n"):
#print ln
if ln.find("***") != -1:
custout += (ln+"\n")
#print custout
return custout
def getCustCheckOpt(path):
_opt=[]
conf = ConfigParser.ConfigParser()
conf.read(path)
for item in conf.options('command'):
if conf.get('command',item) == "true":
_opt.append(item)
return _opt
if __name__ == "__main__":
try:
#checkJavaExist()
#checkPythonExist()
#print "check enviroment ok"
if len(sys.argv) < 3:
usage()
raise Exception("arg number error!")
outfile=sys.argv[1]
if os.path.exists(outfile):
raise Exception('%s already exist!In order to avoid overwrite the file,please change a none-exist file!'%(outfile))
blackpath = os.path.dirname(__file__) + os.sep + "conf" + os.sep + "ignore.list"
filepath = sys.argv[2:]
output1 = doJsHint(filepath)
output2 = doCustomerCheck(filepath)
output = output1 + output2
if output != "":
ts = genReport(0,output,blackpath,outfile,htmlMap)
else:
print "[WARNING]%s"%("no error detected")
clearTmpFiles()
except Exception,err:
print "[FATAL]%s"%(err)
clearTmpFiles()
sys.exit(1)
| mit |
gurneyalex/OpenUpgrade | addons/web_analytics/__openerp__.py | 62 | 1409 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Analytics',
'version': '1.0',
'category': 'Tools',
'complexity': "easy",
'description': """
Google Analytics.
=================
Collects web application usage with Google Analytics.
""",
'author': 'OpenERP SA',
'website': 'http://openerp.com',
'depends': ['web'],
'data': [
'views/web_analytics.xml',
],
'installable': True,
'active': False,
}
| agpl-3.0 |
petermat/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/urls.py | 117 | 3095 | # Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def view_source_url(local_path):
return "http://trac.webkit.org/browser/trunk/%s" % local_path
def view_revision_url(revision_number):
return "http://trac.webkit.org/changeset/%s" % revision_number
contribution_guidelines = "http://webkit.org/coding/contributing.html"
bug_server_domain = "webkit.org"
bug_server_host = "bugs." + bug_server_domain
_bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host)
bug_server_url = "https://%s/" % bug_server_host
bug_url_long = _bug_server_regex + r"show_bug\.cgi\?id=(?P<bug_id>\d+)(&ctype=xml|&excludefield=attachmentdata)*"
bug_url_short = r"https?\://%s/b/(?P<bug_id>\d+)" % bug_server_domain
attachment_url = _bug_server_regex + r"attachment\.cgi\?id=(?P<attachment_id>\d+)(&action=(?P<action>\w+))?"
direct_attachment_url = r"https?://bug-(?P<bug_id>\d+)-attachments.%s/attachment\.cgi\?id=(?P<attachment_id>\d+)" % bug_server_domain
buildbot_url = "http://build.webkit.org"
def parse_bug_id(string):
if not string:
return None
match = re.search(bug_url_short, string)
if match:
return int(match.group('bug_id'))
match = re.search(bug_url_long, string)
if match:
return int(match.group('bug_id'))
return None
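# Illustrative examples (assuming the default webkit.org host defined above):
#   parse_bug_id("https://bugs.webkit.org/show_bug.cgi?id=12345") -> 12345
#   parse_bug_id("http://webkit.org/b/12345") -> 12345
#   parse_bug_id("no bug url here") -> None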
def parse_attachment_id(string):
if not string:
return None
match = re.search(attachment_url, string)
if match:
return int(match.group('attachment_id'))
match = re.search(direct_attachment_url, string)
if match:
return int(match.group('attachment_id'))
return None
| bsd-3-clause |
cGVuaXM/botcnt | PYodbcutils.py | 1 | 2151 | #https://github.com/mkleehammer/pyodbc/wiki/
import os
import re
import logging
import pyodbc
from openpyxl import Workbook
from openpyxl.styles import Font
class SQLserver:
def __init__(self, params, autocommit=False, searchescape=None, timeout=None):
params_str = r'DRIVER={ODBC Driver 11 for SQL Server};'
params_str += r"SERVER=%s;" % (params["server"])
params_str += r"DATABASE=%s;" % (params["database"])
params_str += r"UID=%s;" % (params["uid"])
params_str += r"PWD=%s;" % (params["pwd"])
params_str += r"APP=%s;" % (params.get("app", None) or "TelegramBot")
        self._conn = pyodbc.connect(params_str, autocommit=autocommit, searchescape=searchescape, timeout=timeout) # keep the db connection alive
def commit(self):
        if not self._conn.autocommit: # ignore if autocommit is active
self._conn.commit()
return True
else:
return False
def execSQL(self, SQLcommand):
        cursor = self._conn.cursor()
        try:
            cursor.execute(SQLcommand)
        except pyodbc.ProgrammingError:
            return 0
        return cursor
def _toExcel(self, rows, file_path, sheet2=None):
"""
sheet2 = {
"title": "nome folgio 2",
"A1": "contenuto cella A1"
}
"""
wb = Workbook()
bold_font = Font(color='00FF0000', bold=True)
        ws = wb.active # use the currently active sheet
        ws.title = "Results" # rename the sheet (title is a property, not a callable)
        ws.sheet_properties.tabColor = "5BF77D" # sheet tab background color (green)
for row in rows:
ws.append(row)
for cell in ws["1:1"]:
cell.font = bold_font
if sheet2:
            ws = wb.create_sheet(sheet2['title']) # second sheet holding the query
            ws.sheet_properties.tabColor = "7BC1ED" # sheet tab background color (blue)
            ws['A1'] = sheet2['A1'] # write the command string into the first cell of the sheet
wb.save(file_path)
return file_path
def to_excel(self, SQLcommand, file_path):
        cursor = self.execSQL(SQLcommand)
if cursor == 0:
return cursor
columns = [column[0] for column in cursor.description]
rows = []
rows.append(columns)
for row in cursor.fetchall():
rows.append(row)
return self._toExcel(rows, file_path, {"title": "query", "A1": SQLcommand})
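
# Minimal usage sketch. The server address, credentials, query and output
# path below are placeholder assumptions, not real values; an ODBC driver
# for SQL Server must be installed for the connection to succeed.
if __name__ == '__main__':
    params = {
        "server": "localhost",   # hypothetical SQL Server host
        "database": "testdb",    # hypothetical database name
        "uid": "user",
        "pwd": "secret",
    }
    db = SQLserver(params)
    # Runs the query and writes the rows to an .xlsx file; execSQL()
    # returns 0 on a programming error, so to_excel() propagates that 0.
    result = db.to_excel("SELECT 1 AS answer", "/tmp/result.xlsx")
    print(result)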
| mit |
batxes/4c2vhic | Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models44006.py | 4 | 13925 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((12975.4, 63.9161, 9619.77), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((12878.8, -933.001, 9747.6), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((11451.8, 336.988, 9437.77), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((9756.33, 1896.77, 9047.94), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((9227.42, 2379.38, 8887.95), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((9714.49, 580.899, 7869.93), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((8329.7, 1306.24, 6749.06), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((7613.86, 252.791, 5593.67), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((6083.3, 276.264, 5130.47), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((4446.32, 641.195, 4336.33), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((3067.19, 1032.63, 5372.85), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1524.61, -426.924, 5613.35), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((74.5148, -1908.4, 5786.06), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((1426.81, -2091.3, 6642.32), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((849.883, -1257.08, 5321.11), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((1337.84, -54.0171, 4441.54), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((2581.38, 508.839, 4183.63), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3921.19, 1262.74, 4203.06), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((5220.87, -18.5453, 4108.29), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((6125.02, 775.99, 3064.93), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((6834.24, 1671.21, 1579.15), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((7424.1, 1763.44, -76.3751), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((7338.31, 2692.46, 1030.94), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((6059.54, 3780.35, 2291.13), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((4261.37, 4750.31, 2937.72), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((3347.65, 5194.53, 3185.28), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4701.18, 7012.53, 4684.87), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4939.1, 8752.89, 5292.49), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((6104.21, 9090.34, 4944.12), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((7922.15, 10156.9, 3960.67), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((7569.45, 9531.5, 4236.39), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((7394.06, 10735.5, 5090.76), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((8475.12, 12503.9, 6047.76), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((8773.36, 12244.5, 7462.46), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((9620.96, 11438.1, 8429.21), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((11102.9, 11303.7, 9394.54), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((12715.1, 10830.4, 9245.41), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((12460.4, 10610, 7661.66), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((11755.5, 11928.4, 7284.4), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((9783.6, 11635.2, 7425.61), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((9486.64, 12398.3, 8388.26), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((9131.69, 11232.7, 7764.08), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((9632.14, 10610.3, 8195.08), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((9293.96, 11266.2, 7912.52), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((8109.46, 11494.9, 6526.5), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((5735.92, 9838.99, 6514), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((4272.66, 9487.91, 7629.31), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3952.31, 9194.5, 8715.32), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((3812.5, 10317, 10350.8), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((3221.04, 12366.4, 11951.7), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((4503.64, 13228.2, 11325.4), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((3969.34, 12769, 8484.07), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((4623.38, 12372, 8708.23), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6373.48, 12922.3, 9296.82), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((7768.61, 12628.7, 9874.54), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((8194.64, 10832.5, 9685.46), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
pdellaert/ansible | lib/ansible/playbook/taggable.py | 96 | 3170 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Taggable:
untagged = frozenset(['untagged'])
_tags = FieldAttribute(isa='list', default=list, listof=(string_types, int), extend=True)
def _load_tags(self, attr, ds):
if isinstance(ds, list):
return ds
elif isinstance(ds, string_types):
value = ds.split(',')
if isinstance(value, list):
return [x.strip() for x in value]
else:
return [ds]
else:
raise AnsibleError('tags must be specified as a list', obj=ds)
def evaluate_tags(self, only_tags, skip_tags, all_vars):
''' this checks if the current item should be executed depending on tag options '''
if self.tags:
templar = Templar(loader=self._loader, variables=all_vars)
tags = templar.template(self.tags)
_temp_tags = set()
for tag in tags:
if isinstance(tag, list):
_temp_tags.update(tag)
else:
_temp_tags.add(tag)
tags = _temp_tags
self.tags = list(tags)
else:
# this makes isdisjoint work for untagged
tags = self.untagged
should_run = True # default, tasks to run
if only_tags:
if 'always' in tags:
should_run = True
elif ('all' in only_tags and 'never' not in tags):
should_run = True
elif not tags.isdisjoint(only_tags):
should_run = True
elif 'tagged' in only_tags and tags != self.untagged and 'never' not in tags:
should_run = True
else:
should_run = False
if should_run and skip_tags:
# Check for tags that we need to skip
if 'all' in skip_tags:
if 'always' not in tags or 'always' in skip_tags:
should_run = False
elif not tags.isdisjoint(skip_tags):
should_run = False
elif 'tagged' in skip_tags and tags != self.untagged:
should_run = False
return should_run
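
# Illustrative behaviour sketch (values below are hypothetical): for a task
# tagged ['setup'], and assuming the tag templates render to themselves,
#   only_tags={'setup'}, skip_tags=set()     -> evaluate_tags() returns True
#   only_tags=set(),     skip_tags={'setup'} -> returns False
# A task tagged ['always'] runs even when only_tags selects other tags,
# unless 'always' itself appears in skip_tags.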
| gpl-3.0 |
vitalogy/openbricks | packages/multimedia/freevo/config/local_conf.py | 17 | 61400 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# local_conf.py - System configuration
# -----------------------------------------------------------------------
# $Id: local_conf.py.example 11525 2009-05-17 13:25:03Z duncan $
#
# Notes:
#
# This file contains your freevo settings, overriding the settings in
# freevo_config.py. freevo_config.py, which is usually installed in
# /usr/share/freevo, contains all the core settings. To change the settings copy
# this file to ~/.freevo/local_conf.py or /etc/freevo/local_conf.py
#
# It does not contain all the possible settings that you can change, see
# freevo_config.py for all the possible settings. Also it does not contain
# settings for the plug-ins; plug-ins contain their configuration information and
# they can be listed with:
# | freevo plugins -l
# and the settings can be shown with
# | freevo plugins -i <name of plug-in>
#
# E.g.: when you want alsa as mplayer audio out, just put in local_conf.py:
# | MPLAYER_AO_DEV = 'alsa9'
#
# The vertical line indicates code.
#
# This is no normal config file, it's Python code. Because of that, you
# need to follow some rules to avoid crashes. The examples should explain
# the settings, but make sure a line starting with a variable has NO SPACES OR
# TABS at the beginning.
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
CONFIG_VERSION = 5.27
# ======================================================================
# General freevo settings:
# ======================================================================
# LOCALE='iso-8859-15'
# AUDIO_DEVICE = '/dev/dsp' # e.g.: /dev/dsp0, /dev/audio, /dev/alsa/?
# AUDIO_INPUT_DEVICE = '/dev/dsp1' # e.g.: /dev/dsp0, /dev/audio, /dev/alsa/?
# MIXER_MAJOR_CTRL = 'VOL' # Freevo takes control over one audio ctrl
# 'VOL', 'PCM', 'OGAIN' etc.
# MIXER_MAJOR_MUTE_CTRL = 'PCM' # used in alsamixer.py; there are systems where
# volume and mute use different controls
# MIXER_DEVICE = '/dev/mixer' # mixer device
# MIXER_CONTROL_ALL = 1 # Should Freevo take complete control of audio
# MIXER_VOLUME_MAX = 90 # Set what you want maximum volume level to be.
# MIXER_VOLUME_DEFAULT = 40 # Set default volume level.
# MIXER_VOLUME_TV_IN = 60 # Set this to your preferred level 0-100.
# MIXER_VOLUME_VCR_IN = 90 # If you use different input from TV
# MIXER_VOLUME_RADIO_IN = 80 # Set this to your preferred level 0-100.
# START_FULLSCREEN_X = 0 # Start in fullscreen mode if using x11 or xv.
# SYS_SHUTDOWN_CONFIRM = 1 # ask before shutdown
#
# Physical ROM drives, multiple ones can be specified
# by adding comma-separated and quoted entries.
#
# Format [ ('mountdir1', 'devicename1', 'displayed name1'),
# ('mountdir2', 'devicename2', 'displayed name2'), ...]
#
# Set to None to autodetect drives in during startup from /etc/fstab,
# set to [] to disable rom drive support at all
#
# ROM_DRIVES = None
# ROM_DRIVES_AUTOFS = False # Indicates that an automounter daemon is being used.
# Does not try to mount/umount the media.
#
# Hide discs from the wrong menu (e.g. VCDs in audio menu) and empty discs.
#
# HIDE_UNUSABLE_DISCS = 1
#
# Attempt to set the speed of the ROM drive. A good value for keeping the
# drive silent while playing movies is 8.
#
# ROM_SPEED = 0
#
# Perform a whole system shutdown at SHUTDOWN! Useful for standalone boxes.
#
SYS_SHUTDOWN_ENABLE = 1
#
# Command to execute to shutdown the system
#
SYS_SHUTDOWN_CMD = "systemctl poweroff"
SYS_RESTART_CMD = "systemctl reboot"
# ======================================================================
# AUTOSHUTDOWN CONFIGURATION
# ======================================================================
# replace the default shutdown plugin
# plugin.remove('shutdown')
# plugin.activate('autoshutdown', level=90)
# activate the timer
# plugin.activate('autoshutdown.autoshutdowntimer')
# -- autoshutdown menu item configuration --
# SYS_SHUTDOWN_CONFIRM
# Set to True to popup dialog boxes for confirmation.
# this applies to menu items only.
# AUTOSHUTDOWN_CONFIRM = True
# -- autoshutdown timer configuration --
# TIMER_TIMEOUT
# Set the timeout in minutes after which the system
# is shut down. The allowed idle time and the running
# processes (see below) are evaluated to determine if
# a shutdown is allowed. Menu navigation in freevo will
# reset the timer.
# AUTOSHUTDOWN_TIMER_TIMEOUT=30
# -- autoshutdown behaviour configuration --
# PRETEND
# Set to True to disable the actual shutdown command.
# AUTOSHUTDOWN_PRETEND = False
# PROCESS_LIST
# List the processes that will prevent an automatic
# shutdown. If there are important programs that
# should not be interrupted, then add them to this
# list. Set to None if a shutdown is always allowed.
# AUTOSHUTDOWN_PROCESS_LIST = [
# 'emerge',
# 'tvgids',
# 'transcode',
# 'cdrecord',
# 'mplayer',
# 'top'
# ]
# DEFAULT_WAKEUP_TIME
# Set the default time at which to wakeup if there
# are no recordings scheduled. The time is specified
# in localtime 24 hour format. Set to None to disable
# default wakeup time.
# AUTOSHUTDOWN_DEFAULT_WAKEUP_TIME = "13:00"
# FORCE_DEFAULT_WAKEUP
# Set to True to always wakeup at the default wakeup
# time. Set to False to only wakeup at the default
# wakeup time when no recordings are scheduled.
# AUTOSHUTDOWN_FORCE_DEFAULT_WAKEUP = True
# ALLOWED_IDLE_TIME
# The number of minutes that may be spent idle until
# the next scheduled recording or default wakeup. That
# is, if the gap between "now" and the next recording
# or default wakeup is less than the allowed idle time
# then a shutdown is not performed but the system is
# left running. If the period from now to the next
# recording or default wakeup is more than the allowed
# idle time, then the system is shut down and a wakeup
# is scheduled. Use this to minimize the number of
# shutdown/boot sequences when many short programs are
# recorded in a short period of time. Note that this
# variable is used by both the timer and the menu.
# AUTOSHUTDOWN_ALLOWED_IDLE_TIME = 45
# WAKEUP_TIME_PAD
# Amount of pad time (in seconds) to start system boot ahead of the next
# wakeup event so that system will be ready. Default is 180 (3 minutes).
# AUTOSHUTDOWN_WAKEUP_TIME_PAD = 180
# -- Choice of wakeup method
#
# The wakeup can be done via acpi-alarm or nvram-wakeup.
# AUTOSHUTDOWN_METHOD = 'acpi'
# AUTOSHUTDOWN_METHOD = 'nvram'
# -- autoshutdown acpi-alarm configuration
# This method uses the wakeup on alarm function that most BIOSs have.
# The wakeup time is set by a simple
#
# "echo 2004-08-02 20:15:00 >/proc/acpi/alarm"
#
# On most mainbords you will have to ENABLE "Wake on Timer", "Resume on Alarm",
# "RTC Alarm Resume" or similar things for the acpi wakeup method to work.
# If you want to use acpi, you need to create a small script:
#
# !/bin/sh
# echo "$1" >/proc/acpi/alarm
#
# You have to be root or use sudo for this to work.
# AUTOSHUTDOWN_WAKEUP_CMD = sudo /PATH/TO/set_acpi.sh
# -- autoshutdown nvram-wakeup configuration --
# The nvram-wakeup utility is used to write the
# wakeup alarm to the RTC in bios. Read the
# nvram-wakeup documentation about this topic,
# a working nvram-wakeup configuration is needed.
# WAKEUP_CMD / NVRAM_OPT
# Path to nvram-wakeup and options. Options can
# be used to specify a config file.
# AUTOSHUTDOWN_WAKEUP_CMD = "/usr/bin/nvram-wakeup"
# AUTOSHUTDOWN_NVRAM_OPT = "--syslog"
# WAKEUP_NEEDS_REBOOT
# Set to True if the bios needs a reboot to catch
# up with the rtc alarm that nvram-wakeup sets. The
# boot loader options should be set too. Read the
# nvram-wakeup documentation about this topic.
# AUTOSHUTDOWN_BIOS_NEEDS_REBOOT = True
# -- if the bios needs a reboot --
# BOOT_LOADER
# Set to "GRUB" or "LILO" Only needed if bios needs
# a reboot to initialize the RTC wakeup call.
# AUTOSHUTDOWN_BOOT_LOADER = "GRUB"
# REMOUNT_BOOT_CMD / REMOUNT_BOOT_OPT
# Grub needs to write to /boot/grub/grub.conf. Set
# the command and options to remount the /boot
# partition writeable. Set to None if this is not
# needed (default).
# AUTOSHUTDOWN_REMOUNT_BOOT_CMD = "/bin/mount"
# AUTOSHUTDOWN_REMOUNT_BOOT_OPT = "/boot -o remount,rw"
# GRUB_CMD / GRUB_OPT
# Grub-set-default command and options that will
# reboot and poweroff the system.
# AUTOSHUTDOWN_GRUB_CMD = "/sbin/grub-set-default 0"
# AUTOSHUTDOWN_GRUB_OPT = "0"
# LILO_CMD / LILO_OPT
# Lilo command with options that will reboot and
# poweroff the system.
# AUTOSHUTDOWN_LILO_CMD = "/sbin/lilo"
# AUTOSHUTDOWN_LILO_OPT = "-R PowerOff"
# ======================================================================
# Events
# ======================================================================
#
# You can add more keybindings by adding them to the correct hash.
# e.g. If you want to send 'contrast -100' to mplayer by pressing the '1' key,
# just add the following line:
#
# EVENTS['video']['1'] = Event(VIDEO_SEND_MPLAYER_CMD, arg='contrast -100')
#
# See src/event.py for a list of all possible events.
#
# Some events to jump to menus
#
# EVENTS['menu']['GUIDE'] = Event(MENU_GOTO_TVGUIDE) # Not working
# EVENTS['menu']['VIDEOS'] = Event(MENU_GOTO_VIDEOS)
# EVENTS['menu']['MUSIC'] = Event(MENU_GOTO_MUSIC)
# EVENTS['menu']['PICTURES'] = Event(MENU_GOTO_IMAGES)
# EVENTS['menu']['GAMES'] = Event(MENU_GOTO_GAMES)
# EVENTS['menu']['RADIO'] = Event(MENU_GOTO_RADIO) # Not working
# EVENTS['menu']['POWER'] = Event(MENU_GOTO_SHUTDOWN)
#
# Use arrow keys for back and select (alternate way of navigating)
#
# MENU_ARROW_NAVIGATION = False
#
# Process keyboard events from SDL. You want this unless you use only lirc
# or event devices below.
#
# SYS_USE_KEYBOARD = True
#
# Process mouse events from SDL/Pygame. You want this to control Freevo
# with a mouse
#
# SYS_USE_MOUSE = False
#
# Keymap to map keyboard keys to event strings. You can also add new keys
# here, e.g. KEYMAP[key.K_x] = 'SUBTITLE'. The K_-names are defined by pygame.
#
#
# List of /dev/input/event# devices to monitor. You can specify either the
# device node (e.g. '/dev/input/event1') or the name of the device (e.g.
# 'ATI Remote Wonder II'). If you monitor your keyboard both here and with
# SYS_USE_KEYBOARD, then you will get duplicate events.
#
EVENT_DEVS = []
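#
# Example (purely illustrative; the device node and remote name below are
# placeholders, use whatever your system actually reports):
#
# EVENT_DEVS = [ '/dev/input/event1', 'ATI Remote Wonder II' ]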
# Keymap to map input events to event strings. You can change current mappings
# and add new ones here, e.g. EVENTMAP['KEY_COFFEE'] = 'SUBTITLE'. Key names
# are defined by the Linux input layer (input.h). An axis is described by a
# pair, one for positive and one for negative movement, e.g.
#
# EVENTMAP['REL_Z'] = ('LEFT', 'RIGHT')
# Use Internet resources to fetch information?
# For example, Freevo can use CDDB for album information,
# the IMDB movie database for movie info, and Amazon for cover searches.
# Set this to 0 if your computer isn't connected to a network.
#
# SYS_USE_NETWORK = True
#
# Directory to store temporary files
#
# FREEVO_TEMPDIR = '/tmp'
#
# Directory location to save files when the normal filesystem
# doesn't allow saving. This directory can save covers and fxd files
# for read only filesystems like ROM drives. Set this variable to your
# old MOVIE_DATA_DIR if you have one. It needs to be set to a directory
# Freevo can write to.
#
# OVERLAY_DIR = os.path.join(FREEVO_CACHEDIR, 'vfs')
#
# Umask setting for all files.
# 022 means only the user has write access. If you share your Freevo
# installation with different users, set this to 002
#
# UMASK = 022
#
# Suffix for playlist files
#
# PLAYLIST_SUFFIX = [ 'm3u' ]
#
# Use md5 in mmpython to create unique disc ids. Enable this if you have
# problems with different discs having the same id.
#
# MMPYTHON_CREATE_MD5_ID = 0
#
# Keep metadata in memory
# Setting this variable will keep all cache files in memory. Startup will be
# slower, but for large directories, this will speed up the display.
# 0 = Only keep current dir in memory. Use this if you have too much data
# and not enough RAM
# 1 = Once loaded, keep cachefile for directory in memory
# 2 = Load all cachefiles on startup
#
# WARNING: you should not run 'freevo cache' when freevo is running.
#
# MEDIAINFO_USE_MEMORY = 1
#
# Cache images. This uses a lot of disc space but it's a huge speed
# enhancement. The images will be cached in OVERLAY_DIR
#
CACHE_IMAGES = 1
# ======================================================================
# Plugins:
# ======================================================================
# Remove undesired plugins by setting plugin.remove(code).
# You can also use the name to remove a plugin. But if you do that,
# all instances of this plugin will be removed.
#
# Examples:
# plugin.remove(plugin_tv) or
# plugin.remove('tv') will remove the tv module from the main menu
# plugin.remove(rom_plugins['image']) will remove the rom drives from the
# image main menu,
# plugin.remove('rom_drives.rom_items') will remove the rom drives from all
# menus
#
# See freevo_config.py for a list of loaded plugins
#
# Use ivtv_record instead if you have an ivtv based card (PVR-250/350)
# and want freevo to do everything for you. TV_SETTINGS must be set
# correctly. To use it you need to set the following two lines:
#
# plugin.remove('tv.generic_record')
# plugin_record = plugin.activate('tv.ivtv_record')
#
# Enable this for joystick support:
# plugin.activate('joy')
# ----------------------------------------------------------------------
# Headlines
#
# You are free to use any rss feeds in the HEADLINES_LOCATIONS below
# These are just working examples for the Freevo feeds.
# To turn off Headlines add plugin.remove('headlines')
# ----------------------------------------------------------------------
# plugin.activate('headlines', level=45)
# HEADLINES_LOCATIONS = [
# ('Freevo news releases', 'http://sourceforge.net/export/rss2_projnews.php?group_id=46652'),
# ('Freevo file releases', 'http://sourceforge.net/export/rss2_projfiles.php?group_id=46652'),
# ('Freevo summary+stats', 'http://sourceforge.net/export/rss2_projsummary.php?group_id=46652'),
# ('Freevo donors', 'http://sourceforge.net/export/rss2_projdonors.php?group_id=46652'),
# ]
# ----------------------------------------------------------------------
# Speak using Festival
# ----------------------------------------------------------------------
# Speak plugin to output menu items via festival
# plugin.activate('speak')
# SPEAK_WELCOME = ''
# SPEAK_SHUTDOWN = ''
# ----------------------------------------------------------------------
# CD Ripping
# ----------------------------------------------------------------------
# CD_RIP_TMP_DIR = '/tmp/'
# CD_RIP_TMP_NAME = 'track_%(track)s_being_ripped'
# CD_RIP_PN_PREF = '%(artist)s/%(album)s/%(track)s - %(song)s'
# CD_RIP_CDPAR_OPTS = '-s'
# CD_RIP_LAME_OPTS = '--vbr-new -b 192 -h'
# CD_RIP_OGG_OPTS = '-m 128'
# CD_RIP_FLAC_OPTS = '-8'
# CD_RIP_CASE = None # Can be title, upper, lower
# CD_RIP_REPLACE_SPACE = None # Can be '_', '-', etc.
# ----------------------------------------------------------------------
# CD Burning
# ----------------------------------------------------------------------
# CDBURN_AUDIO_DAO = 1
# CDBURN_MKISOFS_PATH = '/usr/bin/mkisofs'
# CDBURN_CDRECORD_PATH = '/usr/bin/cdrecord'
# CDBURN_TEMP_DIR='/tmp/'
# CDBURN_DEV = '/dev/cdrom'
# CDBURN_SPEED = 32
# ----------------------------------------------------------------------
# Re-encode
# This plug-in transcodes a video to a different format
# ----------------------------------------------------------------------
# plugin.activate('video.reencode')
# REENCODE_CONTAINER = 'avi'
# REENCODE_RESOLUTION = 'Optimal'
# REENCODE_VIDEOCODEC = 'XviD'
# REENCODE_VIDEOBITRATE = 1000
# REENCODE_AUDIOCODEC = 'MPEG 1 Layer 3 (mp3)'
# REENCODE_AUDIOBITRATE = 128
# REENCODE_NUMPASSES = 1
# REENCODE_VIDEOFILTER = None
# ----------------------------------------------------------------------
# Freevo Music Player Daemon
# http://www.musicpd.org/
# ----------------------------------------------------------------------
# plugin.activate('mpd')
# ----------------------------------------------------------------------
# Freevo Bluetooth Phone Settings
# ----------------------------------------------------------------------
# This plugin uses the Python bindings for the Bluez bluetooth stack.
#
# It can be downloaded from http://org.csail.mit.edu/pybluez/download.html
# or installed with the package manager of your operating system.
#
# For this plugin to work you need the j2me midlet installed in a compatible
# phone.
#
# plugin.activate('freevused')
# if RFCOMM port is already binded wait this seconds to retry binding
# FVUSED_BIND_TIMEOUT = 30
# Translation of commands from j2me client to events of Freevo
# FVUSED_CMDS = {
# 'PREV': 'UP', # 1st row left
# 'STRT': 'SELECT', # 1nd row center
# 'NEXT': 'DOWN', # 1st row right
# 'RWND': 'LEFT', # 2nd row left
# 'PAUS': 'PAUSE', # 2nd row center
# 'FFWD': 'RIGHT', # 2nd row right
# 'VOL-': 'MIXER_VOLDOWN', # 3rd row left
# 'STOP': 'EXIT', # 3rd row center
# 'VOL+': 'MIXER_VOLUP', # 3rd row right
# 'VOLM': 'MIXER_VOLMUTE', # 4th row left
# 'SLCT': 'ENTER', # 4th row center
# 'MAIN': 'MENU' # 4th row right
# }
# ----------------------------------------------------------------------
# Freevo Button Bar Plug-in
# ----------------------------------------------------------------------
# plugin.activate('buttonbar')
# You'll also need to map the 'RED', 'GREEN', 'YELLOW'
# and 'BLUE' events to keys, for example (don't use these exact mappings,
# they override some default keys):
# KEYMAP[key.K_F5] = 'RED'
# KEYMAP[key.K_F6] = 'GREEN' #Already mapped to record
# KEYMAP[key.K_F7] = 'YELLOW'
# KEYMAP[key.K_F8] = 'BLUE'
# ----------------------------------------------------------------------
# Freevo Screensaver Plug-in
# ----------------------------------------------------------------------
# SCREENSAVER_DELAY = 120 # of seconds to wait to start saver.
# SCREENSAVER_CYCLE_TIME = 60 # of seconds to run a screensaver before starting another saver.
# plugin.activate('screensaver')
# plugin.activate('screensaver.balls') # Bouncing balls all over the screen
# plugin.activate('screensaver.bouncing_freevo') # The freevo logo bouncing around the screen
# ======================================================================
# Freevo directory settings:
# ======================================================================
# You can change all this variables in the folder.fxd on a per folder
# basis
#
# Example:
# <freevo>
# <folder title="Title of the directory" img-cover="nice-cover.png">
# <setvar name="directory_autoplay_single_item" val="0"/>
# <info>
# <content>A small description of the directory</content>
# </info>
# </folder>
# </freevo>
#
# Should directories be sorted by date instead of filename?
# 0 = No, always sort by filename.
# 1 = Yes, sort by date
# 2 = No, don't sory by date for normal directories,
# but sort by date for TV_RECORD_DIR.
#
# DIRECTORY_SORT_BY_DATE = 2
#
# Should directory items be sorted in reverse order?
#
# DIRECTORY_REVERSE_SORT = 0
#
# Should we use "smart" sorting?
# Smart sorting ignores the word "The" in item names.
#
# DIRECTORY_SMART_SORT = 0
#
# Should files in directories have smart names?
# This removes the first part of the names when identical
#
# DIRECTORY_SMART_NAMES = 1
#
# Should Freevo autoplay an item if only one item is in the directory?
#
# DIRECTORY_AUTOPLAY_SINGLE_ITEM = 1
#
# Force the skin to use a specific layout number. -1 == no force. The layout
# toggle with DISPLAY will be disabled
#
# DIRECTORY_FORCE_SKIN_LAYOUT = -1
#
# Format string for the audio item names.
#
# Possible strings:
# a=artist, n=tracknumber, t=title, y=year, f=filename
#
# Example:
# This will show the title and the track number:
# DIRECTORY_AUDIO_FORMAT_STRING = '%(n)s - %(t)s'
#
# DIRECTORY_AUDIO_FORMAT_STRING = '%(t)s'
#
# Use media id tags to generate the name of the item. This should be
# enabled all the time. It should only be disabled for directories with
# broken tags.
#
# DIRECTORY_USE_MEDIAID_TAG_NAMES = 1
#
# The following settings determine which features are available for
# which media types.
#
# If you set this variable in a folder.fxd, the value is 1 (enabled)
# or 0 (disabled).
#
# Examples:
# To enable autoplay for audio and image files:
# DIRECTORY_AUTOPLAY_ITEMS = [ 'audio', 'image' ]
# To disable autoplay entirely:
# DIRECTORY_AUTOPLAY_ITEMS = []
# Make all items a playlist. So when one is finished, the next one will
# start. It's also possible to browse through the list with UP and DOWN
#
# DIRECTORY_CREATE_PLAYLIST = [ 'audio', 'image' ]
# Add playlist files ('m3u') to the directory
#
# DIRECTORY_ADD_PLAYLIST_FILES = [ 'audio', 'image' ]
# Add the item 'Random Playlist' to the directory
#
# DIRECTORY_ADD_RANDOM_PLAYLIST = [ 'audio' ]
# Make 'Play' not 'Browse' the default action when only items and not
# subdirectories are in the directory
#
# DIRECTORY_AUTOPLAY_ITEMS = [ ]
# ======================================================================
# Freevo movie settings:
# ======================================================================
#
# Where the movie files can be found.
# This is a list of items (e.g. directories, fxd files). The items themselves
# can also be a list of (title, file)
#
# VIDEO_ITEMS = [
# ('action movies', '/freevo/movies/action'),
# ('funny stuff', '/freevo/movies/comedy'),
# ]
#
# Some people access movies on a different machine using an automounter.
# To avoid timeouts, you can specify the machine name in the directory
# to check if the machine is alive first
# Directory myserver:/files/server-stuff will show the item for the
# directory /files/server-stuff if the computer myserver is alive.
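#
# Example (hypothetical host and path): a VIDEO_ITEMS entry such as
# ('NAS movies', 'myserver:/files/server-stuff') only appears in the menu
# while the host myserver answers, so a dead automount never blocks the menu.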
#
# The list of filename suffixes that are used to match the files that
# are played wih MPlayer.
#
# VIDEO_MPLAYER_SUFFIX = [
# 'avi', 'mpg', 'mpeg', 'wmv', 'bin', 'rm', 'divx', 'ogm', 'vob', 'asf',
# 'm2v', 'm2p', 'mp4', 'viv', 'nuv', 'mov', 'iso', 'nsv', 'mkv', 'ogg',
# 'ts', 'flv',
# ]
#
# The list of filename suffixes that are used to match the files that
# are played wih Xine.
#
# VIDEO_XINE_SUFFIX = [
# 'avi', 'mpg', 'mpeg', 'rm', 'divx', 'ogm', 'asf', 'm2v', 'm2p', 'mp4',
# 'mov', 'cue', 'ts', 'iso', 'vob',
# ]
#
# Preferred video player
#
# VIDEO_PREFERED_PLAYER = 'mplayer'
#
# Only scan OVERLAY_DIR and VIDEO_SHOW_DATA_DIR for fxd files containing
# information about a disc. If you only have the fxd files for discs in
# one of these directories (and subdirectories), set this to 1; it will
# speed up startup, 0 may be needed if you have fxd files with disc links
# in your normal movie tree.
#
# VIDEO_ONLY_SCAN_DATADIR = 1
#
# try to detect a movie with more than one file and join them as one
# item
#
# VIDEO_AUTOJOIN = 1
#
# join files based on the regular expression
# searches for 1, 01, 001, etc. before a '.'; possibly too simple
#
# VIDEO_AUTOJOIN_REGEX='(0*1)\.'
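#
# Example (hypothetical filenames): with the default regexp, 'movie-1.avi'
# and 'movie-2.avi' are detected as parts of the same movie and joined into
# a single item in the menu.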
#
# try to find out if deinterlacing is needed or not
#
# VIDEO_DEINTERLACE = None
#
# Instruct player to use XVMC for playback
#
# VIDEO_USE_XVMC = None
#
# Pass field dominance parameter to MPlayer
#
# VIDEO_FIELD_DOMINANCE = None
# PRE and POST playing commands. Set these to a runnable command if
# you wish to do something before and after playing a video, like
# dimming the lights
# VIDEO_PRE_PLAY = None
# VIDEO_POST_PLAY = None
# ======================================================================
# Freevo audio settings:
# ======================================================================
#
# Where the Audio (mp3, ogg) files can be found.
# This is a list of items (e.g. directories, fxd files). The items themselves
# can also be a list of (title, file)
#
# To add webradio support, add fxd/webradio.fxd to this list
#
# AUDIO_ITEMS = [
# ('Music Collection', '/freevo/audio/mp3'),
# 'fxd/webradio.fxd',
# ]
#
# The list of filename suffixes that are used to match the files that
# are played as audio.
#
# AUDIO_SUFFIX = [
# 'mp3', 'ogg', 'wav', 'm4a', 'wma', 'aac', 'flac', 'mka', 'ac3',
# ]
#
# Regular expression used to recognize filenames which are likely to be
# covers for an album
#
# This will match front.jpg and cover-f.jpg, but not back.jpg nor cover-b.jpg:
# AUDIO_COVER_REGEXP = 'front|-f'
#
# Format strings used to search for audio cover images.
# First matching GIF, JPG or PNG image will be used as cover.
#
# Examples:
# AUDIO_COVER_FORMAT_STRINGS = [ 'cover-%(artist)s-%(album)s', 'mycover' ]
# AUDIO_COVER_FORMAT_STRINGS = [ '%(album)s', '../covers/%(album)s', '../covers/nocover' ]
#
#
# Preferred audio player
#
# AUDIO_PREFERED_PLAYER = 'mplayer'
#
# Show video files in the audio menu (for music-videos)
#
# AUDIO_SHOW_VIDEOFILES = False
# ======================================================================
# Freevo image viewer settings:
# ======================================================================
#
# Where image files can be found.
# This is a list of items (e.g. directories, fxd files). The items themselves
# can also be a list of (title, file)
#
# IMAGE_ITEMS = [
# ('My Photos', '/freevo/images'),
# ]
#
# The list of filename suffixes that are used to match the files that
# are used for the image viewer.
#
# IMAGE_SUFFIX = [ 'jpg','gif','png','jpeg','bmp','tiff','psd' ]
#
# The viewer now supports a new type of menu entry, a slideshow file.
# It also has the slideshow alarm signal handler for automated shows.
# It uses a new configuration option:
#
# IMAGE_SSHOW_SUFFIX = [ 'ssr' ]
# The viewer can exclude certain types of images based on the regular expression list
# eg IMAGE_EXCLUDE = [('thm','tn_')]
# IMAGE_EXCLUDE = None
#
# Mode of the blending effect in the image viewer between two images
# Possible values are:
#
# None: no blending
# -1 random effect
# 0 alpha blending
# 1 wipe effect
# IMAGEVIEWER_BLEND_MODE = -1
#
# Duration to wait in the slideshow:
#
# IMAGEVIEWER_DURATION = 3
#
# If set to False, the slideshow must be started
# manually with the play button:
#
# IMAGEVIEWER_AUTOPLAY = True
#
# When viewing images on a TV screen where the pixels are not square
# the images need to be scaled according to the aspect ratio of the TV
# Use this setting for 16x9 TVs
# IMAGEVIEWER_ASPECT = (float(1024) / float(720))
# Use this setting for 4x3 TVs
# IMAGEVIEWER_ASPECT = (float(768) / float(720))
# Use this setting for Monitors including HDTVs
# IMAGEVIEWER_ASPECT = 1.0
#
# IMAGEVIEWER_ASPECT = 1.0
# ======================================================================
# Freevo games settings:
# ======================================================================
#
# MAME is an emulator for old arcade video games. It supports almost
# 2000 different games! The actual emulator is not included in Freevo,
# you'll need to download and install it separately. The main MAME
# website is at http://www.mame.net, but the version that is used here
# is at http://x.mame.net since the regular MAME is for Windows.
#
# SNES stands for Super Nintendo Entertainment System. Freevo relies
# on other programs that are not included in Freevo to play these games.
#
# NEW GAMES SYSTEM :
# =================
# The GAMES_ITEMS structure is now built as follows:
# <NAME>, <FOLDER>, (<TYPE>, <COMMAND_PATH>, <COMMAND_ARGS>, <IMAGE_PATH>, \
# [<FILE_SUFFIX_FOR_GENERIC>])
# where :
# - <TYPE> : Internal game types (MAME or SNES) or
# generic one (GENERIC)
# - <COMMAND_PATH> : Emulator command
# - <COMMAND_ARGS> : Arguments for the emulator
#          - <IMAGE_PATH>  : Optional path to the picture
#          - <FILE_SUFFIX_FOR_GENERIC> : If the folder uses the GENERIC
# type, then you must specify here
# the file suffix used by the emulator
# GAMES_ITEMS = [
# ('MAME', '/home/media/games/xmame/roms',
# ('MAME', '/usr/local/bin/xmame.SDL', '-fullscreen -modenumber 6',
# '/home/media/games/xmame/shots', None)),
# ('SUPER NINTENDO', '/home/media/games/snes/roms',
# ('SNES', '/usr/local/bin/zsnes', '-m -r 3 -k 100 -cs -u', '', None )),
# ('Visual Boy Advance', '/home/media/games/vba/roms/',
# ('GENERIC', '/usr/local/vba/VisualBoyAdvance', ' ', '', [ 'gba' ] )),
# ('MEGADRIVE', '/home/media/games/megadrive/roms',
# ('GENESIS', '/usr/local/bin/generator-svgalib', '', '', '' ))
# ]
#
# GAMES_ITEMS = None
#
# These settings are used for the MAME arcade emulator:
#
# Priority of the game process
# 0 = Don't change the priority
# >0 - Lower priority
# <0 - Higher priority
#
# GAMES_NICE = -20
#
# MAME cache directory
#
# GAMES_MAME_CACHE = '%s/romlist-%s.pickled' % (FREEVO_CACHEDIR, os.getuid())
# ======================================================================
# Freevo SKIN settings:
# ======================================================================
#
# XML file for the skin. If SKIN_XML_FILE is set, this skin will be
# used, otherwise the skin will remember the last chosen skin.
#
# SKIN_XML_FILE = 'blurr'
#
#
# Select when to switch to text view even if an image menu is there
#
# 1 = Force text view when all items have the same image and there are no
# directories
# 2 = Ignore the directories, always switch to text view when all images
# are the same
#
# SKIN_FORCE_TEXTVIEW_STYLE = 1
#
# Force text view for the media menu
# (The media menu is the first menu displayed for video, audio, images
# and games).
#
# SKIN_MEDIAMENU_FORCE_TEXTVIEW = 0
# ======================================================================
# Freevo OSD settings:
# ======================================================================
#
# System Path to search for fonts not included in the Freevo distribution
#
# OSD_EXTRA_FONT_PATH = [ '/usr/share/fonts/truetype' ]
#
# Font aliases
# All names must be lowercase! All alternate fonts must be in './share/fonts/'
#
# OSD_FONT_ALIASES = { 'arial_bold.ttf' : 'VeraBd.ttf' }
# For non-european character sets the OSD_FORCE_FONTNAME and
# OSD_FORCE_FONTSIZE can be set. The size is a scaling ratio, ie 1.2.
# Setting OSD_FORCE_FONTNAME='batang.ttf' and OSD_FORCE_FONTSIZE=1.0
# allows Korean characters.
# OSD_FORCE_FONTNAME = None
# OSD_FORCE_FONTSIZE = None
#
# Number of seconds to wait until the busy icon is shown in the menu.
# Busy icon can also be shown right away when there is more than a certain
# number of files in a directory.
#
# Set this to None to disable this.
# (seconds, files)
#
# OSD_BUSYICON_TIMER = (0.7, 200)
#
# Number of pixels to move the display to centre the OSD on the display
#
# OSD_OVERSCAN_LEFT = OSD_OVERSCAN_RIGHT = 0
# OSD_OVERSCAN_TOP = OSD_OVERSCAN_BOTTOM = 0
#
# Setting the cursors when freevo is run in fullscreen mode
#
# OSD_X11_CURSORS = '/usr/lib/X11/cursors/black.cursor /usr/lib/X11/cursors/blank.cursor'
#
# Execute a script on OSD startup.
#
# OSD_SDL_EXEC_AFTER_STARTUP = ""
#
# Execute a script on OSD close.
#
# OSD_SDL_EXEC_AFTER_CLOSE = ""
#
# Stop the osd before playing a movie with xine or mplayer. Some output
# devices need this. After playback, the osd will be restored
#
# OSD_STOP_WHEN_PLAYING = 0
#
# Dim text that doesn't fit instead of using ellipses.
# OSD_DIM_TEXT = 1
#
# OSD sound effects
#
# OSD_SOUNDS_ENABLED=False
# OSD_SOUNDS= {
# 'menu.navigate': None,
# 'menu.back_one': None,
# 'menu.select' : None
# }
#
# Padding between icons
#
# OSD_IDLEBAR_PADDING = 20
# OSD_IDLEBAR_FONT = 'small0'
# OSD_IDLEBAR_CLOCK_FONT = 'clock'
# ======================================================================
# Freevo remote control settings:
# ======================================================================
#
# Location of the lircrc file
#
# For remote control support, Freevo needs a lircrc file, like this:
#
# begin
# prog = freevo
# button = select
# config = SELECT
# end
#
# Check contrib/lirc for examples and helpers/freevo2lirc.pl for a converter
# script.
#
# LIRCRC = '/etc/freevo/lircrc'
#
# Joystick support
# 0 = Disable joystick support
# 1 = Use js0,
# 2 = Use js1,
# ... etc
#
# JOY_DEV = 0
# JOY_SENS = 32000
# JOY_LOCKFILE = /path/to/joystick/lockfile
# Freevo can support as many buttons as your controller has
# as long as there is a corresponding entry in JOY_CMDS.
# You will also need to plugin.activate('joy').
# FYI: new kernels use /dev/input/jsX, but joy.py will fall back on /dev/jsX
#
# JOY_CMDS = {
# 'up' : 'UP',
# 'down' : 'DOWN',
# 'left' : 'LEFT',
# 'right' : 'RIGHT',
# 'button 1' : 'PLAY',
# 'button 2' : 'PAUSE',
# 'button 3' : 'STOP',
# 'button 4' : 'ENTER',
# }
# Here are the PS3 joy stick mappings
# JOY_CMDS = {
# 'button 5' : 'UP',
# 'button 7' : 'DOWN',
# 'button 8' : 'LEFT',
# 'button 6' : 'RIGHT',
# 'up' : 'UP',
# 'down' : 'DOWN',
# 'left' : 'LEFT',
# 'right' : 'RIGHT',
# 'button 14' : 'SELECT', # <circle>
# 'button 16' : 'EXIT', # <square>
# 'button 15' : 'STOP', # <x>
# 'button 1' : 'ENTER', # <select>
#
# 'button 11' : 'VOL+', # <L1>
# 'button 9' : 'VOL-', # <L2>
# 'button 2' : 'MUTE', # <L3>
#
# 'button 10' : 'MUTE', # <R2>
# 'button 4' : 'PLAY', # <start>
# 'button 13' : 'PLAY', # <start>
# 'button 17' : 'MENU', # <ps>
# }
# ======================================================================
# TVtime settings:
# ======================================================================
#
# Location of the TV time program
# Default: Use the value in freevo.conf
#
# TVTIME_CMD = CONF.tvtime
# ======================================================================
# MPlayer settings:
# ======================================================================
# MPLAYER_AO_DEV = 'oss:/dev/dsp' # e.g.: oss,sdl,alsa, see mplayer docs
# MPLAYER_VO_DEV specifies the -vo option to mplayer, if the display in
# freevo.conf is X11 then this defaults to xv,sdl,x11, see mplayer -vo help for
# the full list of output drivers.
# MPLAYER_VO_DEV = 'dfbmga' # X11 drivers in order of preference
# MPLAYER_VO_DEV_OPTS = '' # e.g.: ':some_var=vcal'
# MPLAYER_AUDIO_CACHE_KB = 256
# MPLAYER_AUDIO_CACHE_MIN_PERCENT = 25
# MPLAYER_AUDIO_NETWORK_OPTS = '-cache %d -cache-min %d' % (MPLAYER_AUDIO_CACHE_KB, MPLAYER_AUDIO_CACHE_MIN_PERCENT)
# DVD_LANG_PREF = 'en,se,no' # Order of preferred languages on DVD.
# DVD_SUBTITLE_PREF = '' # Order of preferred subtitles on DVD.
# Priority of mplayer process. 0 is unchanged, <0 is higher prio, >0 lower prio.
# prio <0 has no effect unless run as root.
# MPLAYER_NICE = -20
#
# Mplayer options to use the software scaler. If your CPU is fast enough, you
# might try a software scaler. You can disable it later for some larger files
# with the mplayer option '-nosws'. If you have -framedrop or -hardframedrop
# as mplayer option, the software scaler will also not be used.
# A good value for this variable is:
# MPLAYER_SOFTWARE_SCALER = "-subfont-text-scale 5 -fs -sws 2 -vf scale=%s:-3,"\
# "expand=%s:%s " % ( CONF.width, CONF.width, CONF.height )
# older versions of mplayer may need
# MPLAYER_SOFTWARE_SCALER = '-xy %s -sws 2 -vop scale:-1:-1:-1:100' % CONF.width
#
# MPLAYER_SOFTWARE_SCALER = ''
#
# Mplayer arguments for different media formats. (eg DVDs, CDs, AVI files, etc)
# Uses a default value if nothing else matches.
#
# MPLAYER_ARGS['dvd'] = '-cache 8192'
# MPLAYER_ARGS['vcd'] = '-cache 4096'
# MPLAYER_ARGS['cd'] = '-cache 1024 -cdda speed=2'
# MPLAYER_ARGS['tv'] = '-nocache'
# MPLAYER_ARGS['ivtv'] = '-cache 8192'
# MPLAYER_ARGS['avi'] = '-cache 5000 -idx'
# MPLAYER_ARGS['flv'] = '-nocache -forceidx'
# MPLAYER_ARGS['mp4'] = '-nocache -forceidx'
# MPLAYER_ARGS['rm'] = '-cache 5000 -forceidx'
# MPLAYER_ARGS['rmvb'] = '-cache 5000 -forceidx'
# MPLAYER_ARGS['webcam'] = 'tv:// -tv driver=v4l:width=352:height=288:outfmt=yuy2:device=/dev/video2'
# MPLAYER_ARGS['default'] = '-cache 5000'
#
# Number of seconds before seek value times out. This is used when
# seeking a specified number of minutes into a movie. If you make
# a mistake or change your mind, the seek value will timeout after
# this many seconds.
#
# MPLAYER_SEEK_TIMEOUT = 8
#
# Autocrop files when playing. This is useful for files in 4:3 with black
# bars on a 16:9 tv
#
# MPLAYER_AUTOCROP = 0
#
# Try to set correct 'delay' and 'mc' values for mplayer based on the delay
# from mmpython.
#
# This should correct av sync problems with mplayer for some files, but
# may also break things. (I don't know, that's why it's disabled by default).
# WARNING: When seeking, the playback is out of sync for some seconds!
#
# MPLAYER_SET_AUDIO_DELAY = 0
#
# Mplayer video filter for interlaced or progressive videos. If you have
# a slow pc, do not use post processing
# MPLAYER_VF_INTERLACED = ''
# MPLAYER_VF_PROGRESSIVE = 'pp=fd'
# For pal and dvb-t recordings, the following looks good
# MPLAYER_VF_INTERLACED = 'pp=md/de,phase=U'
#
# MPLAYER_VF_INTERLACED = 'pp=de/fd'
# MPLAYER_VF_PROGRESSIVE = 'pp=de'
#
# For the autodetect TOP/BOTTOM field first in mplayer
# (if this feature present in mplayer)
# MPLAYER_HAS_FIELD_DOMINANCE = 1
#
# ======================================================================
# Xine settings:
# ======================================================================
# You need xine-ui version greater than 0.9.21 to use all the features
# of the xine plugin
# XINE_VO_DEV = 'xv'
# XINE_COMMAND = '%s --auto-play=fq --hide-gui --borderless --geometry %sx%s+0+0 --no-splash' % \
# (CONF.xine, CONF.width, CONF.height)
# XINE_ARGS_DEF = "--no-lirc --post='pp:quality=10;expand'"
# XINE_AO_DEV = 'oss' # alsa or oss
# Set to False if xine doesn't have '--no-lirc' option
# XINE_HAS_NO_LIRC = True
# Set to True if xine supports get_time; this enables the position to be saved
# XINE_BOOKMARK = False
# ======================================================================
# Freevo TV settings:
# ======================================================================
#
# This is where recorded video is written.
#
# XXX the path doesn't work from the www cgi scripts!
# TV_RECORD_DIR = None
# This will enable duplicate recording detection
# TV_RECORD_DUPLICATE_DETECTION = True
# This will enable only new episodes to be recorded
# TV_RECORD_ONLY_NEW_DETECTION = True
# If true, this will automatically re-encode shows after they finish recording
# NOTE: You need to set up the REENCODE_* variables as for the reencode.py plugin -
# these default values will be used to re-encode the recordings
# TV_REENCODE = False
# If true, this will remove the original recording after automatic re-encoding is done
# TV_REENCODE_REMOVE_SOURCE = False
# ================================================================================
# Watching TV
# ================================================================================
#
# XXX You must change this to fit your local conditions!
#
# NORM: ntsc, pal, secam
# INPUT: television, composite1
# CHANLIST: One of the following:
#
# us-bcast, us-cable, us-cable-hrc, japan-bcast, japan-cable, europe-west,
# europe-east, italy, newzealand, australia, ireland, france, china-bcast,
# southafrica, argentina, canada-cable, russia
#
# TV_SETTINGS = 'NORM INPUT CHANLIST DEVICE'
# eg: TV_SETTINGS = 'ntsc television us-cable /dev/video0'
#
# Video input device
#
# Usually /dev/video0, but might be /dev/video1 instead for multiple boards.
#
# FreeBSD uses the Brooktree TV-card driver, not V4L.
#
# For Linux TV_DRIVER can be 'v4l' or 'v4l2' and depends on the driver
# TV_DRIVER = 'v4l2'
# TV_DEVICE = '/dev/video0'
# TV_INPUT = 0
#
# ================================================================================
# Listening to radio
# ================================================================================
#
# Radio device default is None, /dev/video24 for ivtv
# RADIO_DEVICE = '/dev/radio'
# Radio commands:
# plugin.activate('audio.radioplayer')
# plugin.activate('audio.radio')
#
# for fmtools
# RADIO_CMD = 'fm'
# RADIO_CMD_START = (RADIO_CMD + ' -d %s ' % RADIO_DEVICE + ' -q %s 65535')
# RADIO_CMD_STOP = (RADIO_CMD + ' -d %s ' % RADIO_DEVICE + ' -q off')
#
# for ivtv-radio
# RADIO_CMD = '/usr/bin/ivtv-radio -d /dev/radio0 -i /dev/video24'
# RADIO_STATIONS = [
# ('DRS 1', '94.80'),
# ('VIRUS', '104.30'),
# (u'Radio Zürisee', '90.20'),
# ]
#
# Additional options to pass to mplayer in TV mode.
#
# eg. To turn off deinterlacing:
# TV_OPTS = '-vop pp=ci'
#
# TV_OPTS = ''
# TV_SETTINGS = '%s television %s %s' % (CONF.tv, CONF.chanlist, TV_DEVICE)
#
# Size (in MB) of the timeshift buffer. (ie: how long you can pause tv for.)
# This is set to a low default because the default buffer location is
# under FREEVO_CACHEDIR and we don't want to blow /var or /tmp.
# TIMESHIFT_BUFFER_SIZE = 128
# TIMESHIFT_ENCODE_CMD = 'mp1e -m3 -c%s -p%s -r14,100' % \
# (TV_SETTINGS.split()[3], AUDIO_INPUT_DEVICE)
# TIMESHIFT_BUFFER = '%s/timeshift.mpeg' % FREEVO_CACHEDIR
# TV_DATE_FORMAT = '%e-%b' # Day-Month: 11-Jun
# TV_TIME_FORMAT = '%H:%M' # Hour-Minute 14:05
# TV_DATETIME_FORMAT = '%A %b %d %I:%M %p' # Thursday September 24 8:54 am
# This is the filename format for files recorded using Freevo.
# You can use any of the strftime variables in it, provided you
# put two '%%' at the beginning.
#
# Some examples:
# %%A - Full weekday name.
# %%H - Hour (24-hour clock) as a decimal number [00,23].
# %%M - Minute as a decimal number [00,59].
# %%m - Month as a decimal number [01,12].
# %%d - Day of the month as a decimal number [01,31].
# %%p - Locale's equivalent of either AM or PM.
#
# More can be found at: http://www.python.org/doc/current/lib/module-time.html
# TV_RECORD_FILE_MASK = '%%m-%%d %%H:%%M %(progname)s - %(title)s'
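#
# Example (hypothetical programme data): with the default mask, a recording of
# progname 'News' titled 'Evening Edition' that starts September 24th at 20:00
# is written as '09-24 20:00 News - Evening Edition'.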
# If using the persistent recordserver
# TV_RECORD_SCHEDULE = FREEVO_STATICDIR + '/schedule.pickle'
# TV_RECORD_FAVORITES = FREEVO_STATICDIR + '/favorites.pickle'
# TV_RECORD_FAVORITES_LIST = FREEVO_STATICDIR + '/favorites.txt'
# RECORDSERVER_IP = 'localhost'
# RECORDSERVER_PORT = 18001
# RECORDSERVER_SECRET = 'secret1'
# The timer offset when to check the next recording; in the USA use 0
# RECORDSERVER_ATTIMER = 45
# If the recordserver runs as root, set the uid to the given one
# after startup. The gid must also match one of the user's group ids
# RECORDSERVER_UID = 0
# RECORDSERVER_GID = 0
# Remove old recordings if GB free is less than specified value
# RECORDSERVER_CLEANUP_THRESHOLD = 0
# start every recording X minutes before scheduled,
# and stop X minutes after scheduled - default to zero minutes.
# This must be a value in seconds although at the moment only has
# the precision of one minute.
# TV_RECORD_PADDING_PRE = 0
# TV_RECORD_PADDING_POST = 0
# Number of minutes before or after the start time of a favorite where
# a program matching the name, day of week etc should still be considered a
# favorite. For example a favorite has a start time of 21.00, but the program
# has been brought forward by the broadcaster by 10 minutes to 20.50, with
# a margin of less than 10 this program will not be recorded as the start time
# is outside the margin. But if the margin is set at 10 minutes or greater this
# program will be considered a favorite and recorded. Probably about 45 minutes
# is the best bet, better a false positive than a false negative.
# TV_RECORD_FAVORITE_MARGIN = 45
# PRE and POST recording commands. Set these to a runnable command if
# you wish to have special mixer settings or video post processing.
# VCR_PRE_REC = None
# VCR_POST_REC = None
# VCR_AUDIO = (':adevice=%s' % AUDIO_DEVICE +
# ':audiorate=32000' + # 44100 for better sound
# ':forceaudio:forcechan=1:' + # Forced mono for bug in my driver
# 'buffersize=64') # 64MB capture buffer, change?
# TV capture size for viewing and recording. Max 768x480 for NTSC,
# 768x576 for PAL. Set lower if you have a slow computer!
#
# For the 'tvtime' TV viewing application, only the horizontal size is used.
# Set the horizontal size to 400 or 480 if you have a slow (~500MHz) computer,
# it still looks OK, and the picture will not be as jerky.
# The vertical size is always either fullscreen or 480/576 (NTSC/PAL)
# for tvtime.
# TV_VIEW_SIZE = (640, 480)
# TV_REC_SIZE = (320, 240) # Default for slower computers
# Input formats for viewing and recording. The format affect viewing
# and recording performance. It is specific to your hardware, so read
# the MPlayer docs and experiment with mplayer to see which one fits
# your computer best.
# TV_VIEW_OUTFMT = 'yuy2' # Better quality, slower on pure FB/X11
# TV_REC_OUTFMT = 'yuy2'
# XXX Please see the mencoder docs for more info about the settings
# XXX below. Some stuff must be changed (adevice), others probably
# XXX should be ("Change"), or could be in some cases ("change?")
# VCR_CMD = (CONF.mencoder + ' ' +
# 'tv:// ' + # New mplayer requires this.
# '-tv driver=%s:input=%d' % (TV_DRIVER, TV_INPUT) +
# ':norm=%s' % CONF.tv +
# ':channel=%(channel)s' + # Filled in by Freevo
# ':chanlist=%s' % CONF.chanlist +
# ':width=%d:height=%d' % (TV_REC_SIZE[0], TV_REC_SIZE[1]) +
# ':outfmt=%s' % TV_REC_OUTFMT +
# ':device=%s' % TV_DEVICE +
# VCR_AUDIO + # set above
# ' -ovc lavc -lavcopts ' + # Mencoder lavcodec video codec
# 'vcodec=mpeg4' + # lavcodec mpeg-4
# ':vbitrate=1200:' + # Change lower/higher, bitrate
# 'keyint=30 ' + # Keyframe every 10 secs, change?
# '-oac mp3lame -lameopts ' + # Use Lame for MP3 encoding, must be enabled in mencoder!
# 'br=128:cbr:mode=3 ' + # MP3 const. bitrate, 128 kbit/s
# '-ffourcc divx ' + # Force 'divx' ident, better compat.
# '-endpos %(seconds)s ' + # only mencoder uses this so do it here.
# '-o %(filename)s') # Filled in by Freevo
# The TV_VIDEO_GROUPS setting handles multiple arbitrary groups of devices
# for viewing or recording. It is possible to have different Freevo
# channels use different Video Groups.
#
# See the wiki for more details:
# http://doc.freevo.org/MultiTunerConfig
# http://doc.freevo.org/Analoguemulti
#
# This example is when you have one IVTV type card installed
#
# TV_VIDEO_GROUPS = [
# VideoGroup(vdev=TV_DEVICE,
# adev=AUDIO_DEVICE,
# input_type='tuner 1',
# input_num=0,
# tuner_norm=CONF.tv,
# tuner_chanlist=CONF.chanlist,
# group_type = 'ivtv',
# desc='PVR-350 Video Group'),
# ]
#
# This example is for two normal TV cards to allow you to use one card to
# view a programme and the other card to record at the same time
#
# TV_VIDEO_GROUPS = [
# # Use this group for watching tv
# VideoGroup(vdev='/dev/video0',
# adev=None,
# input_type='tuner 1',
# tuner_norm=CONF.tv,
# tuner_chanlist=CONF.chanlist,
# desc='Watching Video Group',
# record_group=1),
# # Use this group for recording tv
# VideoGroup(vdev='/dev/video1',
# adev=None,
# input_type='tuner 1',
# tuner_norm=CONF.tv,
# tuner_chanlist=CONF.chanlist,
# desc='Recording Video Group',
# record_group=None),
# ]
#
# Settings for ivtv based cards such as the WinTV PVR-250/350.
#
# TODO: Add descriptions and valid settings for each option.
# bitrate in bps
# stream type
# Options are: 0 (mpeg2_ps), 1 (mpeg2_ts), 2 (mpeg1), 3 (mpeg2_pes_av),
# 5 (mpeg2_pes_v), 7 (mpeg2_pes_a), 10 (dvd)
# TV_IVTV_OPTIONS = {
# 'input' : 4,
# 'resolution' : '720x480',
# 'aspect' : 2,
# 'audio_bitmask' : 233,
# 'bframes' : 3,
# 'bitrate_mode' : 1,
# 'bitrate' : 4000000,
# 'bitrate_peak' : 4000000,
# 'dnr_mode' : 0,
# 'dnr_spatial' : 0,
# 'dnr_temporal' : 0,
# 'dnr_type' : 0,
# 'framerate' : 0,
# 'framespergop' : 15,
# 'gop_closure' : 1,
# 'pulldown' : 0,
# 'stream_type' : 10,
# }
#
# TV Channels. This list contains a mapping from the displayed channel name
# to the actual channel name as used by the TV watching application.
# The display name must match the names from the XMLTV guide,
# and the TV channel name must be what the tuner expects (usually a number).
#
# The TV menu is supposed to be supported by the XMLTV application for
# up to date listings, but can be used without it to just display
# the available channels.
#
# This list also determines the order in which the channels are displayed!
# N.B.: You must delete the XMLTV cache file (e.g. /var/cache/freevo/TV.xml.pickled)
# if you make changes here and restart!
#
# Format: [('xmltv channel id', 'freevo display name', 'tv channel name'), ...]
#
# If this variable is set to None (default), Freevo will try to auto-detect
# the channel list based on the xmltv file. This doesn't work for all
# xmltv grabber, e.g. the German list doesn't contain station lists. In this
# case Freevo will output the possible list for you to add them manually.
#
# If auto-detection doesn't work or you want to edit the list, run
# freevo tv_grab -query.
#
# Setting this variable to [] will deactivate the tv guide. If you don't have
# a tv card, you may also want to add plugin.remove('tv') to remove the whole
# tv menu.
#
# All channels listed here will be displayed on the TV menu, even if they're
# not present in the XMLTV listing.
#
#
# Timedependent channels:
#
# The TV_CHANNELS-list can look like this:
#
# TV_CHANNELS = [('21', 'SVT1', 'E5'),
# ('22', 'SVT2', 'E3'),
# ('26', 'TV3', 'E10'),
# ('27', 'TV4', 'E6'),
# ('10', 'Kanal 5', 'E7'),
# ('60', 'Fox Kids', 'E8', ('1234567','0600','1659')),
# ('16', 'TV6', 'E8', ('1234567','1700','2359'),
# ('1234567','0000','0300')),
# ('14', 'MTV Europe', 'E11') ]
#
# As you can see the list takes optional tuples:
# ( 'DAYS', 'START','END')
#
# 1234567 in days means all days.
# 12345 would mean monday to friday.
#
# It will display "Fox Kids" from 06:00 to 16:59 and "TV6" from 17:00 to 03:00.
# From 03:00 to 06:00 it won't be displayed at all.
#
# TV_CHANNELS = None
#
# A lambda function to sort the TV_CHANNELS
#
#TV_CHANNELS_COMPARE = lambda a, b: cmp(int(a[2]), int(b[2]))
#
# TV_FREQUENCY_TABLE - This is only used when Freevo changes the channel natively.
# This is only the case if you are using V4L2 and any of the following plugins:
# timeshift, ivtv_record, ivtv_basic_tv.
# For the standard frequency tables see src/tv/freq.py. To add your own just
# replace tuner_id in the following example with a valid tuner id (ie: '5' or
# 'BBC1') and a frequency in KHz. You may have as many entries as you like,
# anything here will simply override a corresponding entry in your standard
# frequency table and you can also have entries here that are not present in
# there.
# TV_FREQUENCY_TABLE = {
# 'tuner_id' : 55250,
# }
#
# Program to grab xmltv listings. To get a grabber, you need to download
# xmltv. A possible value for users in the USA is tv_grab_na
# Use the tv_grab helper to grab the listings and cache them. Start
# 'freevo tv_grab --help' for more information.
# XMLTV_GRABBER = ''
#
# If you want to run tv_sort on your listings add the path to tv_sort here.
# tv_sort will make sure all your programs have proper stop times, otherwise
# programs might get cut off at midnight.
#
# XMLTV_SORT = ''
#
# Number of days the grabber should get
#
# XMLTV_DAYS = 3
## ONLY ADJUST THIS IF YOUR GUIDE TIMES ARE INCORRECT ##
#
# GMT offset for XMLTV feeds that don't contain timezone information
# An example of this is the OzTivo feed which has the timestamps
# in the XML pre-adjusted for your timezone
#
# XMLTV_TIMEZONE='+0100'
# ======================================================================
# Freevo builtin encoding server settings:
# ======================================================================
# ENCODINGSERVER_UID = 0
# ENCODINGSERVER_GID = 0
#
# ENCODINGSERVER_IP = 'localhost'
# ENCODINGSERVER_PORT = 18002
# ENCODINGSERVER_SECRET = 'secret2'
# If the current directory is not writeable as the user then set
# ENCODINGSERVER_SAVEDIR to os.path.join(FREEVO_CACHEDIR, 'encodings')
# ENCODINGSERVER_SAVEDIR = '.'
# ======================================================================
# Freevo builtin commdetect server settings:
# ======================================================================
# COMMDETECTSERVER_UID = 0
# COMMDETECTSERVER_GID = 0
#
# COMMDETECTSERVER_IP = 'localhost'
# COMMDETECTSERVER_PORT = 6667
# ======================================================================
# Freevo builtin rss server settings:
# ======================================================================
# RSSSERVER_UID = 0
# RSSSERVER_GID = 0
# RSS_CHECK_INTERVAL = 3600
# RSS_FEEDS = '/etc/freevo/rss.feeds'
# RSS_DOWNLOAD = os.path.join(FREEVO_CACHEDIR, 'rssdownloads')
# RSS_VIDEO = '/media/video'
# RSS_AUDIO = '/media/podcasts'
# ======================================================================
# Freevo builtin WWW server settings:
# ======================================================================
#
# To activate the built in web server, uncomment the following line
#
# plugin.activate('www')
#
# Web server port number. 80 is the standard port, but is often
# taken already by apache, and cannot be used unless the server
# runs as root. Port 8080 is the default, change to 80 if
# needed.
#
# WEBSERVER_PORT = 8080
#
# Username / Password combinations to login to the web interface.
#
# WWW_USERS = {
# "user1" : "changeme",
# "optional" : "changeme2"
# }
#
# items to include on the web pages
# WWW_PAGES = [
# # Label Title Page
# (_('Home'), _('Home'), 'index.rpy'),
# (_('TV Guide'), _('View TV Listings'), 'guide.rpy'),
# (_('Scheduled Recordings'), _('View Scheduled Recordings'), 'record.rpy'),
# (_('Favorites'), _('View Favorites'), 'favorites.rpy'),
# (_('Media Library'), _('View Media Library'), 'library.rpy'),
# (_('Manual Recording'), _('Schedule a Manual Recording'), 'manualrecord.rpy'),
# ('Config', 'Configuration Information', 'config.rpy'),
# ('Plugins', 'Plug-in Configuration Information', 'pluginconfig.rpy'),
# ('YouTube', 'YouTube', 'youtube.rpy'),
# ('Logs', 'Logs', 'viewlogfile.rpy'),
#     ('My Page', 'My Page Title', 'mypage.html'),
#     ('My Site', 'My Site Title', 'mysite/'),
# (_('Search'), _('Advanced Search Page'), 'search.rpy'),
# (_('Help'), _('View Online Help and Documentation'), 'help/')
# ]
#
# Some sizes for the images in the web library
# Can be a tuple of sizes or a size
#
# WWW_IMAGE_SIZE = (1024, 768)
# WWW_IMAGE_THUMBNAIL_SIZE = 256
#
# Divide the TV guide into intervals of this length (in minutes)
#
# WWW_GUIDE_INTERVAL = 30
#
# Precision for TV guide (in minutes)
#
# WWW_GUIDE_PRECISION = 5 #
#
# Show this many blocks at once
#
# WWW_GUIDE_COLS = 6
# WWW_STYLESHEET = 'styles/main.css'
# WWW_JAVASCRIPT = 'scripts/display_prog-head.js'
# Integrate personal web pages into the web server
# WWW_PERSONAL_PAGE = [
# ('My Page', 'a single personal web page', 'page.html'),
# ('My Site', 'site description', 'site/'),
# ]
# ======================================================================
# Internal stuff, you shouldn't change anything here unless you know
# what you are doing
# ======================================================================
#
# Config for xml support in the movie browser
# the regexp has to be with ([0-9]|[0-9][0-9]) so we can get the numbers
#
# VIDEO_SHOW_REGEXP = "s?([0-9]|[0-9][0-9])[xe]([0-9]|[0-9][0-9])[^0-9]"
#
# Directory containing images for TV shows. A TV show matches the regular
# expression VIDEO_SHOW_REGEXP, e.g. "Name 3x10 - Title". If an image
# name.(png|jpg) (lower-case) is in this directory, it will be taken as cover
# image
#
# VIDEO_SHOW_DATA_DIR = None
#
# Remote control daemon. The server is in the Freevo main application,
# and the client is a standalone application in rc_client/
#
# ENABLE_NETWORK_REMOTE = 0
# REMOTE_CONTROL_HOST = '127.0.0.1'
# REMOTE_CONTROL_PORT = 16310
#
# Remote control daemon. Similar to the one above, but uses TCP instead
# of UDP. It is possible to send commands with a telnet client.
#
# ENABLE_TCP_NETWORK_REMOTE = 0
# REMOTE_CONTROL_TCP_HOST = '127.0.0.1'
# REMOTE_CONTROL_TCP_PORT = 16311
#
# XMLTV File
#
# This is the XMLTV file that can be optionally used for TV listings
#
# XMLTV_FILE = '/tmp/TV.xml'
#
# XML TV Logo Location
#
# Use the "makelogos.py" script to download all the
# Station logos into a directory. And then put the path
# to those logos here
# TV_LOGOS = OS_CACHEDIR + '/xmltv/logos'
# ======================================================================
# Idlebar and plugins
#
# These are the default idlebar settings, there are many more see:
# freevo plugins -l for a complete list
# To turn off use plugin.remove('idlebar.cdstatus')
#
# clock display
# %A - Full weekday name.
# %a - Abbreviated weekday name.
# %B - Full month name
# %b - Abbreviated month name
# %I - Hour (12-hour clock) as a decimal number [01,12].
# %H - Hour (24-hour clock) as a decimal number [00,23].
# %M - Minute as a decimal number [00,59].
# %m - Month as a decimal number [01,12].
# %b - Name of Month
# %d - Day of the month as a decimal number [01,31].
# %p - Locale's equivalent of either AM or PM.
# %y - Year without century as a decimal number [00,99].
# %Y - Year with century as a decimal number.
# ======================================================================
# plugin.activate('idlebar')
# plugin.activate('idlebar.tv', level=20)
# plugin.activate('idlebar.cdstatus', level=25)
# plugin.activate('idlebar.diskfree', level=30)
# plugin.activate('idlebar.clock', level=50)
# CLOCK_FORMAT = '%a %d %H:%M'
#
#
# Debugging and logging settings
#
# logging can be one of:
# CRITICAL, ERROR, WARNING, INFO, DEBUG or NOTSET
#
# debugging is a number from 0 to 9, 0 is no debugging 9 is most
#
# LOGGING = logging.DEBUG
# LOGGING_RECORDSERVER = logging.DEBUG
# DEBUG = 1
# messages go to stdout
# DEBUG_STDOUT = 0
# messages from starting a child application
# DEBUG_CHILDAPP = 1
# timing messages
# DEBUG_TIME = 0
| gpl-2.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.5/django/db/backends/postgresql_psycopg2/creation.py | 107 | 4139 | import psycopg2.extensions
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
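    # For example, a CharField with max_length=100 carries 'max_length': 100 in
    # Field.__dict__, so the 'varchar(%(max_length)s)' template below is
    # rendered as 'varchar(100)' (the value 100 is just an illustration).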
data_types = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
output = []
if f.db_index or f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
if not f.unique:
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
return output
def set_autocommit(self):
self._prepare_for_test_db_ddl()
def _prepare_for_test_db_ddl(self):
"""Rollback and close the active transaction."""
self.connection.connection.rollback()
self.connection.connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
| mit |
tedder/ansible | lib/ansible/modules/network/checkpoint/checkpoint_host.py | 7 | 6023 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: checkpoint_host
short_description: Manages host objects on Checkpoint over Web Services API
description:
- Manages host objects on Checkpoint devices including creating, updating, removing access rules objects.
All operations are performed over Web Services API.
version_added: "2.8"
author: "Ansible by Red Hat (@rcarrillocruz)"
options:
name:
description:
- Name of the access rule.
type: str
required: True
ip_address:
description:
- IP address of the host object.
type: str
state:
description:
- State of the access rule (present or absent). Defaults to present.
type: str
default: present
auto_publish_session:
description:
- Publish the current session if changes have been performed
after task completes.
type: bool
default: 'yes'
auto_install_policy:
description:
- Install the package policy if changes have been performed
after the task completes.
type: bool
default: 'yes'
policy_package:
description:
- Package policy name to be installed.
type: str
default: 'standard'
targets:
description:
- Targets to install the package policy on.
type: list
"""
EXAMPLES = """
- name: Create host object
checkpoint_host:
name: attacker
ip_address: 192.168.0.15
- name: Delete host object
checkpoint_host:
name: attacker
state: absent
"""
RETURN = """
checkpoint_hosts:
description: The checkpoint host object created or updated.
returned: always, except when deleting the host.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec, publish, install_policy
import json
def get_host(module, connection):
name = module.params['name']
payload = {'name': name}
code, response = connection.send_request('/web_api/show-host', payload)
return code, response
def create_host(module, connection):
name = module.params['name']
ip_address = module.params['ip_address']
payload = {'name': name,
'ip-address': ip_address}
code, response = connection.send_request('/web_api/add-host', payload)
return code, response
def update_host(module, connection):
name = module.params['name']
ip_address = module.params['ip_address']
payload = {'name': name,
'ip-address': ip_address}
code, response = connection.send_request('/web_api/set-host', payload)
return code, response
def delete_host(module, connection):
name = module.params['name']
ip_address = module.params['ip_address']
payload = {'name': name}
code, response = connection.send_request('/web_api/delete-host', payload)
return code, response
def needs_update(module, host):
res = False
if module.params['ip_address'] != host['ipv4-address']:
res = True
return res
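# needs_update() compares only the 'ipv4-address' field of the existing object,
# so main() issues a set-host call solely when the address differs, keeping
# repeated runs idempotent for unchanged hosts.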
def main():
argument_spec = dict(
name=dict(type='str', required=True),
ip_address=dict(type='str'),
state=dict(type='str', default='present')
)
argument_spec.update(checkpoint_argument_spec)
    required_if = [('state', 'present', ['ip_address'])]
    module = AnsibleModule(argument_spec=argument_spec, required_if=required_if)
connection = Connection(module._socket_path)
code, response = get_host(module, connection)
result = {'changed': False}
if module.params['state'] == 'present':
if code == 200:
if needs_update(module, response):
code, response = update_host(module, connection)
if module.params['auto_publish_session']:
publish(connection)
if module.params['auto_install_policy']:
install_policy(connection, module.params['policy_package'], module.params['targets'])
result['changed'] = True
result['checkpoint_hosts'] = response
else:
pass
elif code == 404:
code, response = create_host(module, connection)
if module.params['auto_publish_session']:
publish(connection)
if module.params['auto_install_policy']:
install_policy(connection, module.params['policy_package'], module.params['targets'])
result['changed'] = True
result['checkpoint_hosts'] = response
else:
if code == 200:
# Handle deletion
code, response = delete_host(module, connection)
if module.params['auto_publish_session']:
publish(connection)
if module.params['auto_install_policy']:
install_policy(connection, module.params['policy_package'], module.params['targets'])
result['changed'] = True
elif code == 404:
pass
result['checkpoint_session_uid'] = connection.get_session_uid()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
fp7-netide/Tools | debugger/Core/debugger.py | 1 | 7289 | #!/usr/bin/env python
import zmq
import sys
import time
import binascii
import argparse
import csv
#from scapy.utils import wrpcap
sys.path.insert(0,'../../../Engine/libraries/netip/python/')
sys.path.insert(0,'../../../ryu/ryu/')
from netip import *
from ofproto import ofproto_parser
from ofproto import ofproto_common
from ofproto import ofproto_protocol
from ofproto import ofproto_v1_0_parser
from ofproto import ofproto_v1_2_parser
from ofproto import ofproto_v1_3_parser
from ofproto import ofproto_v1_4_parser
from ofproto import ofproto_v1_5_parser
###################### headers for pcap creation ####################################
#Global header for pcap 2.4
pcap_global_header = ('D4 C3 B2 A1'
'02 00' #File format major revision (i.e. pcap <2>.4)
'04 00' #File format minor revision (i.e. pcap 2.<4>)
'00 00 00 00'
'00 00 00 00'
'FF FF 00 00'
                      '93 00 00 00') #user protocol selected, without IP and TCP headers
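# Field layout of the global header above: 'D4 C3 B2 A1' is the little-endian
# pcap magic number, the two '00 00 00 00' words are the timezone offset and
# timestamp accuracy, 'FF FF 00 00' is the snapshot length (0xFFFF), and
# '93 00 00 00' selects link-layer type 0x93 (147), a user-defined DLT.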
#pcap packet header that must preface every packet
pcap_packet_header = ('AA 77 9F 47'
'90 A2 04 00'
'XX XX XX XX' #Frame Size (little endian)
'YY YY YY YY') #Frame Size (little endian)
#netide packet header that must preface every packet
netide_header = ('01' #netide protocol version 1.1
'11' #openflow type
'XX XX' #Frame Size (little endian)
'01 00 00 00' #xid
'00 00 00 00 00 00 00 06') #datapath_id
######################################################################################
###################### PCAP generation ########################################
def getByteLength(str1):
return len(''.join(str1.split())) / 2
# return len(str1)
def generatePCAP(message,i):
msg_len = getByteLength(message)
# netide = netide_header.replace('XX XX',"%04x"%msg_len)
# net_len = getByteLength(netide_header)
# pcap_len = net_len + msg_len
hex_str = "%08x"%msg_len
reverse_hex_str = hex_str[6:] + hex_str[4:6] + hex_str[2:4] + hex_str[:2]
pcaph = pcap_packet_header.replace('XX XX XX XX',reverse_hex_str)
pcaph = pcaph.replace('YY YY YY YY',reverse_hex_str)
if (i==0):
# bytestring = pcap_global_header + pcaph + eth_header + ip + tcp + message
# bytestring = pcap_global_header + pcaph + netide + message
bytestring = pcap_global_header + pcaph + message
else:
# bytestring = pcaph + eth_header + ip + tcp + message
# bytestring = pcaph + netide + message
bytestring = pcaph + message
return bytestring
# writeByteStringToFile(bytestring, pcapfile)
#Splits the string into a list of tokens every n characters
def splitN(str1,n):
return [str1[start:start+n] for start in range(0, len(str1), n)]
def sum_one(i):
return i + 1
##############################################################################
parser = argparse.ArgumentParser(description='Launch the NetIDE debugger')
parser.add_argument('-o', help='Output Folder', default=".")
args = parser.parse_args()
fo = open(args.o+"/results.txt", "w")
bitout = open(args.o+"/results.pcap", 'wb')
csvfile = open(args.o+"/results.card", "w")
fieldnames = ['timestamp', 'origin', 'destination', 'msg', 'length']
#fieldnames = ['timestamp', 'origin', 'destination', 'msg']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:5557")
socket.setsockopt(zmq.SUBSCRIBE, "")
i = 0
print('[*] Waiting for logs. To exit press CTRL+C')
while True:
dst_field, src_field, msg = socket.recv_multipart()
t=time.strftime("%H:%M:%S")
dst_field = str(dst_field)
msg_str = str(msg)
src_field = str(src_field)
msg_hexadecimal = binascii.hexlify(msg)
#print(src_field, dst_field)
if src_field.startswith("0_", 0, 2) == True:
origin = src_field[2:]
destination = "core"
elif src_field.startswith("1_", 0, 2) == True:
origin = src_field[2:]
destination = "core"
elif src_field.startswith("2_", 0, 2) == True:
origin = "core"
destination = src_field[2:]
elif src_field.startswith("3_", 0, 2) == True:
origin = "core"
destination = src_field[2:]
#msg_cap = binascii.hexlify(msg)
bytestring = generatePCAP(msg_hexadecimal,i)
i = sum_one(i)
bytelist = bytestring.split()
bytes = binascii.a2b_hex(''.join(bytelist))
bitout.write(bytes)
(netide_version, netide_msg_type, netide_msg_len, netide_xid, netide_mod_id, netide_datapath) = NetIDEOps.netIDE_decode_header(msg)
netide_msg_type_v2 = NetIDEOps.key_by_value(NetIDEOps.NetIDE_type, netide_msg_type)
message_data = msg[NetIDEOps.NetIDE_Header_Size:]
ret = bytearray(message_data)
writer.writerow({'timestamp':t, 'origin':origin, 'destination':destination, 'msg':msg_hexadecimal, 'length':len(ret)})
if len(ret) >= ofproto_common.OFP_HEADER_SIZE:
(version, msg_type, msg_len, xid) = ofproto_parser.header(ret)
msg_decoded = ofproto_parser.msg(netide_datapath, version, msg_type, msg_len, xid, ret)
elif len(ret) < ofproto_common.OFP_HEADER_SIZE:
(version, msg_type, msg_len, xid, msg_decoded) = ("", "", "", "", "")
#if dst_field[2:] == "shim":
#if 'msg_decoded' in locals() or 'msg_decoded' in globals():
print "New message from %r to %r at %r"%(origin, destination, t)
print "\033[1;32mNetIDE header: Version = %r, Type of msg = %r, Length = %r Bytes, XID = %r, Module ID = %r, Datapath = %r\033[1;m"% (netide_version, netide_msg_type_v2, netide_msg_len, netide_xid, netide_mod_id, netide_datapath)
print '\033[1;32mOpenFlow message header: Version = %r, Type of msg = %r, Length = %r Bytes, XID = %r\033[1;m'% (version, msg_type, msg_len, xid)
print '\033[1;32mOpenFlow message: %r \033[1;m'% (msg_decoded)
print "\n"
#writer.writerow({'timestamp':t, 'origin':dst_field, 'destination':src_field, 'msg':msg_hexadecimal, 'length':msg_len})
fo.write("[%r] [%r] [%r] %r \n"% (t, origin, destination, msg_decoded))
#else:
#if 'msg_decoded' in locals() or 'msg_decoded' in globals():
#print "New message from backend %r to %r at %r"%(dst_field, src_field, t)
#print "\033[1;36mNetIDE header: Version = %r, Type of msg = %r, Length = %r Bytes, XID = %r, Module ID = %r, Datapath = %r\033[1;m"% (netide_version, netide_msg_type_v2, netide_msg_len, netide_xid, netide_mod_id, netide_datapath)
#print '\033[1;36mOpenFlow message header: Version = %r, Type of msg = %r, Length = %r Bytes, XID = %r\033[1;m'% (version, msg_type, msg_len, xid)
#print '\033[1;36mOpenFlow message: %r \033[1;m'% (msg_decoded)
#print "\n"
#writer.writerow({'timestamp':t, 'origin':dst_field, 'destination':src_field, 'msg':msg_hexadecimal, 'length':msg_len})
#fo.write("[%r] [%r] %r \n"% (t, dst_field, msg_decoded))
fo.close()
bitout.close()
csvfile.close() | epl-1.0 |
IndonesiaX/edx-platform | openedx/core/lib/tests/assertions/events.py | 174 | 9803 | """Assertions related to event validation"""
import json
import pprint
def assert_event_matches(expected, actual, tolerate=None):
"""
Compare two event dictionaries.
Fail if any discrepancies exist, and output the list of all discrepancies. The intent is to produce clearer
error messages than "{ some massive dict } != { some other massive dict }", instead enumerating the keys that
differ. Produces period separated "paths" to keys in the output, so "context.foo" refers to the following
structure:
{
'context': {
'foo': 'bar' # this key, value pair
}
}
The other key difference between this comparison and `assertEquals` is that it supports differing levels of
tolerance for discrepancies. We don't want to litter our tests full of exact match tests because then anytime we
add a field to all events, we have to go update every single test that has a hardcoded complete event structure in
it. Instead we support making partial assertions about structure and content of the event. So if I say my expected
event looks like this:
{
'event_type': 'foo.bar',
'event': {
'user_id': 10
}
}
This method will raise an assertion error if the actual event either does not contain the above fields in their
exact locations in the hierarchy, or if it does contain them but has different values for them. Note that it will
*not* necessarily raise an assertion error if the actual event contains other fields that are not listed in the
expected event. For example, the following event would not raise an assertion error:
{
'event_type': 'foo.bar',
'referer': 'http://example.com'
'event': {
'user_id': 10
}
}
Note that the extra "referer" field is not considered an error by default.
The `tolerate` parameter takes a set that allows you to specify varying degrees of tolerance for some common
eventing related issues. See the `EventMatchTolerates` class for more information about the various flags that are
supported here.
Example output if an error is found:
Unexpected differences found in structs:
* <path>: not found in actual
* <path>: <expected_value> != <actual_value> (expected != actual)
Expected:
{ <expected event }
Actual:
{ <actual event> }
"<path>" is a "." separated string indicating the key that differed. In the examples above "event.user_id" would
refer to the value of the "user_id" field contained within the dictionary referred to by the "event" field in the
root dictionary.
"""
differences = get_event_differences(expected, actual, tolerate=tolerate)
if len(differences) > 0:
debug_info = [
'',
'Expected:',
block_indent(expected),
'Actual:',
block_indent(actual),
'Tolerating:',
block_indent(EventMatchTolerates.default_if_not_defined(tolerate)),
]
differences = ['* ' + d for d in differences]
message_lines = differences + debug_info
raise AssertionError('Unexpected differences found in structs:\n\n' + '\n'.join(message_lines))
class EventMatchTolerates(object):
"""
Represents groups of flags that specify the level of tolerance for deviation between an expected event and an actual
event.
These are common event specific deviations that we don't want to handle with special case logic throughout our
tests.
"""
# Allow the "event" field to be a string, currently this is the case for all browser events.
STRING_PAYLOAD = 'string_payload'
# Allow unexpected fields to exist in the top level event dictionary.
ROOT_EXTRA_FIELDS = 'root_extra_fields'
# Allow unexpected fields to exist in the "context" dictionary. This is where new fields that appear in multiple
# events are most commonly added, so we frequently want to tolerate variation here.
CONTEXT_EXTRA_FIELDS = 'context_extra_fields'
# Allow unexpected fields to exist in the "event" dictionary. Typically in unit tests we don't want to allow this
# type of variance since there are typically only a small number of tests for a particular event type.
PAYLOAD_EXTRA_FIELDS = 'payload_extra_fields'
@classmethod
def default(cls):
"""A reasonable set of tolerated variations."""
# NOTE: "payload_extra_fields" is deliberately excluded from this list since we want to detect erroneously added
# fields in the payload by default.
return {
cls.STRING_PAYLOAD,
cls.ROOT_EXTRA_FIELDS,
cls.CONTEXT_EXTRA_FIELDS,
}
@classmethod
def lenient(cls):
"""Allow all known variations."""
return cls.default() | {
cls.PAYLOAD_EXTRA_FIELDS
}
@classmethod
def strict(cls):
"""Allow no variation at all."""
return frozenset()
@classmethod
def default_if_not_defined(cls, tolerates=None):
"""Use the provided tolerance or provide a default one if None was specified."""
if tolerates is None:
return cls.default()
else:
return tolerates
def assert_events_equal(expected, actual):
"""
Strict comparison of two events.
This asserts that every field in the real event exactly matches the expected event.
"""
assert_event_matches(expected, actual, tolerate=EventMatchTolerates.strict())
def get_event_differences(expected, actual, tolerate=None):
"""Given two events, gather a list of differences between them given some set of tolerated variances."""
tolerate = EventMatchTolerates.default_if_not_defined(tolerate)
# Some events store their payload in a JSON string instead of a dict. Comparing these strings can be problematic
# since the keys may be in different orders, so we parse the string here if we were expecting a dict.
if EventMatchTolerates.STRING_PAYLOAD in tolerate:
expected = parse_event_payload(expected)
actual = parse_event_payload(actual)
def should_strict_compare(path):
"""
We want to be able to vary the degree of strictness we apply depending on the testing context.
Some tests will want to assert that the entire event matches exactly, others will tolerate some variance in the
context or root fields, but not in the payload (for example).
"""
if path == [] and EventMatchTolerates.ROOT_EXTRA_FIELDS in tolerate:
return False
elif path == ['event'] and EventMatchTolerates.PAYLOAD_EXTRA_FIELDS in tolerate:
return False
elif path == ['context'] and EventMatchTolerates.CONTEXT_EXTRA_FIELDS in tolerate:
return False
else:
return True
return compare_structs(expected, actual, should_strict_compare=should_strict_compare)
def block_indent(text, spaces=4):
"""
Given a multi-line string, indent every line of it by the given number of spaces.
If `text` is not a string it is formatted using pprint.pformat.
"""
return '\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])
def parse_event_payload(event):
"""
Given an event, parse the "event" field as a JSON string.
Note that this may simply return the same event unchanged, or return a new copy of the event with the payload
parsed. It will never modify the event in place.
"""
if 'event' in event and isinstance(event['event'], basestring):
event = event.copy()
try:
event['event'] = json.loads(event['event'])
except ValueError:
pass
return event
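# --- Editor's sketch (added, not original): parse_event_payload decodes a JSON
# string payload into a dict and never mutates its argument; the sample dict is
# made up.
def _example_parse_event_payload():
    raw = {'event': '{"user_id": 42}'}
    parsed = parse_event_payload(raw)
    assert parsed['event'] == {'user_id': 42}
    assert raw['event'] == '{"user_id": 42}'  # the original dict is untouched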
def compare_structs(expected, actual, should_strict_compare=None, path=None):
"""
Traverse two structures to ensure that the `actual` structure contains all of the elements within the `expected`
one.
Note that this performs a "deep" comparison, descending into dictionaries, lists and other collections to ensure
that the structure matches the expectation.
If a particular value is not recognized, it is simply compared using the "!=" operator.
"""
if path is None:
path = []
differences = []
if isinstance(expected, dict) and isinstance(actual, dict):
expected_keys = frozenset(expected.keys())
actual_keys = frozenset(actual.keys())
for key in expected_keys - actual_keys:
differences.append('{0}: not found in actual'.format(_path_to_string(path + [key])))
if should_strict_compare is not None and should_strict_compare(path):
for key in actual_keys - expected_keys:
differences.append('{0}: only defined in actual'.format(_path_to_string(path + [key])))
for key in expected_keys & actual_keys:
child_differences = compare_structs(expected[key], actual[key], should_strict_compare, path + [key])
differences.extend(child_differences)
elif expected != actual:
differences.append('{path}: {a} != {b} (expected != actual)'.format(
path=_path_to_string(path),
a=repr(expected),
b=repr(actual)
))
return differences
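# --- Editor's sketch (added): calling compare_structs directly, applying strict
# comparison only at the payload path. The dicts and the lambda are illustrative.
def _example_compare_structs():
    expected = {'event': {'user_id': 42}}
    actual = {'event': {'user_id': 43, 'attempt': 1}, 'ip': '10.0.0.1'}
    diffs = compare_structs(
        expected, actual,
        should_strict_compare=lambda path: path == ['event'],
    )
    # Reports the mismatched 'event.user_id' and the extra 'event.attempt' key,
    # but tolerates the extra root-level 'ip' field.
    return diffs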
def is_matching_event(expected_event, actual_event, tolerate=None):
"""Return True iff the `actual_event` matches the `expected_event` given the tolerances."""
return len(get_event_differences(expected_event, actual_event, tolerate=tolerate)) == 0
def _path_to_string(path):
"""Convert a list of path elements into a single path string."""
return '.'.join(path)
| agpl-3.0 |
mationic/pyload | module/lib/jinja2/constants.py | 1169 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| gpl-3.0 |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/gunicorn/http/body.py | 153 | 7355 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator,
InvalidChunkSize)
from gunicorn import six
class ChunkedReader(object):
def __init__(self, req, unreader):
self.req = req
self.parser = self.parse_chunked(unreader)
self.buf = six.BytesIO()
def read(self, size):
if not isinstance(size, six.integer_types):
raise TypeError("size must be an integral type")
if size < 0:
raise ValueError("Size must be positive.")
if size == 0:
return b""
if self.parser:
while self.buf.tell() < size:
try:
self.buf.write(six.next(self.parser))
except StopIteration:
self.parser = None
break
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf = six.BytesIO()
self.buf.write(rest)
return ret
def parse_trailers(self, unreader, data):
buf = six.BytesIO()
buf.write(data)
idx = buf.getvalue().find(b"\r\n\r\n")
done = buf.getvalue()[:2] == b"\r\n"
while idx < 0 and not done:
self.get_data(unreader, buf)
idx = buf.getvalue().find(b"\r\n\r\n")
done = buf.getvalue()[:2] == b"\r\n"
if done:
unreader.unread(buf.getvalue()[2:])
return b""
self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx])
unreader.unread(buf.getvalue()[idx + 4:])
def parse_chunked(self, unreader):
(size, rest) = self.parse_chunk_size(unreader)
while size > 0:
while size > len(rest):
size -= len(rest)
yield rest
rest = unreader.read()
if not rest:
raise NoMoreData()
yield rest[:size]
# Remove \r\n after chunk
rest = rest[size:]
while len(rest) < 2:
rest += unreader.read()
if rest[:2] != b'\r\n':
raise ChunkMissingTerminator(rest[:2])
(size, rest) = self.parse_chunk_size(unreader, data=rest[2:])
def parse_chunk_size(self, unreader, data=None):
buf = six.BytesIO()
if data is not None:
buf.write(data)
idx = buf.getvalue().find(b"\r\n")
while idx < 0:
self.get_data(unreader, buf)
idx = buf.getvalue().find(b"\r\n")
data = buf.getvalue()
line, rest_chunk = data[:idx], data[idx + 2:]
chunk_size = line.split(b";", 1)[0].strip()
try:
chunk_size = int(chunk_size, 16)
except ValueError:
raise InvalidChunkSize(chunk_size)
if chunk_size == 0:
try:
self.parse_trailers(unreader, rest_chunk)
except NoMoreData:
pass
return (0, None)
return (chunk_size, rest_chunk)
def get_data(self, unreader, buf):
data = unreader.read()
if not data:
raise NoMoreData()
buf.write(data)
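# --- Editor's sketch (added, not gunicorn code): a minimal in-memory stand-in for
# gunicorn's Unreader, just enough to drive ChunkedReader over a canned body.
class _StubUnreader(object):
    def __init__(self, data):
        self.buf = data
    def read(self):
        data, self.buf = self.buf, b""
        return data
    def unread(self, data):
        self.buf = data + self.buf
def _example_chunked_read():
    # One 5-byte chunk ("hello") followed by the terminating zero-size chunk.
    reader = ChunkedReader(req=None, unreader=_StubUnreader(b"5\r\nhello\r\n0\r\n\r\n"))
    assert reader.read(1024) == b"hello"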
class LengthReader(object):
def __init__(self, unreader, length):
self.unreader = unreader
self.length = length
def read(self, size):
if not isinstance(size, six.integer_types):
raise TypeError("size must be an integral type")
size = min(self.length, size)
if size < 0:
raise ValueError("Size must be positive.")
if size == 0:
return b""
buf = six.BytesIO()
data = self.unreader.read()
while data:
buf.write(data)
if buf.tell() >= size:
break
data = self.unreader.read()
buf = buf.getvalue()
ret, rest = buf[:size], buf[size:]
self.unreader.unread(rest)
self.length -= size
return ret
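# --- Editor's sketch (added): LengthReader caps reads at the declared
# Content-Length and pushes surplus bytes back onto the unreader. Reuses the
# _StubUnreader sketch defined above; the sample body is made up.
def _example_length_read():
    unreader = _StubUnreader(b"hello world")
    reader = LengthReader(unreader, 5)
    assert reader.read(1024) == b"hello"   # capped at length=5
    assert unreader.buf == b" world"       # surplus handed back via unread()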
class EOFReader(object):
def __init__(self, unreader):
self.unreader = unreader
self.buf = six.BytesIO()
self.finished = False
def read(self, size):
if not isinstance(size, six.integer_types):
raise TypeError("size must be an integral type")
if size < 0:
raise ValueError("Size must be positive.")
if size == 0:
return b""
if self.finished:
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf = six.BytesIO()
self.buf.write(rest)
return ret
data = self.unreader.read()
while data:
self.buf.write(data)
if self.buf.tell() > size:
break
data = self.unreader.read()
if not data:
self.finished = True
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf = six.BytesIO()
self.buf.write(rest)
return ret
class Body(object):
def __init__(self, reader):
self.reader = reader
self.buf = six.BytesIO()
def __iter__(self):
return self
def __next__(self):
ret = self.readline()
if not ret:
raise StopIteration()
return ret
next = __next__
def getsize(self, size):
if size is None:
return six.MAXSIZE
elif not isinstance(size, six.integer_types):
raise TypeError("size must be an integral type")
elif size < 0:
return six.MAXSIZE
return size
def read(self, size=None):
size = self.getsize(size)
if size == 0:
return b""
if size < self.buf.tell():
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf = six.BytesIO()
self.buf.write(rest)
return ret
while size > self.buf.tell():
data = self.reader.read(1024)
if not len(data):
break
self.buf.write(data)
data = self.buf.getvalue()
ret, rest = data[:size], data[size:]
self.buf = six.BytesIO()
self.buf.write(rest)
return ret
def readline(self, size=None):
size = self.getsize(size)
if size == 0:
return b""
data = self.buf.getvalue()
self.buf = six.BytesIO()
ret = []
while 1:
idx = data.find(b"\n", 0, size)
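            # idx semantics: position just past the newline if one was found; otherwise
            # the full size budget when enough data is buffered; otherwise 0, meaning
            # "keep the partial line and read more from the underlying reader".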
idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0
if idx:
ret.append(data[:idx])
self.buf.write(data[idx:])
break
ret.append(data)
size -= len(data)
data = self.reader.read(min(1024, size))
if not data:
break
return b"".join(ret)
def readlines(self, size=None):
ret = []
data = self.read()
while len(data):
pos = data.find(b"\n")
if pos < 0:
ret.append(data)
data = b""
else:
line, data = data[:pos + 1], data[pos + 1:]
ret.append(line)
return ret
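# --- Editor's sketch (added, not gunicorn code): Body wrapping an EOFReader; the
# tiny local reader class below only supplies the read() call EOFReader needs.
def _example_body_readline():
    class _AllAtOnce(object):
        def __init__(self, data):
            self.data = data
        def read(self):
            data, self.data = self.data, b""
            return data
    body = Body(EOFReader(_AllAtOnce(b"first line\nsecond line\n")))
    assert body.readline() == b"first line\n"
    assert body.read() == b"second line\n"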
| apache-2.0 |
kaarl/pyload | module/plugins/hoster/UptoboxCom.py | 2 | 1495 | # -*- coding: utf-8 -*-
from module.plugins.internal.XFSHoster import XFSHoster
class UptoboxCom(XFSHoster):
__name__ = "UptoboxCom"
__type__ = "hoster"
__version__ = "0.27"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?(uptobox|uptostream)\.com/\w{12}'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("fallback" , "bool", "Fallback to free download if premium fails" , True),
("chk_filesize", "bool", "Check file size" , True),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
__description__ = """Uptobox.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
PLUGIN_DOMAIN = "uptobox.com"
INFO_PATTERN = r'"para_title">(?P<N>.+) \((?P<S>[\d.,]+) (?P<U>[\w^_]+)\)'
OFFLINE_PATTERN = r'>(File not found|Access Denied|404 Not Found)'
TEMP_OFFLINE_PATTERN = r'>Service Unavailable'
LINK_PATTERN = r'"(https?://(?:obwp\d+\.uptobox\.com|\w+\.uptobox\.com/d)/.*?)"'
DL_LIMIT_PATTERN = r'>You have to wait (.+) to launch a new download<'
def setup(self):
self.multiDL = True
self.chunk_limit = 1
self.resume_download = True
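# --- Editor's sketch (added): the plugin's __pattern__ is a plain regular
# expression, so URL support can be checked directly; the sample link is fictitious.
def _example_pattern_match():
    import re
    return re.match(UptoboxCom.__pattern__, "https://uptobox.com/abcdefghijkl") is not None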
| gpl-3.0 |
stormpath/stormpath-sdk-android | docs/conf.py | 2 | 9336 | # -*- coding: utf-8 -*-
#
# stormpath-sdk-android documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 14 10:49:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stormpath-sdk-android'
copyright = u'2016, Eric Lamison-White'
author = u'Eric Lamison-White'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'stormpath'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'stormpath-sdk-androiddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'stormpath-sdk-android.tex', u'stormpath-sdk-android Documentation',
u'Eric Lamison-White', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'stormpath-sdk-android', u'stormpath-sdk-android Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'stormpath-sdk-android', u'stormpath-sdk-android Documentation',
author, 'stormpath-sdk-android', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |